serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,501 | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
// Tree reduction: the round at distance d folds A[tid + d] into A[tid];
// after the loop the sum of A[0..N) is in A[0].
// Assumes N is a power of two (otherwise elements are dropped, e.g. A[2]
// for N == 6) and a single block with blockDim.x >= N/2.
__global__ void test_Prog(int *A, int N) {
  const int tid = threadIdx.x;
  for (int d = N/2; d > 0; d = d / 2) {
    if (tid < d) {
      A[tid] += A[tid + d];
    }
    // Barrier between rounds: each round reads values other threads wrote
    // in the previous round. Without it the kernel relies on deprecated
    // implicit warp-synchronous execution and races under independent
    // thread scheduling (Volta+).
    __syncthreads();
  }
}
|
22,502 | #include <stdio.h>
// Demo kernel: every thread prints its thread id, block id, and the float
// argument via device-side printf (output is flushed at the next
// synchronizing runtime call on the host).
__global__ void holaCUDA(float e) {
printf("Hola, soy el hilo %i del bloque %i con valor pi -> %f \n",threadIdx.x,blockIdx.x,e);
}
int main(int argc, char **argv){
  // Launch 3 blocks of 4 threads; each thread prints a greeting.
  holaCUDA<<<3,4>>>(3.1416);
  // Wait for the kernel so its device-side printf output is guaranteed to
  // be flushed before the context is torn down.
  cudaDeviceSynchronize();
  cudaDeviceReset();
  return 0;
} |
22,503 | #include <cmath>
#include <cstdio>
#include <ctime>
#include <iostream>
// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i].
// One thread per element; the tail of the last block is guarded.
__global__
void add(float *d_a, float *d_b, float *d_c, long num)
{
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < num)
    d_c[gid] = d_a[gid] + d_b[gid];
}
int main(void)
{
  std::clock_t start_time;
  double duration01;
  double duration02;
  double duration03;
  const long ARR_SIZE = 500000000;
  const size_t ARR_BYTES = ARR_SIZE*sizeof(float);
  // Clock start
  start_time = std::clock();
  // Allocate host arrays on the heap: three 500M-float arrays are ~6 GB
  // in total, far beyond any stack limit — the original stack-array
  // version would crash before main even ran.
  float *h_a = new float[ARR_SIZE];
  float *h_b = new float[ARR_SIZE];
  float *h_c = new float[ARR_SIZE];
  // Initialize input arrays
  for (long i=0; i<ARR_SIZE; i++){
    h_a[i] = float(i);
    h_b[i] = float(i)*2.0f;  // float literal avoids a double round-trip
  }
  // Declare and alloc arrays on device
  float *d_a;
  float *d_b;
  float *d_c;
  cudaMalloc(&d_a, ARR_BYTES);
  cudaMalloc(&d_b, ARR_BYTES);
  cudaMalloc(&d_c, ARR_BYTES);
  // Transfer inputs to device
  cudaMemcpy(d_a, h_a, ARR_BYTES, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, ARR_BYTES, cudaMemcpyHostToDevice);
  // Clock stop 01
  duration01 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
  std::cout<<"Computing time until Kernel call: "<< duration01 << "s" << std::endl;
  // Launch: one thread per element, extra block covers the tail.
  const int threadPerBlock = 1024;
  const int numBlock = ARR_SIZE/threadPerBlock+1;
  add<<<numBlock, threadPerBlock>>>(d_a, d_b, d_c, ARR_SIZE);
  // Block until the device has completed all tasks
  cudaDeviceSynchronize();
  // Clock stop 02
  duration02 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
  std::cout<<"Computing time after Kernel call: "<< duration02 << "s" << std::endl;
  // Transfer results to host
  cudaMemcpy(h_c, d_c, ARR_BYTES, cudaMemcpyDeviceToHost);
  // Clock stop 03
  duration03 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
  std::cout<<"Computing time after memory copy: "<< duration03 << "s" << std::endl;
  // Output the first few results as a sanity check
  for(long ii=0; ii<10; ii++){
    std::cout<< h_c[ii]<< ", ";
  }
  std::cout<< std::endl;
  // Release device and host memory (the original leaked all six buffers).
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  delete[] h_a;
  delete[] h_b;
  delete[] h_c;
  return 0;
}
|
22,504 | #include <math.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <sys/timeb.h>
// Hypercube
// Version: pas de mémoire partagée
// On réduit tout sur une dimension à chaque appel (non optimisé pour une mémoire partagée par block)
// Pas de distinction entre des threads du même block
// Pas limité en taille
#define pow2(x) (1<<(x))
// Nombre de threads par bloc
#define NBTHREADS_MAX 1024
#define check(error) { checkCudaError((error), __FILE__, __LINE__); }
// Abort the program with a diagnostic (file:line and the CUDA error
// string) if `code` is anything other than cudaSuccess.
void checkCudaError(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "Erreur CUDA: %s:%d %s\n", file, line, cudaGetErrorString(code));
    exit(EXIT_FAILURE);
}
// Note: l'utilisation de mémoire partagée ne paraît pas optimale ici
// nvprof ne montre pas d'amélioration significative (voir des baisses de performance)
// Hypercube-sum kernel: reduces t[0..taille) into t[0] in
// ceil(log2(taille)) rounds using shared memory.
// Single-block only: __syncthreads() cannot synchronise across blocks,
// so taille must fit in one block (<= NBTHREADS_MAX).
__global__
void kernel_hypercube(int *t, int taille) {
// Block-local working copy of t.
__shared__ int s[NBTHREADS_MAX];
const int x = threadIdx.x;
const int nbits = (int)ceil(log2((double)taille));
int d, mask, in, out;
if(x < taille) {
s[x] = t[x];
}
__syncthreads();
for(d = 1; d <= nbits; ++d) {
if (x < pow2(nbits - d)) {
// Round d folds element XXX1000... ("in") into XXX0000... ("out"),
// where XXX is this thread's index x; lower bits are already reduced.
mask = x << d;
in = mask | pow2(d - 1);
out = mask;
if (in < taille) {
s[out] += s[in];
}
}
// Only works up to 1024 elements because of __syncthreads():
// it only synchronises threads of the same block (<= 1024 threads),
// and every thread — active or not — must reach the barrier to avoid
// deadlock, which is why it sits outside the if.
__syncthreads();
}
if(x < taille) {
t[x] = s[x];
}
}
// Single-round kernel: folds one dimension d of the hypercube sum.
// Multi-block safe — the host launches it once per dimension and
// synchronises between launches, since blocks cannot synchronise with
// each other inside a kernel.
__global__
void kernel_hypercube_uneDim(int *t, int taille, int d, int nbits) {
const int x = threadIdx.x + blockIdx.x * blockDim.x;
int mask, in, out;
// We fold XXX1000 into XXX0000:
// XXX is this thread's value of x,
// followed by 1 for the source ("in") and 0 for the destination ("out"),
// followed by zeroes only, since lower dimensions are already reduced.
if (x < pow2(nbits - d)) {
mask = x << d;
in = mask | pow2(d - 1);
out = mask;
if (in < taille) {
t[out] += t[in];
}
}
}
// Hypercube-sum algorithm, sequential reference version used to validate
// the GPU kernels. Folds pairs 2^(d-1) apart over ceil(log2(taille))
// rounds; the total accumulates in t[0]. The array is modified in place.
int somme_hypercube(int *t, int taille) {
    const int nbits = (int)ceil(log2((double)taille));
    for (int d = 1; d <= nbits; ++d) {
        const int half = 1 << (d - 1);
        const int pairs = 1 << (nbits - d);
        for (int x = 0; x < pairs; ++x) {
            const int out = x << d;
            const int in = out | half;
            if (in < taille)
                t[out] += t[in];
        }
    }
    return t[0];
}
// Plain sequential sum of the first `taille` elements of arr.
int somme(int *arr, int taille) {
    int total = 0;
    for (int idx = 0; idx < taille; ++idx)
        total += arr[idx];
    return total;
}
// Fill t[0..taille) with pseudo-random values in {0, 1, 2}.
void fillRandomly(int *t, int taille) {
    for (int i = 0; i < taille; ++i)
        t[i] = rand() % 3;
}
// Print the array on one line, elements separated by spaces.
void printArr(int *t, int taille) {
    for (int i = 0; i < taille; ++i)
        printf("%d ", t[i]);
    printf("\n");
}
// Fixed seed so runs are reproducible.
int main(int argc, char **argv) {
cudaEvent_t start, stop;
float millis;
int nBytes;
int *h_arr = NULL;
int *d_arr = NULL;
int nbBlocks;
int nbits, d;
struct timeb tav, tap;
double te;
int somCpu;
const int taille = argc < 2 ? 1000000 : strtol(argv[1], NULL, 10);
nBytes = sizeof(int) * taille;
nbits = (int)ceil(log2((double)taille));
h_arr = (int*)malloc(nBytes);
if(!h_arr) {
fprintf(stderr, "Erreur d'allocation mémoire\n");
exit(1);
}
check(cudaEventCreate(&start));
check(cudaEventCreate(&stop));
check(cudaMalloc(&d_arr, nBytes));
srand(1234);
fillRandomly(h_arr, taille);
if(taille < 100) printArr(h_arr, taille);
// Time the sequential CPU reference sum.
ftime(&tav);
somCpu = somme(h_arr, taille);
ftime(&tap);
te = (double)((tap.time*1000+tap.millitm)-(tav.time*1000+tav.millitm))/1000 ;
// BUG FIX: report the true size in MiB (bytes / 2^20). The original
// divided by 512*1024 and therefore printed twice the actual size.
printf("%d éléments, %.3fMo\n", taille, taille * (double)sizeof(int) / (1024.0 * 1024.0));
printf("SequentielCPU: %d, %.3lfs\n", somCpu, te);
// somme_hypercube() mutates the array, so regenerate the same data.
srand(1234);
fillRandomly(h_arr, taille);
check(cudaMemcpy(d_arr, h_arr, nBytes, cudaMemcpyHostToDevice));
cudaEventRecord(start);
nbBlocks = (taille - 1) / NBTHREADS_MAX + 1;
if(nbBlocks == 1) {
// A single block can synchronise internally with __syncthreads().
kernel_hypercube<<<nbBlocks, NBTHREADS_MAX>>>(d_arr, taille);
}
else {
// Otherwise reduce one dimension per launch: threads of different
// blocks cannot be synchronised from inside a kernel.
for(d = 1; d <= nbits; ++d) {
kernel_hypercube_uneDim<<<nbBlocks, NBTHREADS_MAX>>>(d_arr, taille, d, nbits);
check(cudaDeviceSynchronize());
}
}
check(cudaEventRecord(stop));
check(cudaEventSynchronize(stop));
check(cudaEventElapsedTime(&millis, start, stop));
check(cudaMemcpy(h_arr, d_arr, nBytes, cudaMemcpyDeviceToHost));
if(taille < 100) printArr(h_arr, taille);
printf("HypercubeCUDA: %d, %.3fs\n", h_arr[0], millis / 1000.0f);
check(cudaFree(d_arr));
check(cudaEventDestroy(start));
check(cudaEventDestroy(stop));
free(h_arr);
return 0;
}
|
22,505 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
// Thrust host/device vector walkthrough: build a host_vector, print it,
// shrink it, copy it to the device, mutate and print the device copy.
int main(int argc, char* argv[]) {
    // Host-side vector with room for 4 ints.
    thrust::host_vector<int> H(4);
    // Assign each element individually.
    H[0] = 14;
    H[1] = 20;
    H[2] = 38;
    H[3] = 46;
    // Report the current element count.
    std::cout << "H has size " << H.size() << std::endl;
    for (size_t i = 0; i < H.size(); ++i)
        std::cout << "H[" << i << "] = " << H[i] << std::endl;
    // Shrink to the first two elements.
    H.resize(2);
    std::cout << "H now has size " << H.size() << std::endl;
    // Copy-constructing a device_vector from a host_vector performs the
    // host-to-device transfer.
    thrust::device_vector<int> D = H;
    // device_vector elements are assignable from the host.
    D[0] = 99;
    D[1] = 88;
    for (size_t i = 0; i < D.size(); ++i)
        std::cout << "D[" << i << "] = " << D[i] << std::endl;
    // Both vectors release their storage automatically on scope exit.
    return 0;
}
|
22,506 | #include "includes.h"
// Square matrix multiply, one output element per thread.
// Thread (tx, ty) computes _result[ty][tx] as the dot product of row ty
// of A with column tx of B. Single-block kernel: only threadIdx is used,
// so _width must not exceed the block dimensions.
__global__ void MatrixMulKernel(int * _matrixA, int * _matrixB, int * _result, int _width)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    int acc = 0;
    for (int k = 0; k < _width; k++)
    {
        const int elementA = _matrixA[ty * _width + k]; // walk along the row
        const int elementB = _matrixB[k * _width + tx]; // walk down the column
        acc += elementA * elementB;
    }
    _result[_width * ty + tx] = acc;
} |
22,507 | #include <stdio.h>
#include <stdlib.h>
// Multiply each element of the row-major m x n matrix `a` (rows of
// length n) by the b entry of its column, in place.
// Block-stride loop: blockDim.x threads cover all m*n elements.
__global__ void multiplication(int n, int m, int *a, int *b)
{
  int index = threadIdx.x;
  int stride = blockDim.x;
  for (int i = index; i < m*n; i += stride){ // T threads per iteration
    // BUG FIX: the original kept a second cursor j stuck at `index`, so
    // every element a thread visited was multiplied by the same b entry.
    // The multiplier must follow the element's column, i % n.
    a[i] = a[i] * b[i % n];
  }
}
int main(int argc, char **argv){
  // a is an M x N matrix (row-major), b an N-vector, c the M-vector result.
  int N = 1500;
  int M = 1000;
  int *a, *b, *c;             // Host copies of a, b, c
  int *dev_a, *dev_b, *dev_c; // Device copies of a, b, c
  // Event timer
  cudaEvent_t start, stop;
  float elapsedTime;
  // Allocate space for device copies a, b, c
  cudaMalloc((void **) &dev_a, sizeof(int)*N*M);
  cudaMalloc((void **) &dev_b, sizeof(int)*N);
  cudaMalloc((void **) &dev_c, sizeof(int)*M);
  a = (int *) malloc(sizeof(int)*N*M);
  b = (int *) malloc(sizeof(int)*N);
  c = (int *) malloc(sizeof(int)*M);
  // Initialization: all ones, so each c[i] should end up equal to N.
  for( int i = 0; i < (M*N); i++ ){ // all lines in sequence
    a[i] = 1; //i+1;
  }
  for(int j = 0; j < N; j++){ // rows
    b[j] = 1; //j+1;
  }
  for(int i = 0; i < M; i++){ // rows
    c[i] = 0;
  }
  // Start counter (note: the measured span also includes the host-side
  // row reduction below, not just the GPU work).
  cudaEventCreate(&start);
  cudaEventRecord(start,0);
  // Copy inputs to device
  cudaMemcpy(dev_a, a, sizeof(int)*N*M, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b, b, sizeof(int)*N, cudaMemcpyHostToDevice);
  // 1 block of 64 threads; the kernel strides over all M*N elements
  multiplication<<<1,64>>>(N, M, dev_a, dev_b);
  // Copy result back to host
  cudaMemcpy(a, dev_a, sizeof(int)*N*M, cudaMemcpyDeviceToHost);
  // Reduce each row of a into c on the host
  for(int i = 0; i < M*N; i++){ // columns
    c[i/N] += a[i];
    //printf("%d, ", a[i]);
  }
  // Stop counter
  cudaEventCreate(&stop);
  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start,stop);
  // Result
  for(int i = 0; i < M; i++){
    printf("[ %d ]\n", c[i]);
  }
  printf("Elapsed time: %f ms\n", elapsedTime);
  // Clean up — the original leaked dev_c and both events.
  free(a); free(b); free(c);
  cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
  cudaEventDestroy(start); cudaEventDestroy(stop);
  return 0;
} |
22,508 | #include <stdio.h>
#include <math.h>
#include <ctime>
using namespace std;
// CPU matrix transpose benchmark: fills a size x size matrix, transposes
// it into a second one, prints two probe elements before and after, and
// reports elapsed CPU time. Always returns 1.
// Arrays are static: two 1000x1000 double matrices (~16 MB) would
// overflow the stack otherwise.
int transponowanie(){
  clock_t begin = clock();
  int const size(1000);
  static double source[size][size];
  static double transposed[size][size];
  // Fill with distinct values so the swap is observable.
  for (int row = 0; row < size; ++row)
    for (int col = 0; col < size; ++col)
      source[row][col] = row * size + col + 1;
  printf("przed: 1:%f ", source[0][1]);
  printf("2:%f \n", source[1][0]);
  // Transpose: element (row, col) lands at (col, row).
  for (int row = 0; row < size; ++row)
    for (int col = 0; col < size; ++col)
      transposed[col][row] = source[row][col];
  printf("po: 1:%f ", transposed[0][1]);
  printf("2:%f \n", transposed[1][0]);
  clock_t end = clock();
  double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
  printf("czas CPU: %f \n", elapsed_secs);
  return 1;
}
// Entry point: run the CPU transpose benchmark once.
int main(void){
  transponowanie();
  return 0;
}
|
22,509 | #include "includes.h"
// SAXPY: y[i] = a*x[i] + y[i] for the first n elements, one thread each.
// Every in-range thread also stores 'C' into ad[0] — all threads write
// the same byte, so the race is benign, but NOTE(review): the purpose of
// `ad` is unclear from here (looks like a completion flag — confirm
// against the host code); `bd` is never used.
__global__ void saxpy(int n, float a, float *x, float *y, char *ad, char *bd)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){ y[i] = a*x[i] + y[i];
ad[0] = 'C';
}
} |
22,510 | /*
============================================================================
Name : CWLab3.cu
Author : sm01800
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <fstream>
#define BLOCK_SIZE 16
using namespace std;
// Matrices are stored in row-major order:
// element (row, col) lives at elements[row * stride + col]. `stride` is
// the leading dimension (row pitch in elements), which lets a Matrix
// alias a sub-block of a larger matrix (see GetSubMatrix below).
typedef struct {
int width;
int height;
float* elements;
int stride;
} Matrix;
//BEGIN HEADER
// Get a matrix element
// Read A[row][col] from a row-major matrix with leading dimension A.stride.
__device__ float GetElement(const Matrix A, int row, int col) {
    const float *rowStart = A.elements + row * A.stride;
    return rowStart[col];
}
// Set a matrix element
// Write `value` into A[row][col] (row-major, leading dimension A.stride).
__device__ void SetElement(Matrix A, int row, int col, float value) {
    float *rowStart = A.elements + row * A.stride;
    rowStart[col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Note, we are not doing any copying here - just finding the address of the
// start of the sub-matrix we are interested in
// Locate the BLOCK_SIZE x BLOCK_SIZE tile of A at tile coordinates
// (row, col). No data is copied — the returned Matrix aliases A's
// storage and keeps A's stride so element addressing still works.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {
    Matrix tile;
    tile.width = BLOCK_SIZE;
    tile.height = BLOCK_SIZE;
    tile.stride = A.stride;
    tile.elements = A.elements + BLOCK_SIZE * (A.stride * row + col);
    return tile;
}
//END HEADER
__global__ void MultKernShared(const Matrix A, const Matrix B, Matrix C);
// Now we have all the device functions we need to define the matrix multiplication
// Kernel. This is going to be called from the host by MatMul()
// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE), and all matrix
// dimensions are multiples of BLOCK_SIZE (there is no tail guard —
// see the note above the host wrapper below).
__global__ void MultSharedKernel(Matrix A, Matrix B, Matrix C) {
// Identify the Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Now find the row and column of the element within Csub
// that this thread is going to calculate
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from global memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronise to make sure the sub-matrices are completely loaded
// before starting the computation for each phase
__syncthreads();
// Now multiply Asub and Bsub together to complete phase m of the
// calculation of this threads element of Csub
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronise again to make sure that the preceding calculation
// has been completed by all threads in the block before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Once all the phases are complete we can write Csub to device (global) memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
// Copies A and B to the device, runs the tiled kernel, copies C back,
// and returns a "size,milliseconds" CSV row for the timing log.
string MatrixMult(const Matrix h_A, const Matrix h_B, Matrix h_C,
    int arraySize) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  // Load A and B to device memory
  Matrix d_A;
  d_A.width = d_A.stride = h_A.width;
  d_A.height = h_A.height;
  size_t size = h_A.width * h_A.height * sizeof(float);
  cudaError_t err = cudaMalloc(&d_A.elements, size);
  printf("CUDA malloc h_A: %s\n", cudaGetErrorString(err));
  cudaMemcpy(d_A.elements, h_A.elements, size, cudaMemcpyHostToDevice);
  Matrix d_B;
  d_B.width = d_B.stride = h_B.width;
  d_B.height = h_B.height;
  size = h_B.width * h_B.height * sizeof(float);
  err = cudaMalloc(&d_B.elements, size);
  printf("CUDA malloc h_B: %s\n", cudaGetErrorString(err));
  cudaMemcpy(d_B.elements, h_B.elements, size, cudaMemcpyHostToDevice);
  // Allocate C in device memory
  Matrix d_C;
  d_C.width = d_C.stride = h_C.width;
  d_C.height = h_C.height;
  size = h_C.width * h_C.height * sizeof(float);
  err = cudaMalloc(&d_C.elements, size);
  printf("CUDA malloc h_C: %s\n", cudaGetErrorString(err));
  // Invoke kernel: one BLOCK_SIZE x BLOCK_SIZE block per output tile
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid(h_B.width / dimBlock.x, h_A.height / dimBlock.y);
  cudaEventRecord(start);
  MultSharedKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent and also surfaces kernel execution errors.
  err = cudaDeviceSynchronize();
  cudaEventRecord(stop);
  printf("Run kernel: %s\n", cudaGetErrorString(err));
  // Read C from device memory
  err = cudaMemcpy(h_C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
  printf("Copy h_C off device: %s\n", cudaGetErrorString(err));
  cudaEventSynchronize(stop);
  float milliseconds = 0;
  cudaEventElapsedTime(&milliseconds, start, stop);
  // BUG FIX: the newline was embedded mid-string ("%f\n milliseconds").
  printf("Elapsed time was: %f milliseconds\n", milliseconds);
  // Free device memory and the timing events (previously leaked)
  cudaFree(d_A.elements);
  cudaFree(d_B.elements);
  cudaFree(d_C.elements);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  string csv = to_string(arraySize) + "," + to_string(milliseconds) + "\n";
  return csv;
}
// Build random size x size input matrices A and B (entries in {0,1,2} and
// {0,1}), run the GPU multiply into C, and return the timing CSV row.
string arrayGen(int size) {
  printf("Array size: %i\n", size);
  Matrix A, B, C;
  // Read Dimensions of A and B
  A.height = size;
  A.width = size;
  B.height = A.width;
  B.width = size;
  //Allocate memory for arrays
  A.elements = (float*) malloc(A.width * A.height * sizeof(float));
  B.elements = (float*) malloc(B.width * B.height * sizeof(float));
  C.height = A.height;
  C.width = B.width;
  C.elements = (float*) malloc(C.width * C.height * sizeof(float));
  //FILL ARRAYS
  for (int i = 0; i < A.height; i++)
    for (int j = 0; j < A.width; j++)
      A.elements[i * A.width + j] = (float) (rand() % 3);
  for (int i = 0; i < B.height; i++)
    for (int j = 0; j < B.width; j++)
      B.elements[i * B.width + j] = (float) (rand() % 2);
  //CALL FUNCTION
  string csv = MatrixMult(A, B, C, size);
  // Release the host buffers: the original leaked all three on every call
  // (hundreds of MB at the larger benchmark sizes).
  free(A.elements);
  free(B.elements);
  free(C.elements);
  printf("\n");
  return csv;
}
// Function to check if x is power of 2
// True iff n is non-zero and has exactly one bit set (n & (n-1) clears
// the lowest set bit, so the result is 0 only for powers of two).
bool isPowerOfTwo(int n) {
    const bool singleBit = (n & (n - 1)) == 0;
    return n != 0 && singleBit;
}
// Benchmark driver: run the GPU matrix multiply for every power-of-two
// size from 16 to 8192 and log one "size,milliseconds" CSV row each.
int main(int argc, char* argv[]) {
  std::ofstream myFile;
  myFile.open("times.csv", std::ofstream::trunc);
  myFile << "Array Size,Elapsed Time\n";
  // Walk the powers of two directly instead of scanning all integers in
  // [16, 8192] and testing each with isPowerOfTwo().
  for (int size = 16; size <= 8192; size *= 2) {
    myFile << arrayGen(size);
  }
  myFile.close();
  /*
  for (int i = 0; i < A.height; i++) {
    for (int j = 0; j < A.width; j++)
      printf("%f ", A.elements[i * A.width + j]);
    printf("\n");
  }
  printf("\n");
  for (int i = 0; i < B.height; i++) {
    for (int j = 0; j < B.width; j++)
      printf("%f ", B.elements[i * B.width + j]);
    printf("\n");
  }
  printf("\n");
  for (int i = 0; i < C.height; i++) {
    for (int j = 0; j < C.width; j++)
      printf("%f ", C.elements[i * C.width + j]);
    printf("\n");
  }
  printf("\n");
  */
  return 0;
}
|
22,511 | #include <cstdio>
#include <cstdlib>
#include <iostream>
using namespace std;
#include <cuda_runtime.h>
// Report-and-continue CUDA error check: runs `func`, then prints either
// the CUDA error string or a success line, tagged with `name`.
// Note: it does NOT abort on failure — later calls may fail in cascade.
#define CUDA_CALL(func, name) \
{ \
cudaError_t e = (func); \
if(e != cudaSuccess) \
cout << "CUDA: " << cudaGetErrorString(e) << ": " << name << endl; \
else \
cout << "CUDA SUCC: " << (name) << endl; \
}
// Initialise data[0..num) so that each slot holds its own index.
void fill_array(int * data, const int num) {
    int i = 0;
    while (i < num) {
        data[i] = i;
        ++i;
    }
}
// Verify that data[i] == 2*i for every i in [0, num): report each
// mismatch prefixed with the device label, or print a single "passed"
// line when the whole array is correct.
void check_array(char * device_prefix,
                 int * data,
                 const int num) {
    bool all_ok = true;
    for (int i = 0; i < num; i++) {
        const int expected = i * 2;
        if (data[i] != expected) {
            cout << "error: " << device_prefix << "\t" << i << "\t" << data[i] << endl;
            all_ok = false;
        }
    }
    if (all_ok)
        cout << "passed: " << device_prefix << endl;
}
// Busy-work kernel: repeatedly doubles and halves each element (a net
// no-op for the even values written by fill_array), then doubles once,
// so data[tid] ends at twice its initial value — which check_array
// verifies on the host.
// NOTE(review): there is no bounds guard; this assumes the grid covers
// exactly the array length (NUM_ELEM is a multiple of the 256-thread
// block size in this file) — confirm if either constant changes.
__global__ void gpu_test_kernel(int * data) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
for(int i = 0; i < 10000; i++){
data[tid] *= 2;
data[tid] /= 2;
}
data[tid] *= 2;
}
#define MAX_NUM_DEVICES (4)
#define NUM_ELEM (1024*1024*8)
cudaStream_t stream[MAX_NUM_DEVICES];
char device_prefix[MAX_NUM_DEVICES][300];
int * gpu_data[MAX_NUM_DEVICES];
int * cpu_src_data[MAX_NUM_DEVICES];
int * cpu_dst_data[MAX_NUM_DEVICES];
cudaEvent_t kernel_start_event[MAX_NUM_DEVICES];
cudaEvent_t memcpy_to_start_event[MAX_NUM_DEVICES];
cudaEvent_t memcpy_from_start_event[MAX_NUM_DEVICES];
cudaEvent_t memcpy_from_stop_event[MAX_NUM_DEVICES];
// Multi-GPU demo: for each device, asynchronously copy a chunk of data
// over, run gpu_test_kernel on it, copy it back, and time each phase
// with events; then verify results and tear everything down.
__host__ void gpu_kernel(void) {
  const int shared_memory_usage = 0;
  const size_t single_gpu_chunk_size = sizeof(int) * NUM_ELEM;
  const int num_threads = 256;
  const int num_blocks = ((NUM_ELEM + (num_threads - 1)) / num_threads);
  cout << "begin" << endl;
  int num_devices;
  CUDA_CALL(cudaGetDeviceCount(&num_devices), "cudaGetDeviceCount");
  if(num_devices > MAX_NUM_DEVICES)
    num_devices = MAX_NUM_DEVICES;
  cout << "num devices: " << num_devices << endl;
  // Phase 1: enqueue the full copy/kernel/copy pipeline on every device.
  for(int device_num = 0; device_num < num_devices; device_num++) {
    CUDA_CALL(cudaSetDevice(device_num), "cudaSetDevice");
    struct cudaDeviceProp device_prop;
    CUDA_CALL(cudaGetDeviceProperties(&device_prop,
        device_num), "cudaGetDeviceProperties");
    sprintf(&device_prefix[device_num][0], "\nID: %d %s : ", device_num,
        device_prop.name);
    CUDA_CALL(cudaStreamCreate(&stream[device_num]), "cudaStreamCreate");
    CUDA_CALL(cudaMalloc((void**)&gpu_data[device_num], single_gpu_chunk_size), "cudaMalloc");
    // Pinned host buffers are required for genuinely async copies.
    CUDA_CALL(cudaMallocHost((void**)&cpu_src_data[device_num],
        single_gpu_chunk_size), "cudaMallocHost");
    CUDA_CALL(cudaMallocHost((void**)&cpu_dst_data[device_num],
        single_gpu_chunk_size), "cudaMallocHost");
    fill_array(cpu_src_data[device_num], NUM_ELEM);
    CUDA_CALL(cudaEventCreate(&memcpy_to_start_event[device_num]), "create memcpy_to_start_event");
    CUDA_CALL(cudaEventCreate(&kernel_start_event[device_num]), "create kernel_start_event");
    CUDA_CALL(cudaEventCreate(&memcpy_from_start_event[device_num]), "create memcpy_from_start_event");
    CUDA_CALL(cudaEventCreate(&memcpy_from_stop_event[device_num]), "create memcpy_from_stop_event");
    // BUG FIX: record every event on the device's work stream. The
    // original recorded them on the default stream while the copies and
    // kernel ran on stream[device_num], so the elapsed times measured
    // host enqueue points rather than the asynchronous stream work.
    CUDA_CALL(cudaEventRecord(memcpy_to_start_event[device_num], stream[device_num]), "memcpy_to_start_event");
    CUDA_CALL(cudaMemcpyAsync(gpu_data[device_num],
        cpu_src_data[device_num],
        single_gpu_chunk_size,
        cudaMemcpyHostToDevice,
        stream[device_num]), "cudaMemcpyAsync");
    CUDA_CALL(cudaEventRecord(kernel_start_event[device_num], stream[device_num]), "cudaEventRecord");
    gpu_test_kernel<<<num_blocks,
        num_threads,
        shared_memory_usage,
        stream[device_num]>>>(gpu_data[device_num]);
    // Surface launch-configuration errors immediately.
    CUDA_CALL(cudaGetLastError(), "kernel launch");
    CUDA_CALL(cudaEventRecord(memcpy_from_start_event[device_num], stream[device_num]), "memcpy_from_start_event");
    CUDA_CALL(cudaMemcpyAsync(cpu_dst_data[device_num],
        gpu_data[device_num],
        single_gpu_chunk_size,
        cudaMemcpyDeviceToHost,
        stream[device_num]), "cudaMemcpyAsync");
    CUDA_CALL(cudaEventRecord(memcpy_from_stop_event[device_num], stream[device_num]), "memcpy_from_stop_event");
  }
  // Phase 2: wait for each device, read the timings, verify, clean up.
  for(int device_num = 0; device_num < num_devices; device_num++) {
    CUDA_CALL(cudaSetDevice(device_num), "");
    CUDA_CALL(cudaStreamSynchronize(stream[device_num]), "");
    float time_copy_to_ms;
    CUDA_CALL(cudaEventElapsedTime(&time_copy_to_ms,
        memcpy_to_start_event[device_num],
        kernel_start_event[device_num]), "");
    float time_kernel_ms;
    CUDA_CALL(cudaEventElapsedTime(&time_kernel_ms,
        kernel_start_event[device_num],
        memcpy_from_start_event[device_num]), "");
    float time_copy_from_ms;
    CUDA_CALL(cudaEventElapsedTime(&time_copy_from_ms,
        memcpy_from_start_event[device_num],
        memcpy_from_stop_event[device_num]), "");
    float time_exec_ms;
    CUDA_CALL(cudaEventElapsedTime(&time_exec_ms,
        memcpy_to_start_event[device_num],
        memcpy_from_stop_event[device_num]), "");
    CUDA_CALL(cudaStreamDestroy(stream[device_num]), "");
    CUDA_CALL(cudaFree(gpu_data[device_num]), "");
    // The kernel leaves each element at twice its index.
    check_array(device_prefix[device_num],
        cpu_dst_data[device_num],
        NUM_ELEM);
    CUDA_CALL(cudaFreeHost(cpu_src_data[device_num]), "");
    CUDA_CALL(cudaFreeHost(cpu_dst_data[device_num]), "");
    CUDA_CALL(cudaDeviceReset(), "");
    cout << time_copy_to_ms << "\t" << time_kernel_ms << "\t" << time_copy_from_ms << "\n"
        << time_exec_ms << "\t" << time_copy_to_ms + time_kernel_ms + time_copy_from_ms << endl;
  }
}
// Entry point: run the multi-GPU copy/kernel/copy timing demo.
int main(){
    gpu_kernel();
    return 0;
} |
22,512 | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <cuda_runtime.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution via gettimeofday).
double get_time() {
  struct timeval now;
  gettimeofday(&now, nullptr);
  return double(now.tv_sec) + double(now.tv_usec) * 1e-6;
}
constexpr int m = 256;
constexpr int n = m * m * m;
constexpr int block_size = 4;
using grid_type = float[m / block_size][m / block_size][m / block_size][4]
[block_size][block_size][block_size];
// Zero all 4 float channels of the grid buffer, one element per thread.
// Launched with n / blockDim.x blocks so threads 0..n-1 cover the data.
// NOTE(review): the flat i + k*n addressing touches exactly the 4*n
// floats of the allocation, but does not follow the nested grid_type
// AoSoA layout per element — confirm that a plain zero-fill is all
// that is intended here.
__global__ void fill(grid_type *grid_) {
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n)
    return;
  // View the whole grid as a flat float array.
  // (The original also bound an unused reference `grid` to *grid_;
  // removed as dead code.)
  float *data = (float *)grid_;
  for (int k = 0; k < 4; k++) {
    data[i + k * n] = 0;
  }
}
// Repeatedly time the fill kernel over a 4-channel unified-memory grid.
int main() {
  float *a;
  // 4 channels of n floats in unified (managed) memory.
  cudaMallocManaged(&a, n * sizeof(float) * 4);
  auto bs = block_size * block_size * block_size;
  std::cout << "bs = " << bs << std::endl;
  for (int i = 0; i < 20; i++) {
    cudaDeviceSynchronize();
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaDeviceSynchronize();
    fill<<<n / bs, bs>>>((grid_type *)a);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "device " << milliseconds << std::endl;
    // The original leaked two events per iteration (40 total) and kept
    // an unused get_time() result; both removed/fixed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
  }
  cudaFree(a);
  std::cout << std::endl;
}
|
// Sum of the w x h rectangle with corner (x, y) from a summed-area table
// (integral image, indexed [x][y]): S = A + D - B - C from the four
// corner prefix sums; corners that fall outside the image contribute 0.
__device__
int rectanglesSum(int** integralImage, int x, int y, int w, int h)
{
    const int cornerA = (x > 0 && y > 0)         ? integralImage[x - 1][y - 1]         : 0;
    const int cornerB = (x + w > 0 && y > 0)     ? integralImage[x + w - 1][y - 1]     : 0;
    const int cornerC = (x > 0 && y + h > 0)     ? integralImage[x - 1][y + h - 1]     : 0;
    const int cornerD = (x + w > 0 && y + h > 0) ? integralImage[x + w - 1][y + h - 1] : 0;
    return cornerA + cornerD - cornerB - cornerC;
}
// Evaluate one Haar-like feature per thread for a set of detection
// windows. `features` packs 5 ints per feature (type, x, y, w, h);
// `window` packs 3 floats per block/window (x offset, y offset, scale);
// results land in haarFeatures[tidX].
// NOTE(review): the guard uses the global id tidX, but the feature
// parameters are read with threadIdx.x * 5 — for grids with more than
// one block in x these disagree. Confirm the intended features layout
// against the host code.
extern "C"
__global__
void computeWindowFeatures(int** integralImage, int* features, int totalNumFeatures, float* window, int* haarFeatures)
{
// Get an "unique id" of the thread
const unsigned int blockId = ((blockIdx.y * 65535) + blockIdx.x);
const unsigned int tidX = blockId * blockDim.x + threadIdx.x;
if (tidX < totalNumFeatures)
{
// Scale the feature rectangle by the window scale and shift it to the
// window origin.
int type = features[threadIdx.x * 5];
int x = (int) ((float)features[threadIdx.x * 5 + 1] * window[blockId * 3 + 2] + window[blockId * 3]);
int y = (int) ((float)features[threadIdx.x * 5 + 2] * window[blockId * 3 + 2] + window[blockId * 3 + 1]);
int w = (int) (((float) (features[threadIdx.x * 5 + 3])) * window[blockId * 3 + 2]);
int h = (int) (((float) (features[threadIdx.x * 5 + 4])) * window[blockId * 3 + 2]);
// Type 1: two-rectangle feature, split vertically (left minus right).
if (type == 1)
{
int mid = w / 2;
int r1 = rectanglesSum(integralImage, x, y, mid, h);
int r2 = rectanglesSum(integralImage, x + mid, y, mid, h);
haarFeatures[tidX] = r1 - r2;
}
// Type 2: three-rectangle feature, three vertical strips.
else if (type == 2)
{
int mid = w / 3;
int r1 = rectanglesSum(integralImage, x, y, mid, h);
int r2 = rectanglesSum(integralImage, x + mid, y, mid, h);
int r3 = rectanglesSum(integralImage, x + 2 * mid, y, mid, h);
haarFeatures[tidX] = r1 - r2 + r3;
}
// Type 3: two-rectangle feature, split horizontally (bottom minus top).
else if (type == 3)
{
int mid = h / 2;
int r1 = rectanglesSum(integralImage, x, y, w, mid);
int r2 = rectanglesSum(integralImage, x, y + mid, w, mid);
haarFeatures[tidX] = r2 - r1;
}
// Type 4: three-rectangle feature, three horizontal strips.
else if (type == 4)
{
int mid = h / 3;
int r1 = rectanglesSum(integralImage, x, y, w, mid);
int r2 = rectanglesSum(integralImage, x, y + mid, w, mid);
int r3 = rectanglesSum(integralImage, x, y + 2 * mid, w, mid);
haarFeatures[tidX] = r1 - r2 + r3;
}
// Type 5: four-rectangle (checkerboard) feature.
else if (type == 5)
{
int mid_w = w / 2;
int mid_h = h / 2;
int r1 = rectanglesSum(integralImage, x, y, mid_w, mid_h);
int r2 = rectanglesSum(integralImage, x + mid_w, y, mid_w, mid_h);
int r3 = rectanglesSum(integralImage, x, y + mid_h, mid_w, mid_h);
int r4 = rectanglesSum(integralImage, x + mid_w, y + mid_h, mid_w, mid_h);
haarFeatures[tidX] = r1 - r2 - r3 + r4;
}
}
// No shared memory is used, so this barrier has no visible effect; it is
// harmless because every thread (guarded or not) reaches it.
__syncthreads();
}
|
22,514 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
// includes, kernels
#include "trap_kernel.cu"
#define LEFT_ENDPOINT 10
#define RIGHT_ENDPOINT 1005
#define NUM_TRAPEZOIDS 100000000
double compute_on_device(float, float, int, float);
extern "C" double compute_gold(float, float, int, float);
float ts1,ts2;
int
main(void)
{
    // Problem setup: integrate over [a, b] using n trapezoids of height h.
    int n = NUM_TRAPEZOIDS;
    float a = LEFT_ENDPOINT;
    float b = RIGHT_ENDPOINT;
    float h = (b-a)/(float)n;
    printf("The height of the trapezoid is %f \n", h);
    // CPU reference, wall-clock timed into the global ts1 (the GPU path
    // reads it to compute the speedup).
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    double reference = compute_gold(a, b, n, h);
    gettimeofday(&stop, NULL);
    ts1 = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
    printf("CPU Execution Time = %fs. \n", ts1 );
    printf("Reference solution computed on the CPU = %f \n", reference);
    // GPU version of the trapezoidal rule.
    double gpu_result = compute_on_device(a, b, n, h);
    printf("Solution computed on the GPU = %f \n", gpu_result);
    return 0;
}
/* Trapezoidal rule on the GPU: launch trap_kernel to produce one partial
   sum per block, copy them back, add the endpoint term on the host, and
   return h * sum. GRID and BLOCK come from trap_kernel.cu. */
double
compute_on_device(float a, float b, int n, float h)
{
int Block_num = ceil((GRID/BLOCK));
size_t fs = sizeof(float);
//size_t is = sizeof(int);
double sum;
// one partial result for each block
float *partial_result = (float *)malloc(GRID/BLOCK * fs);
float *Result_fromGPU;
cudaMalloc((void**)&Result_fromGPU, Block_num * fs);
dim3 thread_block(BLOCK, 1, 1);
dim3 grid(Block_num, 1);
struct timeval start, stop;
gettimeofday(&start, NULL);
trap_kernel <<< grid, thread_block >>> (a, b, n, h, Result_fromGPU);
// NOTE(review): cudaThreadSynchronize() is deprecated; the supported
// equivalent is cudaDeviceSynchronize().
cudaThreadSynchronize();
gettimeofday(&stop, NULL);
cudaMemcpy(partial_result, Result_fromGPU, Block_num *fs, cudaMemcpyDeviceToHost);
ts2 = (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
printf("GPU Execution time = %fs. \n", ts2);
printf("SPEEDUP = %fs. \n", ts1/ts2);
// Endpoint term for f(x) = (x+1)/sqrt(x^2 + x + 1).
// NOTE(review): the trapezoidal rule endpoint term is (f(a) + f(b)) / 2,
// but as written the /2 applies only to the f(a) term — this looks like
// missing parentheses; confirm against trap_kernel.cu's weighting.
sum = ((b) + 1)/sqrt(pow(b, 2) + (b + 1)) + ((a) + 1)/sqrt(pow(a, 2) + (a + 1))/2;
int i = 0;
while(i < Block_num)
{
sum = sum + partial_result[i];
i++;
}
cudaFree(Result_fromGPU);
free(partial_result);
return (h*(sum));
}
|
22,515 | #include "includes.h"
// In-place flood fill of one z-plane of an L x M x N voxel volume:
// starting from the plane's corner (0,0), repeatedly marks every empty
// (== 0) 4-neighbor of an OUTSIDE_1 voxel as OUTSIDE_1 until a full
// forward sweep plus (if needed) a backward sweep make no change.
// NOTE(review): the +1/-1 and +L/-L neighbor steps are only bounds-
// checked against the whole volume [0, ts), so at row and plane edges
// they can wrap into the adjacent row/plane — confirm this is intended.
__device__ void Device_FloodFillZPlane(int zPlane, int L, int M, int N, unsigned char* vol)
{
long idx, idxS, idxN, ts;
bool anyChange = false;
int x, y;
ts = L*M*N;
// seed: set the plane's point (0,0) to OUTSIDE_1
idx = zPlane*L*M /* + 0*L + 0 */;
vol[idx] = OUTSIDE_1;
anyChange = true;
while(anyChange) {
anyChange = false;
// forward sweep: left to right, top to bottom
for(x=0; x < L; x++) {
for(y=0; y < M; y++) {
idxS = idx + y*L + x;
// if the point is OUTSIDE_1, then set all of its empty neighbors
// to OUTSIDE_1
if(vol[idxS] == OUTSIDE_1) {
idxN = idxS + L;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS - L;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS + 1;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS - 1;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
}
}
}
if(anyChange) {
// same propagation, but sweeping right to left and bottom to top so
// marks can flow back against the forward scan direction
anyChange = false;
for(x=L-1; x >=0; x--) {
for(y=M-1; y >=0; y--) {
idxS = idx + y*L + x;
// if the point is OUTSIDE_1, then set all of its empty neighbors
// to OUTSIDE_1
if(vol[idxS] == OUTSIDE_1) {
idxN = idxS + L;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS - L;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS + 1;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
idxN = idxS - 1;
if((idxN >= 0) && (idxN < ts) && (vol[idxN] == 0)) {
vol[idxN] = OUTSIDE_1;
anyChange = true;
}
}
}
}
}
}
}
// One thread per z-plane: thread t flood-fills plane t of the volume.
// NOTE(review): only threadIdx.x is used, so this assumes a single block
// whose blockDim.x covers the number of z-planes — confirm at the
// launch site.
__global__ void kernel_for_z_planes(unsigned char *d_vol,int L,int M,int N)
{
Device_FloodFillZPlane(threadIdx.x,L,M,N,d_vol);
} |
22,516 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <vector>
//#include <fstream>
//#include <string>
//#include <algorithm>
//#include <chrono>
//#include <random>
//
//using namespace std;
//
//#define IMAGE_PATH "mnist\\train-images.idx3-ubyte"
//#define LABEL_PATH "mnist\\train-labels.idx1-ubyte"
//#define BIAS_WEIGHT_PATH "mnist\\biasweight.txt"
//
//#define IMAGE_SIZE 28
//#define IMAGE_HEADER_SIZE 16
//#define LABEL_HEADER_SIZE 8
//
//#define TOTAL_SIZE 1200 //60000 max
//#define MINI_BATCH_SIZE 100 // <= TOTAL_IMG_SIZE
//
//#define LEARN_COUNT 20
//#define LEARN_RATE 1
//#define DIFF_H 1e-4;
//#define DIFF_H_2 2 * 1e-4;
//
//struct Image {
// int num;
// unsigned char image[28][28];
//};
//
//float sigmoid_cu(float x);
//vector<float>* softmax_cu(vector<float>* a);
//float* matrix_multiplication_cu(float* a, int m, int n, float* b, int o, int p, int blocks, int threads);
//float** matrix_transpose_cu(float** a, int m, int n);
//void set_sigmoid_backward_cu(float** dy, float m, float n, float** y, float o, float p);
//
//vector<Image>* read_image_label_cu(const char* image_path, const char* label_path);
//vector<vector<float>*>* read_bias_weight_cu(const char* bias_weight_path);
//void write_bias_weight_cu(const char* path, float** bs, int* m, float*** ws, int** n);
//
//void print_image_cu(Image* img);
//void print_matrix_cu(float** a, int m, int n);
//float* convert_image_cu(Image* img);
//
//float* get_normal_distribution_array_cu(int n);
//vector<int>* mini_batch_idx_sort_cu(int n, int count);
//float cross_entropy_error_cu(vector<float>* y, vector<float>* t);
//
//vector<float>* predict_cu(Image* img, vector<vector<float>*>* bs, vector<vector<vector<float>*>*>* ws);
//float*** predictlevels_cu(Image* img, float** bs, float*** ws);
//float*** get_weight_gradient(Image* img, float** bs, float*** ws);
//
//void learn_cu(Image* img, float** bs, float*** ws);
//void backprop_cu(Image* img, float** bs, float*** ws);
//void printpredict_cu(vector<Image>* images, vector<int>* mini_batch, vector<vector<float>*>* bs, vector<vector<vector<float>*>*>* ws);
//
//__global__ void kernel_mm(float* a, int m, int n, float* b, int o, int p, float* res);
//__global__ void kernel_bs(float* a, int m, int n, float* bs);
//
//vector<vector<float>*>* bv_cu;
//vector<vector<vector<float>*>*>* wv_cu;
//vector<vector<float>*>* bg_cu;
//vector<vector<vector<float>*>*>* wg_cu;
//
//int main()
//{
// //initialize
// srand(static_cast<unsigned int>(time(NULL)));
// vector<Image>* images = read_image_label_cu(IMAGE_PATH, LABEL_PATH);
// vector<vector<float>*>* vs = read_bias_weight_cu("mnist\\bwvalue.txt");
//
// vector<vector<float>*>* bs = new vector<vector<float>*>();
// int i, j, k, l, cnt = 0;
// for (i = 0; i < 3; i++) {
// bs->push_back(vs->at(cnt++));
// }
//
// vector<vector<vector<float>*>*>* ws = new vector<vector<vector<float>*>*>();
// vector<vector<float>*>* wt;
//
// wt = new vector<vector<float>*>();
// for (i = 0; i < 784; i++) {
// wt->push_back(vs->at(cnt++));
// //wt->push_back(get_normal_distribution_array(50));
// //wt->push_back(new vector<float>(50, 0.5));
// }
// ws->push_back(wt);
//
// wt = new vector<vector<float>*>();
// for (i = 0; i < 50; i++) {
// wt->push_back(vs->at(cnt++));
// //wt->push_back(get_normal_distribution_array(100));
// //wt->push_back(new vector<float>(100, 0.5));
// }
// ws->push_back(wt);
//
// wt = new vector<vector<float>*>();
// for (i = 0; i < 100; i++) {
// wt->push_back(vs->at(cnt++));
// //wt->push_back(get_normal_distribution_array(10));
// //wt->push_back(new vector<float>(10, 0.5));
// }
// ws->push_back(wt);
// //~initialize
//
// //cnn
// int n = images->size(), correct = 0, wrong = 0, seq = 0, max_index;
// float error, max_value, mh, ph;
//
// vector<int>* mini_batch = mini_batch_idx_sort_cu(TOTAL_SIZE, MINI_BATCH_SIZE);
//
// //learn
// printpredict_cu(images, mini_batch, bs, ws);
// //for (i = 0; i < MINI_BATCH_SIZE; i++) {
// // cout << i << " - learn " << images->at(mini_batch->at(i)).num << ' ';
// // //learn(&images->at(mini_batch->at(i)), bs, ws);
// // backprop_cu(&images->at(mini_batch->at(i)), bs, ws);
// // cout << endl;
// //}
// printpredict_cu(images, mini_batch, bs, ws);
// //~learn
//
// //~cnn
//
// //deallocate
// delete mini_batch;
// for (i = 0; i < ws->size(); i++) {
// delete ws->at(i);
// }
// delete ws;
//
// for (i = 0; i < vs->size(); i++) {
// delete vs->at(i);
// }
// delete vs;
// delete images;
// //~deallocate
//
// return 0;
//}
//
//vector<Image>* read_image_label_cu(const char* image_path, const char* label_path)
//{
// int header = 0, row = 0, col = 0, n, m, i = 0, j = 0, k = 0, l = 0, count = 0;
//
// //read image
// ifstream input_image(image_path, ios::binary);
// //vector<char> bytes_i(istreambuf_iterator<char>(input_image), (istreambuf_iterator<char>()));
// vector<char> bytes_i;
// char headerbuffer[IMAGE_HEADER_SIZE];
// input_image.read(headerbuffer, IMAGE_HEADER_SIZE);
// for (i = 0; i < IMAGE_HEADER_SIZE; i++) {
// bytes_i.push_back(headerbuffer[i]);
// }
//
// for (i = 0; i < TOTAL_SIZE; i++) {
// char imagebuffer[784];
// input_image.read(imagebuffer, 784);
// for (j = 0; j < 784; j++) {
// bytes_i.push_back(imagebuffer[j]);
// }
// }
//
// //read label
// ifstream input_label(label_path, ios::binary);
// //vector<char> bytes_l(istreambuf_iterator<char>(input_label), (istreambuf_iterator<char>()));
// vector<char> bytes_l;
// char labelbuffer[LABEL_HEADER_SIZE + TOTAL_SIZE];
// input_label.read(labelbuffer, LABEL_HEADER_SIZE + TOTAL_SIZE);
// for (int i = 0; i < LABEL_HEADER_SIZE + TOTAL_SIZE; i++) {
// bytes_l.push_back(labelbuffer[i]);
// }
//
// n = bytes_i.size();
// m = bytes_l.size();
// vector<Image>* res = new vector<Image>();
//
// i = 0;
// l = 0;
// cout << "image header : ";
// for (j = 0; j < IMAGE_HEADER_SIZE; j++) {
// cout << (int)(unsigned char)bytes_i[i++] << ' ';
// }
// cout << endl;
//
// cout << "label header : ";
// for (j = 0; j < LABEL_HEADER_SIZE; j++) {
// cout << (int)(unsigned char)bytes_l[l++] << ' ';
// }
// cout << endl;
//
// while (i < n && l < m && count < TOTAL_SIZE) {
//
// Image img;
// img.num = (int)(unsigned char)bytes_l[l++];
//
// for (j = 0; j < IMAGE_SIZE; j++) {
// for (k = 0; k < IMAGE_SIZE; k++) {
// img.image[j][k] = (unsigned char)bytes_i[i++];
// }
// }
// res->push_back(img);
// //print_image(&img);
// count++;
// }
//
// cout << res->size() << " images" << endl;
//
// input_image.close();
// input_label.close();
//
// return res;
//}
//
//vector<vector<float>*>* read_bias_weight_cu(const char* bias_weigh_tpath) {
// ifstream bias_wieght(bias_weigh_tpath);
// vector<char> chars(istreambuf_iterator<char>(bias_wieght), (istreambuf_iterator<char>()));
// int n = chars.size(), i, k;
// vector<vector<float>*>* vs = new vector<vector<float>*>();
// vector<float>* v = new vector<float>();
// string s = "";
// for (i = 0; i < n; i++) {
// if (chars[i] == '\n') {
// vs->push_back(v);
// v = new vector<float>();
// }
// else if (chars[i] == '/') {
// v->push_back(stof(s));
// s.clear();
// }
// else {
// s += chars[i];
// }
// }
//
// return vs;
//}
//
//vector<int>* mini_batch_idx_sort_cu(int n, int count) {
// vector<int>* res = new vector<int>();
// int i, x;
//
// if (count <= n && count >= 0) {
// int* arr = new int[n];
// for (i = 0; i < n; i++) {
// arr[i] = i;
// }
// for (i = n; i > n - count; i--) {
// x = rand() % i;
// res->push_back(arr[x]);
// arr[x] = arr[i - 1];
// }
// delete[] arr;
// }
// sort(res->begin(), res->end());
// return res;
//}
//
//
//void printpredict_cu(vector<Image>* images, vector<int>* mini_batch, float** bs, int* bs_arr_sz, int bs_sz
// , vector<vector<vector<float>*>*>* ws) {
// int i, j, seq, max_index, correct = 0, wrong = 0;
// float error, max_value;
// for (i = 0; i < MINI_BATCH_SIZE; i++) {
// seq = mini_batch->at(i);
// cout << "task " << i + 1 << " -> value : " << images->at(seq).num;
// vector<float>* output = predict_cu(&images->at(seq), bs, ws);
// vector<float>* answer = new vector<float>(10, 0);
// answer->at(images->at(seq).num) = 1;
// error = cross_entropy_error_cu(output, answer);
//
// max_value = 0;
// max_index = -1;
// for (j = 0; j < output->size(); j++) {
// if (output->at(j) > max_value) {
// max_value = output->at(j);
// max_index = j;
// }
// }
//
// if (max_index == images->at(seq).num) {
// correct++;
// }
// else {
// wrong++;
// }
// cout << " result : " << max_index << " error : " << error
// << " accuracy : " << correct / (float)(correct + wrong) << endl;
//
// delete output;
// delete answer;
// }
//}
//
//float* convert_image_cu(Image* image) {
// float* res = new float[IMAGE_SIZE * IMAGE_SIZE];
// int i, j;
// for (i = 0; i < IMAGE_SIZE; i++) {
// for (j = 0; j < IMAGE_SIZE; j++) {
// res[i * IMAGE_SIZE + j] = image->image[i][j];
// }
// }
// return res;
//}
//
//vector<float>* predict_cu(Image* img, vector<vector<float>*>* bs, vector<vector<vector<float>*>*>* ws) {
// float* input = convert_image_cu(img);
// int i;
//
// float* ws0 = ws->at(0)
//
// float* level_1 = matrix_multiplication_cu(input, 28, 28, );
// set_bias_sigmoid_cu(level_1, bs->at(0));
//
// float* level_2 = matrix_multiplication_cu(level_1, ws->at(1));
// set_bias_sigmoid_cu(level_2, bs->at(1));
//
// float* level_3 = matrix_multiplication_cu(level_2, ws->at(2));
// set_bias_sigmoid_cu(level_3, bs->at(2));
//
// vector<float>* output = softmax_cu(level_3->at(0));
//
// delete level_1;
// delete level_2;
// delete level_3;
// delete input;
//
// return output;
//}
//
//vector<float>* softmax_cu(vector<float>* a) {
// int i, n = a->size();
// float max = 0, expsum = 0;
// vector<float> ac(n);
// vector<float>* res = new vector<float>();
// for (i = 0; i < n; i++) {
// if (a->at(i) > max) {
// max = a->at(i);
// }
// }
//
// for (i = 0; i < n; i++) {
// ac[i] = a->at(i) - max;
// expsum += exp(ac[i]);
// }
//
// for (i = 0; i < n; i++) {
// res->push_back(exp(ac[i]) / expsum);
// }
//
// return res;
//}
//
//float* matrix_multiplication_cu(float* a, int m, int n, float* b, int o, int p, float* bs, int q, int blocks, int threads) {
//
// float* dev_a = 0;
// float* dev_b = 0;
// float* dev_c = 0;
//
// int mal_a = m * n;
// int mal_b = o * p;
// int mal_c = m * p;
//
// cudaMalloc((void**)&dev_c, mal_c * sizeof(float));
//
// cudaMalloc((void**)&dev_a, mal_a * sizeof(float));
// cudaMemcpy(dev_a, a, mal_a * sizeof(float), cudaMemcpyHostToDevice);
//
// cudaMalloc((void**)&dev_b, mal_b * sizeof(float));
// cudaMemcpy(dev_b, b, mal_b * sizeof(float), cudaMemcpyHostToDevice);
//
// kernel_mm << <blocks, threads >> > (a, m, n, b, o, p, c);
//
// cudaMemcpy(dev_c, dev_c, mal_c * sizeof(int), cudaMemcpyDeviceToHost);
//
// return dev_c;
//}
//
//__global__ void kernel_mm(float* a, int m, int n, float* b, int o, int p, float* res) {
//
//}
//
//__global__ void kernel_bs(float* a, int m, int n, float* bs) {
//
//} |
22,517 | #include <cuda_runtime.h>
#include <stdio.h>
// Element-wise vector sum C = A + B, intended to be fed zero-copy (mapped
// pinned host) pointers. One thread per element; extra threads do nothing.
__global__ void sumArraysZeroCopy(float *A,float *B,float *C,const int N){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) return;          // guard the grid tail
    C[gid] = A[gid] + B[gid];
}
// CPU reference implementation: C[i] = A[i] + B[i] for every i in [0, N).
void sumArraysOnHost(float *A, float *B, float *C,const int N){
    int i = 0;
    while (i < N) {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// Element-wise vector sum C = A + B over device-global memory.
// 1D grid, one element per thread, bounds-checked against N.
__global__ void sumArraysOnGPU(float *A,float *B,float *C,const int N){
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N) {
        C[tid] = A[tid] + B[tid];
    }
}
// Compare host and GPU result arrays element-wise and report the first
// mismatch (if any) to stdout.
// hostRef: CPU reference results; gpuRef: device results; N: element count.
// NOTE: epsilon = 1e-8 is tighter than general float rounding allows, but
// both sides compute a single addition here, so results should match exactly.
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    for (int i = 0; i < N; i++){
        // FIX: use fabs, not abs -- integer abs() truncates the float
        // difference to 0 for any |diff| < 1, masking real mismatches.
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon){
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
            break;
        }
    }
    return;
}
// Fill ip[0..size) with pseudo-random floats in [0.0, 25.5]
// (low byte of rand(), scaled down by 10).
void initialData(float *ip, int size){
    for (int k = 0; k < size; ++k) {
        ip[k] = (float)(rand() & 0xFF)/10.0f;
    }
    return;
}
// Compares vector addition with ordinary device memory (explicit cudaMemcpy)
// against zero-copy (mapped pinned host) memory.
// argv[1] (optional): power-of-two exponent of the vector length (default 10).
// Returns EXIT_SUCCESS; exits early if the device cannot map host memory.
int main(int argc,char **argv){
    // part 0: set up device and verify it supports mapped (zero-copy) memory
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp,dev);
    if (!deviceProp.canMapHostMemory){
        printf("Device %d does not support mapping CPU host memory!\n",dev);
        cudaDeviceReset();
        exit(EXIT_SUCCESS);
    }
    printf("Using Device %d: %s ",dev,deviceProp.name);
    // set up data size of vectors
    int ipower = 10;
    if (argc > 1) ipower = atoi(argv[1]);
    int nElem = 1 << ipower;
    size_t nBytes = nElem * sizeof(float);
    if (ipower < 18){
        printf("Vector size %d power %d nbytes %3.0f KB\n",nElem,
                ipower,(float)nBytes/(1024.0f));
    }else{
        // BUG FIX: megabytes require dividing by 1024*1024, not 1024
        // (the original printed the KiB value labeled "MB").
        printf("Vector size %d power %d nbytes %3.0f MB\n",nElem,
                ipower,(float)nBytes/(1024.0f*1024.0f));
    }
    // part 1: using device memory
    // malloc host memory
    float *h_A,*h_B,*hostRef,*gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // initialize data at host side
    initialData(h_A,nElem);
    initialData(h_B,nElem);
    memset(hostRef,0,nBytes);
    memset(gpuRef,0,nBytes);
    // add vector at host side for result checks
    sumArraysOnHost(h_A,h_B,hostRef,nElem);
    // malloc device global memory
    float *d_A,*d_B,*d_C;
    cudaMalloc((float **)&d_A,nBytes);
    cudaMalloc((float **)&d_B,nBytes);
    cudaMalloc((float **)&d_C,nBytes);
    // transfer data from host to device
    cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,nBytes,cudaMemcpyHostToDevice);
    // set up execution configuration
    int iLen = 512;
    dim3 block(iLen);
    dim3 grid((nElem+block.x-1)/block.x);
    // invoke kernel at host side
    sumArraysOnGPU<<<grid,block>>>(d_A,d_B,d_C,nElem);
    // copy kernel result back to host side (blocking copy doubles as a sync)
    cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
    // free device global memory and the pageable host copies of A and B
    cudaFree(d_A);
    cudaFree(d_B);
    free(h_A);
    free(h_B);
    // part 2: using zerocopy memory for array A and B
    // allocate mapped pinned memory; the GPU accesses it over the bus
    unsigned int flags = cudaHostAllocMapped;
    cudaHostAlloc((void **)&h_A,nBytes,flags);
    cudaHostAlloc((void **)&h_B,nBytes,flags);
    // initialize data at host side
    initialData(h_A,nElem);
    initialData(h_B,nElem);
    memset(hostRef,0,nBytes);
    memset(gpuRef,0,nBytes);
    // obtain the device-side aliases of the mapped host buffers
    cudaHostGetDevicePointer((void **)&d_A,(void *)h_A,0);
    cudaHostGetDevicePointer((void **)&d_B,(void *)h_B,0);
    // add at host side for result checks
    sumArraysOnHost(h_A,h_B,hostRef,nElem);
    // execute kernel with zero copy memory
    sumArraysZeroCopy<<<grid,block>>>(d_A,d_B,d_C,nElem);
    // copy kernel result back to host side
    cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
    // check device results
    checkResult(hostRef,gpuRef,nElem);
    // free memory
    cudaFree(d_C);
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    free(hostRef);
    free(gpuRef);
    // reset device
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
22,518 | #include <cmath>
// Replace each element with its base-2 logarithm, one element per thread.
// Uses CUDA's single-precision device function log2f instead of std::log2,
// whose device overload is not guaranteed on older CUDA toolchains.
// Indexes by threadIdx.x only, so this assumes a single-block launch with
// blockDim.x == element count -- TODO confirm at the call site.
__global__ void mylog2(float* value)
{
    value[threadIdx.x] = log2f(value[threadIdx.x]);
}
|
22,519 | #include <cuda.h>
#include <cuda_runtime.h>
namespace {
// Busy-wait kernel: spins on the per-SM cycle counter until at least
// `cycles` cycles have passed since entry.
__global__ void wait_kernel(long long int cycles) {
  const long long int begin = clock64();
  while (clock64() - begin < cycles) {
    // spin
  }
}
}  // anonymous namespace
/**
 * Launch a kernel on `stream` that busy-waits for `length` seconds.
 * The GPU clock rate is queried once and cached so later calls skip the
 * attribute lookup. NOTE(review): the static cache is written without
 * synchronization -- confirm this is only called from one host thread.
 */
void gpu_wait(double length, cudaStream_t stream) {
  static long long int cached_hz = 0;  // cached core clock, in Hz
  if (cached_hz == 0) {
    int dev;
    cudaGetDevice(&dev);
    int khz;
    cudaDeviceGetAttribute(&khz, cudaDevAttrClockRate, dev);
    cached_hz = (long long int) khz * 1000;  // KHz -> Hz
  }
  // Convert the requested seconds into a cycle count for the spin kernel.
  double total_cycles = length * cached_hz;
  wait_kernel<<<1, 1, 0, stream>>>((long long int) total_cycles);
}
|
22,520 | #include "includes.h"
__device__ int position; //index of the largest value
__device__ int largest; //value of the largest value
// Host-side problem-size parameters for the string-merging code below:
int lenString = 593; // length of each string -- TODO confirm units/encoding
int maxNumStrings = 1000000; // upper bound on the number of strings
int threshold = 2; // merge threshold; semantics defined at use site -- verify
// For each index below `size`, copy d_b[i] into copy_db[i] when the flag
// d_c[i] is zero (string not yet merged), otherwise store 0.
// Also resets *left to 1 so the host can later detect remaining work.
// NOTE(review): every thread stores to *left unconditionally; benign only
// because all threads write the same value.
__global__ void populate (int *d_b, int *copy_db, int *d_c, int size, int *left) {
    *left = 1; // reset the "strings left to merge" flag (all threads store 1)
    int my_id = blockDim.x * blockIdx.x + threadIdx.x;
    if (my_id < size) {
        // keep == 1 when this string is still unmerged (flag == 0), else 0.
        // Replaces the original obfuscated abs((bool)flag - 1) expression.
        int keep = (d_c[my_id] == 0) ? 1 : 0;
        copy_db[my_id] = d_b[my_id] * keep;
    }
}
22,521 | /*
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include "string.h"
__global__ void mofor(char* d_str,int len)
{
for(int i=0;i<len;i++)
if(d_str[i]=='A' ||d_str[i]=='E' ||d_str[i]=='I' ||d_str[i]=='O' ||d_str[i]=='U' ||d_str[i]=='a' ||d_str[i]=='e' ||d_str[i]=='i' ||d_str[i]=='o' ||d_str[i]=='u')
d_str[i]='#';
}
__global__ void mofos(char *d_str, int len)
{
int id=threadIdx.x;
int temp;
for(int i=1;i<=len;i++)
{
if(i&1)
{
if(d_str[id+1]>d_str[id+2])
{
temp=d_str[id+1];
d_str[id+1]=d_str[id+2];
d_str[id+2]=temp;
}
}
else
{
if(d_str[id-1]>d_str[id])
{
temp=d_str[id-1];
d_str[id-1]=d_str[id];
d_str[id]=temp;
}
}
if(!(i&1))
{
if(d_str[id+1]>d_str[id+2])
{
temp=d_str[id+1];
d_str[id+1]=d_str[id+2];
d_str[id+2]=temp;
}
}
else
{
if(d_str[id-1]>d_str[id])
{
temp=d_str[id-1];
d_str[id-1]=d_str[id];
d_str[id]=temp;
}
}
}
}
int main()
{
char h_str[100];
scanf("%s",h_str);
char *d_str;
int len=strlen(h_str);
//printf("%d",len);
int size = len*sizeof(char);
cudaMalloc((void **)&d_str,size);
cudaMemcpy(d_str,h_str,size,cudaMemcpyHostToDevice);
mofos<<<1,len>>>(d_str,len);
cudaMemcpy(h_str,d_str,size,cudaMemcpyDeviceToHost);
cudaMemcpy(d_str,h_str,size,cudaMemcpyHostToDevice);
mofor<<<1,len>>>(d_str,len);
cudaMemcpy(h_str,d_str,size,cudaMemcpyDeviceToHost);
printf("%s",h_str);
}
*/ |
22,522 | #include <stdlib.h>
#include <cuda.h>
#include <stdio.h>
// Fill the first `tam` elements of d_a with the constant value 10.
__host__
void llenar(float *d_a, int tam) {
    const float fill_value = 10;
    for (int idx = 0; idx < tam; ++idx) {
        d_a[idx] = fill_value;
    }
}
// Print `tam` floats with two decimals, space-separated, newline-terminated.
void print(float *V, int tam){
    for (int k = 0; k < tam; ++k) {
        printf("%.2f ", V[k]);
    }
    printf("\n");
}
// Despite the name, this kernel doubles each element: h_b[i] = 2 * h_a[i].
// 1D grid, one element per thread, bounds-checked against n.
__global__
void mult_matKernel(float* h_a, float* h_b , int n) {
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= n) return;
    h_b[gid] = 2.0f * h_a[gid];
}
// Allocates a 100-element vector, doubles it on the GPU, and prints the
// result. Returns 0 on success, 1 if a device allocation fails.
int main(int argc, char const *argv[]) {
    int n = 100;
    float *h_a = (float*)malloc(n*sizeof(float));
    float *h_b = (float*)malloc(n*sizeof(float));
    cudaError_t error = cudaSuccess;
    float *d_a, *d_b;
    error = cudaMalloc((void**)&d_a, n*sizeof(float));
    if (error != cudaSuccess) {
        printf("Error al asignar espacio a d_a\n" );
        free(h_a); free(h_b);   // FIX: release host buffers on failure
        return 1;               // FIX: report failure (was `return 0`)
    }
    error = cudaMalloc((void**)&d_b, n*sizeof(float));
    if (error != cudaSuccess) {
        printf("Error al asignar espacio a d_b\n" );
        cudaFree(d_a);          // FIX: release the first device allocation
        free(h_a); free(h_b);
        return 1;               // FIX: report failure (was `return 0`)
    }
    llenar(h_a, n);
    cudaMemcpy(d_a, h_a, n*sizeof(float), cudaMemcpyHostToDevice);
    // 10 threads per block, enough blocks to cover all n elements
    dim3 dimGrid(ceil(n/10.0), 1, 1);
    dim3 dimBlock(10,1,1);
    mult_matKernel<<<dimGrid, dimBlock>>>(d_a, d_b, n);
    cudaDeviceSynchronize();
    cudaMemcpy(h_b, d_b, n*sizeof(float), cudaMemcpyDeviceToHost);
    print(h_b, n);
    free(h_a);
    free(h_b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
22,523 | /*
* MSU CUDA Course Examples and Exercises.
*
* Copyright (c) 2011 Dmitry Mikushin
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising
* from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it freely,
* without any restrictons.
*/
#include <cuda_runtime.h>
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Maximum allowed results difference.
#define EPS 1e-04
// Grid block size (see comment in pattern2d below).
#define BLOCK_LENGTH 32
#define BLOCK_HEIGHT 16
#define IN(i,j) in[i + (j) * nx]
#define OUT(i,j) out[i + (j) * nx]
// GPU device kernel: computes one interior stencil point per thread,
// OUT(i,j) = sqrt(|IN(i,j)+IN(i-1,j)+IN(i+1,j)-2*IN(i,j-1)+3*IN(i,j+1)|).
// NOTE: there is deliberately no bounds check -- the host (pattern2d) sizes
// the grid to an exact multiple of BLOCK_LENGTH x BLOCK_HEIGHT and handles
// the remainder strips on the CPU, so every launched thread is interior.
__global__ void pattern2d_gpu(
int bx, int nx, int ex, int by, int ny, int ey,
float* in, float* out)
{
// Compute absolute (i,j) indexes for
// the current GPU thread using grid mapping params.
int i = blockIdx.x * BLOCK_LENGTH + threadIdx.x + bx;
int j = blockIdx.y * BLOCK_HEIGHT + threadIdx.y + by;
// Compute one data point - a piece of
// work for the current GPU thread.
OUT(i,j) = sqrtf(fabs(IN(i,j) + IN(i-1,j) + IN(i+1,j) -
2.0f * IN(i,j-1) + 3.0f * IN(i,j+1)));
}
// CPU reference implementation of the same stencil: for every point with
// bx <= i < nx-ex and by <= j < ny-ey, compute
// OUT(i,j) = sqrt(|IN(i,j)+IN(i-1,j)+IN(i+1,j)-2*IN(i,j-1)+3*IN(i,j+1)|).
void pattern2d_cpu(
int bx, int nx, int ex, int by, int ny, int ey,
float* in, float* out)
{
for (int row = by; row < ny - ey; row++)
{
for (int col = bx; col < nx - ex; col++)
{
float acc = IN(col,row) + IN(col-1,row) + IN(col+1,row) -
2.0f * IN(col,row-1) + 3.0f * IN(col,row+1);
OUT(col,row) = sqrtf(fabs(acc));
}
}
}
// Perform some dummy 2D field processing on GPU and CPU,
// and compare results. Returns 0 on success, -1 on invalid sizes, or the
// CUDA status code of the first failing runtime call.
// NOTE(review): the host buffers `control` and `diffs` leak on CUDA error
// paths; callers appear to treat any nonzero return as fatal -- confirm.
int pattern2d(int nx, int ny, float* in, float* out, int pid, int step)
{
if ((nx <= 0) || (ny <= 0)) return -1;
long np = nx * ny;
size_t size = sizeof(float) * np;
// Create GPU data array and copy input data to it.
float* in_gpu;
cudaError_t status = cudaMalloc((void**)&in_gpu, size);
if (status != cudaSuccess)
{
fprintf(stderr, "%s %d, status = %d\nInsufficient GPU memory?\n",
"Cannot malloc input memory on GPU by process", pid, status);
return status;
}
status = cudaMemcpy(in_gpu, in, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot copy data from host to gpu by process %d, status = %d\n",
pid, status);
return status;
}
// Create CPU data output array and get
// control result using CPU function.
float* control = (float*)malloc(size);
memset(control, 0, size);
pattern2d_cpu(
1, nx, 1, 1, ny, 1, in, control);
// Configure GPU computational grid:
// nx = nblocks_x * block_length
// ny = nblocks_y * block_height
//
// NOTE: we have degree of freedom in
// selecting how real problem grid maps onto
// computational grid. Usually these params
// are tuned to get optimal performance.
//
// NOTE: chose of grid/block config is
// also limited by device properties:
// - Maximum number of threads per block (512)
// - Maximum sizes of each dimension of a block (512 x 512 x 64)
// - Maximum sizes of each dimension of a grid (65535 x 65535 x 1)
int nblocks_x = (nx - 2) / BLOCK_LENGTH;
int nblocks_y = (ny - 2) / BLOCK_HEIGHT;
// Perform the same processing on GPU,
// returning result to GPU array.
float* out_gpu;
status = cudaMalloc((void**)&out_gpu, size);
if (status != cudaSuccess)
{
fprintf(stderr, "%s %d, status = %d\nInsufficient GPU memory?\n",
"Cannot malloc output memory on GPU by process", pid, status);
return status;
}
status = cudaMemset(out_gpu, 0, size);
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot erase output memory on GPU by process %d, status = %d\n",
pid, status);
return status;
}
pattern2d_gpu<<<
dim3(nblocks_x, nblocks_y, 1),
dim3(BLOCK_LENGTH, BLOCK_HEIGHT, 1)>>>(
1, nx, 1, 1, ny, 1, in_gpu, out_gpu);
status = cudaGetLastError();
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot execute CUDA kernel by process %d, status = %d\n",
pid, status);
return status;
}
// FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
// is its documented replacement with identical semantics here.
status = cudaDeviceSynchronize();
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot synchronize thread by process %d, status = %d\n",
pid, status);
return status;
}
// Copy GPU result from GPU memory to CPU buffer.
status = cudaMemcpy(out, out_gpu, size, cudaMemcpyDeviceToHost);
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot copy data from gpu to host by process %d, status = %d\n",
pid, status);
return status;
}
// Don't bother with processing the remainder
// on GPU. Do it on CPU instead.
pattern2d_cpu(
1, nx, 1,
ny - (ny - 2) % BLOCK_HEIGHT - 2, ny, 1,
in, out);
pattern2d_cpu(
nx - (nx - 2) % BLOCK_LENGTH - 2, nx, 1,
1, ny, 1,
in, out);
// Compare results and find the maximum abs difference.
int maxi = 0, maxj = 0;
float maxdiff = fabs(out[0] - control[0]);
float* diffs = (float*)malloc(size);
memset(diffs, 0, size);
for (int j = 0; j < ny; j++)
for (int i = 0; i < nx; i++)
{
float diff = fabs(
out[i + j * nx] -
control[i + j * nx]);
if (diff > maxdiff)
{
maxdiff = diff;
maxi = i; maxj = j;
}
diffs[i + j * nx] = diff;
}
// Release data arrays.
status = cudaFree(in_gpu);
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot free device input memory by process %d, status = %d\n",
pid, status);
return status;
}
free(control);
status = cudaFree(out_gpu);
if (status != cudaSuccess)
{
fprintf(stderr, "Cannot free device output memory by process %d, status = %d\n",
pid, status);
return status;
}
free(diffs);
printf("Step %d result abs max diff by process %d = %f @ (%d,%d)\n",
step, pid, maxdiff, maxi, maxj);
return 0;
}
|
22,524 | // CS 87 - Final Project
// Maria-Elena Solano
//
// Radix-2 Cooley-Tukey Fourier Transform on C^n - parallel 'pi' CUDA version
//
#include <stdio.h> // C's standard I/O library
#include <stdlib.h> // C's standard library
#include <stdint.h> // C's exact width int types
#include <string.h> // C's standard string library
#include <time.h> // C's time types (for random init)
#include <unistd.h> // C's POSIX API
#include <cuda.h> // CUDA runtime API
// macro/constant definitions
#define perror_out(X) perror(X), fflush(stderr)
#define stderr_out(...) fprintf(stderr, __VA_ARGS__), \
fflush(stderr)
#define print_out(...) printf(__VA_ARGS__), fflush(stdout)
#define cuda_try(X) ((X) != cudaSuccess)
#define cuda_malloc(X, Y) cudaMalloc((void**)(X), (Y))
#define cuda_memcpy_todev(X, Y, Z) cudaMemcpy((X), (Y), (Z), \
cudaMemcpyHostToDevice)
#define cuda_memcpy_tohost(X, Y, Z) cudaMemcpy((X), (Y), (Z), \
cudaMemcpyDeviceToHost)
// data structures
// unit data type
typedef struct complex{
float re; // real part
float im; // imaginary part
} data_t;
// simple struct to hold transform info
typedef struct tr tr_t;
typedef struct tr{
uint32_t N; // input size
uint32_t P; // number of processors
uint32_t Pi; // processor ID
data_t* in; // input data,
data_t* out; // output,
data_t* tmp_in; // input scratchpad,
data_t* tmp_out; // output scratchpads, and
uint32_t test_mode; // test mode?
uint32_t no_header; // no timing headers?
tr_t* trs; // array of tr_t objects in host,
tr_t* trs_dev; // and device.
} tr_t;
// simple encapsulating struct for timing
typedef struct timer{
cudaEvent_t start, stop; // start and stop events
float elapsed; // elapsed time (if any), in millisecs
} tmr_t;
// simple helper container (used in how_many_warp_schedulers)
typedef struct sm_to_ws{
int sm;
int ws;
} sm_to_ws_t;
// function declarations
// setting up the transform
int setup_from_args (tr_t* t, int argc, char** argv);
void show_usage ();
// running the transform
int run (tr_t* t);
int initialize_data (tr_t* t);
void cleanup_data (tr_t* t);
__global__ void run_funnel_stage(tr_t* t);
__global__ void run_tube_stage (tr_t* t);
__device__ void butterfly (data_t* out, data_t* in,
uint32_t size, uint32_t N);
__device__ void butterfly_half(data_t* out, data_t* in,
uint32_t size, uint32_t N, uint32_t which_half);
// complex arithmetic
__device__ data_t add (data_t a, data_t b);
__device__ data_t sub (data_t a, data_t b);
__device__ data_t mul (data_t a, data_t b);
__device__ data_t omega(uint32_t N, uint32_t k);
__device__ data_t convex_comb(data_t a, data_t b, uint32_t alpha);
// printing and verifying
void print_input (tr_t* t);
void print_output (tr_t* t);
void verify_results (tr_t* t);
// timing
void timer_start(tmr_t* tm);
void timer_stop (tmr_t* tm);
// misc
__device__ __host__ uint32_t ilog2 (uint32_t x);
__device__ void swap_scratchpads (tr_t* t);
int is_power_of_two (int x);
uint32_t hash (uint32_t x);
uint32_t bit_reverse (uint32_t x, uint32_t N);
uint32_t how_many_concurrent_blocks();
int how_many_warp_schedulers (int arch_major_ver, int arch_minor_ver);
// Entry point: parse the command line into a transform object, run it, and
// exit with EXIT_SUCCESS or EXIT_FAILURE accordingly.
int main(int argc, char** argv){
  tr_t transform;

  // Abort if the command-line arguments are invalid...
  if(setup_from_args(&transform, argc, argv) != 0){
    exit(EXIT_FAILURE);
  }
  // ...or if the transform itself reports an error.
  if(run(&transform) != 0){
    exit(EXIT_FAILURE);
  }
  exit(EXIT_SUCCESS);
}
// This function parses the provided command line args into the given transform
// object, and initializes it accordingly. Returns 0 if success, or -1 if err).
//
// t: (ptr to) tr_t object to update
// argc: number of command line arguments
// argv: array of strings containing the command line args
//
// returns: 0 if success, or -1 if invalid command line args.
//
int setup_from_args(tr_t* t, int argc, char** argv){
int ret; // return value from getopt, and
int num = 0; // number to be retrieved.
// First, zero all the entries of the tr_t (so we can tell whether or not
// the entries were filled afterwards simply by checking if they're still 0)
memset(t, 0, sizeof(tr_t));
// Then, greedily read all the command line options provided.
// (optstring "n:p:to": -n and -p take arguments; -t and -o are flags.)
while((ret = getopt(argc, argv, "n:p:to")) != -1){
switch(ret){
// If option -n, grab the arg, which should be a nonnegative power of 2
// (otherwise return -1 err)
// NOTE: atoi returning 0 also rejects non-numeric input here.
case 'n':{
if(!(num = atoi(optarg)) || !(num > 1) || !is_power_of_two(num)){
stderr_out("Invalid input size (should be 2^i for i>0)\n");
show_usage();
goto err;
}
t->N = num;
break;
}
// If option -p, grab the arg, which should be 1 or a nonneg power of 2
// (otherwise return -1 err)
case 'p':{
if(!(num = atoi(optarg)) || !(num > 0) || !is_power_of_two(num)){
stderr_out("Invalid number of procs (should be 2^i for i>0)\n");
show_usage();
goto err;
}
t->P = num;
break;
}
// If option -o, set t.no_header to 1.
case 'o':{
t->no_header = 1;
break;
}
// If option -t, set t.N to 8 and t.test_mode to 'true', and notify user.
case 't':{
print_out("Test mode (ignoring provided input size, if any)...\n");
t->N = 8;
t->test_mode = 1;
break;
}
// if unknown or missing arg, show usage and return -1 (error)
case '?':{
stderr_out("Unknown or missing arg %c\n", optopt);
show_usage();
goto err;
}
}
}
// Finally, validate the options
// If the -n option is missing, notify the user, show usage, and return -1.
if(!t->N){
stderr_out("Missing option: -n\n");
show_usage();
goto err;
}
// If the -p option is missing, notify the user, show usage, and return -1.
if(!t->P){
stderr_out("Missing option: -p\n");
show_usage();
goto err;
}
// If more processors than inputs, return -1.
if(t->P > t->N){
stderr_out("More processors than inputs!\n");
show_usage();
goto err;
}
// If more processors than available, return -1.
if(t->P > how_many_concurrent_blocks()){
stderr_out("Too many processors! (only %u concurrent blocks available)\n",
how_many_concurrent_blocks());
goto err;
}
return 0;
err:
stderr_out("Could not setup the transform from the cmdline args\n");
return -1;
}
// This function initializes the given transform's data. Returns 0 if success
// or -1 if err.
//
// t: (ptr to) tr_t object to update
//
// returns: 0 if success, or -1 if error.
//
int initialize_data(tr_t* t){
uint32_t bit;
cudaError_t ret; // return value of CUDA calls
// We need:
// - N entries for the input,
if((t->in = (data_t*)malloc(sizeof(data_t)*t->N)) == NULL){
perror_out("malloc error");
goto err;
}
// - N entries for the output, and
if((t->out = (data_t*)malloc(sizeof(data_t)*t->N)) == NULL){
perror_out("malloc error");
goto err;
}
// - P tr_t objects (one for each block) (on both the host and the device),
if((t->trs = (tr_t*)malloc(sizeof(tr_t)*t->P)) == NULL){
perror_out("malloc error");
goto err;
}
if(cuda_try(ret = cuda_malloc(&t->trs_dev, sizeof(tr_t)*t->P))){
stderr_out("cudaMalloc error: %s\n", cudaGetErrorString(ret));
goto err;
}
// - And, for each processor,
// (t->Pi doubles as the loop counter; each trs[Pi] starts as a copy of *t)
for(t->Pi = 0; t->Pi < t->P; t->Pi++){
// (Initialize their respective tr_t objects first)
t->trs[t->Pi] = *t;
// An input scratchpad of size N, and
if(cuda_try(ret = cuda_malloc(&t->trs[t->Pi].tmp_in,
sizeof(data_t)*t->N))){
stderr_out("cudaMalloc error: %s\n", cudaGetErrorString(ret));
goto err;
}
// An output scratchpad of size N.
if(cuda_try(ret = cuda_malloc(&t->trs[t->Pi].tmp_out,
sizeof(data_t)*t->N))){
stderr_out("cudaMalloc error: %s\n", cudaGetErrorString(ret));
goto err;
}
// (Making sure to copy the updated tr_t to the device's memory too)
if(cuda_try(ret = cuda_memcpy_todev(t->trs_dev + t->Pi,
t->trs + t->Pi,
sizeof(tr_t)))){
stderr_out("cudaMemcpy error: %s\n", cudaGetErrorString(ret));
goto err;
}
}
// Initialize the input with Bernoulli deviates from the L2 unit circle
// --i.e. each element drawn from the distribution 1/sqrt{N}*[-1,1].
if(!t->test_mode){
for(srand(hash(time(NULL))), bit = 0; bit < t->N; bit++){
t->in[bit].re = (rand()/(RAND_MAX/2.0)-1.0)/sqrt(t->N);
t->in[bit].im = (rand()/(RAND_MAX/2.0)-1.0)/sqrt(t->N);
}
}
// (unless in debug mode, in which case the values are always the same
// test case: 0,1,0,1,0,1,0,1 (the output should be 4,0,0,0,-4,0,0,0))
else{
t->in[0].re = 0; t->in[0].im = 0;
t->in[1].re = 1; t->in[1].im = 0;
t->in[2].re = 0; t->in[2].im = 0;
t->in[3].re = 1; t->in[3].im = 0;
t->in[4].re = 0; t->in[4].im = 0;
t->in[5].re = 1; t->in[5].im = 0;
t->in[6].re = 0; t->in[6].im = 0;
t->in[7].re = 1; t->in[7].im = 0;
print_input(t);
}
// - And copy the input to the input scratchpad of each block (of size N).
for(t->Pi = 0; t->Pi < t->P; t->Pi++){
if(cuda_try(ret = cuda_memcpy_todev(t->trs[t->Pi].tmp_in,
t->in, sizeof(data_t)*t->N))){
stderr_out("cudaMemcpy error: %s\n", cudaGetErrorString(ret));
goto err;
}
}
// And return
// NOTE(review): on the err path, buffers allocated so far are not freed
// here; cleanup_data() tolerates partial state -- confirm callers invoke it.
return 0;
err:
stderr_out("Could not initialize the data\n");
return -1;
}
// This function frees all the data allocated by the given transform.
//
// t: (ptr to) tr_t object to update
//
void cleanup_data(tr_t* t){
  cudaError_t rc; // status of each CUDA free call
  // Host-side input/output buffers (pointers are NULL-ed after free so the
  // cleanup is safe to call more than once).
  if(t->in  != NULL){ free(t->in);  t->in  = NULL; }
  if(t->out != NULL){ free(t->out); t->out = NULL; }
  // Per-processor device scratchpads, then the host-side tr_t array itself.
  if(t->trs != NULL){
    for(t->Pi = 0; t->Pi < t->P; t->Pi++){
      if(t->trs[t->Pi].tmp_in != NULL &&
         cuda_try(rc = cudaFree(t->trs[t->Pi].tmp_in))){
        stderr_out("cudaFree error: %s\n", cudaGetErrorString(rc));
      }
      if(t->trs[t->Pi].tmp_out != NULL &&
         cuda_try(rc = cudaFree(t->trs[t->Pi].tmp_out))){
        stderr_out("cudaFree error: %s\n", cudaGetErrorString(rc));
      }
    }
    free(t->trs);
    t->trs = NULL;
  }
  // Finally, the device-side copy of the tr_t array.
  if(t->trs_dev != NULL && cuda_try(rc = cudaFree(t->trs_dev))){
    stderr_out("cudaFree error: %s\n", cudaGetErrorString(rc));
  }
}
// This function shows the cmdline interface usage.
//
void show_usage(){
  // Single print keeps the usage text contiguous on stderr-free output.
  print_out(
      "\nusage:\n"
      "  fourier-parallel-pi-gpu-cuda { -n <n> -p <p> [-o] | -t }\n"
      "\noptions:\n"
      "  -n <n>   power of two input size\n"
      "  -p <p>   power of two number of processors (less than n)\n"
      "  -o       omit timing headers\n"
      "  -t       compare against precomputed input/output\n"
      "\n");
}
// This function runs the given transform on P separate processors. Returns 0
// if success or -1 if err.
//
// t: (ptr to) tr_t object to update
//
// returns: 0 if success, or -1 if error.
//
int run(tr_t* t){
  tmr_t tm_funnel, tm_tube; // tmr_t objects (for timing)
  cudaError_t ret; // return value of CUDA calls
  uint32_t bit;
  // Initialize the data (or ret -1 err, cleaning up any malloc'd data first)
  if(initialize_data(t)){
    cleanup_data(t);
    goto err;
  }
  // Run the 1st (aka 'funnel') stage on P blocks, 1 thread per block.
  // NOTE(review): kernel launch errors are not checked here
  // (cudaGetLastError / cudaDeviceSynchronize) -- a failed launch would go
  // unnoticed until the later cudaMemcpy.
  timer_start(&tm_funnel);
  run_funnel_stage<<<t->P, 1>>>(t->trs_dev);
  timer_stop(&tm_funnel);
  // Run the 2nd (aka 'tube') stage on P blocks, 1 thread per block.
  // (timer_stop synchronizes on its stop event, so tm_funnel covers the
  // funnel kernel's full execution on the default stream.)
  timer_start(&tm_tube);
  run_tube_stage<<<t->P, 1>>>(t->trs_dev);
  timer_stop(&tm_tube);
  // If not in test mode, show elapsed times (total, tree and cylinder stages)
  if(!t->test_mode){
    if(!t->no_header){
      print_out("n\tp\ttime (total)\t"
                "time (stage 1)\ttime (stage 2)\n"); };
    print_out("%u\t%u\t%f\t%f\t%f\n",
              t->N, t->P, tm_funnel.elapsed + tm_tube.elapsed,
              tm_funnel.elapsed, tm_tube.elapsed);
  }
  // Otherwise, copy the result from the _assigned segment_ (of size N/P) of
  // the input scratchpad of each processor (which will contain the new results
  // at this point) of each processor to the output, in bit-reversed order.
  //
  // Then print the output, and verify the results.
  else{
    // Update the host's tr_t array first
    // (the kernels swap tmp_in/tmp_out on the device, so the host copies
    // must be refreshed before reading tmp_in below)
    if(cuda_try(ret = cuda_memcpy_tohost(
                      t->trs, t->trs_dev, sizeof(tr_t)*t->P))){
      stderr_out("cudaMemcpy error: %s\n", cudaGetErrorString(ret));
    }
    // One element-sized copy per output bit, placed at its bit-reversed
    // position (see bit_reverse below for why the order is reversed).
    for(t->Pi = 0; t->Pi < t->P; t->Pi++){
      for(bit = (t->N/t->P)*t->Pi; bit < (t->N/t->P)*t->Pi+t->N/t->P; bit++){
        if(cuda_try(ret = cuda_memcpy_tohost(
               t->out + bit_reverse(bit, ilog2(t->N)), // position in output,
               t->trs[t->Pi].tmp_in + bit, // Pi-th scratchpad.
               sizeof(data_t)))){
          stderr_out("cudaMemcpy error: %s\n", cudaGetErrorString(ret));
        }
      }
    }
    print_output(t);
    verify_results(t);
  }
  // Cleanup the data, and return
  cleanup_data(t);
  return 0;
 err:
  stderr_out("Could not run the transform\n");
  return -1;
}
// This function runs the first stage of the transform in the GPU.
//
// t_ptr: (ptr to) tr_t object to use
//
// returns: NULL always.
//
__global__ void run_funnel_stage(tr_t* trs){
  uint32_t size, iter, offset; // butterfly size, offset and iter,
  uint32_t which_butterfly, which_half; // which butterfly, and which half,
  tr_t* t = trs + blockIdx.x; // local, unpacked tr_t object
  // Each block owns one tr_t (with its own tmp_in/tmp_out scratchpads), so
  // no cross-block synchronization is needed: each block redundantly
  // computes its half of every butterfly on its private copy of the data.
  // For the first log P iters
  // (i.e. butterfly sizes N, N/2, ..., 2(N/P),
  // iteration stage log P, log P - 1, ..., 1),
  for(size = t->N, iter = ilog2(t->P); size > t->N/t->P; size /= 2, iter--){
    // Determine _which_ butterfly to compute, depending on (Pi >> iter)
    which_butterfly = (t->Pi >> iter);
    // Determine _which_ segment_ of size 'size' to compute the butterfly upon
    offset = which_butterfly * size;
    // Determine _which half_, depending on the parity of the 1st, 2nd, ...
    // most significant bit of Pi.
    which_half = ((t->Pi >> (iter - 1)) % 2 == 0);
    // And compute the butterfly accordingly
    butterfly_half(
      t->tmp_out + offset, // output to the _1st_ half of the scratchpad
      t->tmp_in + offset, // and reading from the input scratchpad.
      size, t->N, which_half);
    // And swap scratchpads, so that the new results become the input for
    // the next iteration.
    swap_scratchpads(t);
  }
}
// This function runs the second stage of the transform in the GPU.
//
// t_ptr: (ptr to) tr_t object to use
//
// returns: NULL always.
//
__global__ void run_tube_stage(tr_t* trs){
  uint32_t size, offset; // butterfly size and offset
  tr_t* t = trs + blockIdx.x; // local tr_t object
  // By this point the funnel stage has left each block's assigned segment
  // independent of the others, so each block finishes its own segment alone.
  // For the last log N/P iters
  // (i.e. butterfly sizes N/P, (N/P)/2, ..., 2),
  // we work only on the portion of the output assigned to this processor
  for(size = t->N/t->P; size > 1; size /= 2){
    // Compute 1, 2, ..., (N/P)/2 butterflies of size N/P, (N/P)/2, ..., 2
    // over _consecutive_ intervals across the assigned segment (of size N/P),
    // (which recall starts from (t.N/t.P)).
    for(offset = (t->N/t->P)*t->Pi;
        offset < (t->N/t->P)*t->Pi + t->N/t->P;
        offset += size){
      butterfly(
        t->tmp_out + offset, // output to the _assigned_ segment,
        t->tmp_in + offset, // and reading from the _assigned_ segment.
        size, t->N);
    }
    // And swap scratchpads, so that the new results become the input for
    // the next iteration.
    swap_scratchpads(t);
  }
}
// This function computes an n-point butterfly over the given input array,
// placing the results on the given output array.
//
// out,in: output and input arrays
// size: size of the butterfly
// N: input size of the broader FFT
//
__device__ void butterfly(data_t* out, data_t* in, uint32_t size, uint32_t N){
  // A full butterfly is the union of its two halves: the 'left' half
  // (which_half = 1) fills the first size/2 outputs, the 'right' half
  // (which_half = 0) the last size/2.
  butterfly_half(out, in, size, N, 1); // left half  -> out[0 .. size/2)
  butterfly_half(out, in, size, N, 0); // right half -> out[size/2 .. size)
}
// This function computes either half of an n-point butterfly. Note it only
// writes to either the _first_ half of the output (if left butterfly), or the
// _second_ half of the output (if right butterfly).
//
// out,in: output and input arrays
// size: size of the butterfly
// N: input size of the broader FFT
// which_half: which half to compute?
//
__device__ void butterfly_half(data_t* out, data_t* in,
    uint32_t size, uint32_t N, uint32_t which_half){
  uint32_t bit;
  // For each bit in the segment.
  // (The for-init advances 'out' by size/2 when computing the _right_ half,
  // so the loop body can always write out[bit] regardless of the half.)
  for(out += !which_half*(size/2), bit = 0; bit < size/2; bit++){
    out[bit] =                 // set the current (output) bit to
      convex_comb(             // either (selected by which_half):
        add(                   // the sum of
          in[bit],             // the current (input) bit, and
          in[bit+size/2]),     // the (size/2)-next one,
        mul(                   // or the multiplication of
          sub(                 // the difference of
            in[bit],           // the current (input) bit, and
            in[bit+size/2]),   // the (size/2)-next one
          omega(N,             // times the Nth root of unity,
            bit*(N/size))),    // to the power of 0,1,2 first
        which_half);           // 0,2,4 second, etc.
  }
  return;
}
// This function returns (by value) the sum of the two given complex numbers.
//
// a,b: operands (x+yi,z+wi) (of type data_t)
//
// returns: the sum a+b = (x+z)+(y+w)i
//
__device__ data_t add(data_t a, data_t b){
  // Componentwise complex addition: (x+yi)+(z+wi) = (x+z)+(y+w)i.
  data_t sum;
  sum.re = a.re + b.re;
  sum.im = a.im + b.im;
  return sum;
}
// This function returns (by value) the sum a+(-b) of the given complex nums.
//
// (Note: this can be overriden to provide support for more general transforms
// on arbitrary fields)
//
// a,b: operands (x+yi,z+wi) (of type data_t)
//
// returns: the sum a+(-b) = (x-z)+(y-w)i
//
__device__ data_t sub(data_t a, data_t b){
  // Componentwise complex subtraction: (x+yi)-(z+wi) = (x-z)+(y-w)i.
  data_t diff;
  diff.re = a.re - b.re;
  diff.im = a.im - b.im;
  return diff;
}
// This function returns (by value) the mult a*b of the given complex numbers.
//
// (Note: this can be overriden to provide support for more general transforms
// on arbitrary fields)
//
// a,b: operands (x+yi,z+wi) (of type data_t)
//
// returns:       their mult a*b = (xz-yw) + (xw+yz)i
//
__device__ data_t mul(data_t a, data_t b){
  // Complex product: (x+yi)(z+wi) = (xz-yw) + (xw+yz)i.
  data_t prod;
  prod.re = a.re*b.re - a.im*b.im;
  prod.im = a.re*b.im + a.im*b.re;
  return prod;
}
// This function returns (by value) the primitive N-th root of unity of the
// complex field \mathbb{C} the power of k, which is given by
//
// e^{-2\pi k/N} = (cos 2\pi/N - i sin 2\pi/N)^k (by Euler's formula)
// = cos 2\pi/N*k - i sin 2\pi/N*k (by De Moivre's formula)
//
// (Note: this can be overriden to provide support for more general transforms
// on arbitrary fields)
//
// N: Order of the cyclic group.
// k: Power to raise the root of unity to.
//
// returns: N-th primitive root of unity of \mathbb{C}, raised
// to the power of k.
//
__device__ data_t omega(uint32_t N, uint32_t k){
  data_t o;
  // __cosf/__sinf are the fast (reduced-accuracy) hardware intrinsics;
  // fine for the float-precision tolerances this transform targets, but
  // note pi is also truncated to 6 decimal places here.
  o.re =  __cosf(2.0f*3.141592f/N*k);
  o.im = -__sinf(2.0f*3.141592f/N*k);
  return o;
}
// This function returns (by value) the 'convex combination' of a and b, para-
// meterized by the scalar alpha.
//
// (Note: this can be overriden to provide support for more general transforms
// on arbitrary fields)
//
// a,b: Operands
// alpha: Alpha (between 0 and 1)
//
// returns: alpha*a + (1-alpha)*b
//
__device__ data_t convex_comb(data_t a, data_t b, uint32_t alpha){
  // With alpha restricted to {0,1} (as callers here use it), this acts as
  // a branchless select: alpha==1 -> a, alpha==0 -> b.
  data_t r;
  r.re = alpha*a.re + (1U-alpha)*b.re;
  r.im = alpha*a.im + (1U-alpha)*b.im;
  return r;
}
// This function prints out the input of the transform.
//
// t: (ptr to) tr_t object to use.
//
void print_input(tr_t* t){
  // Dump the N complex input values on one comma-separated line.
  uint32_t i;
  print_out("Input:\n");
  for(i = 0; i < t->N; i++){
    print_out("%.1f+%.1fi, ", t->in[i].re, t->in[i].im);
  }
  print_out("\n");
}
// This function prints out the output of the transform.
//
// t: (ptr to) tr_t object to use.
//
void print_output(tr_t* t){
  // Dump the N complex output values on one comma-separated line.
  uint32_t i;
  print_out("Output:\n");
  for(i = 0; i < t->N; i++){
    print_out("%.1f+%.1fi, ", t->out[i].re, t->out[i].im);
  }
  print_out("\n");
}
// This function verifies that the output of the transform is correct.
//
// t: (ptr to) tr_t object to use.
//
void verify_results(tr_t* t){
  // The fixed test input is 0,1,0,1,0,1,0,1, whose 8-point DFT is
  // 4,0,0,0,-4,0,0,0 (all imaginary parts zero); check exactly that.
  uint32_t i;
  int ok = 1;
  for(i = 0; i < 8; i++){
    float expected_re = (i == 0) ? 4 : ((i == 4) ? -4 : 0);
    ok = ok && t->out[i].re == expected_re && t->out[i].im == 0;
  }
  print_out(ok ? "Output is correct. Test passed.\n\n"
               : "Output is incorrect! Test failed.\n\n");
  return;
}
// This function swaps the input and output scratchpads of the given tr_t obj.
//
// t: (ptr to) tr_t object to use.
//
__device__ void swap_scratchpads(tr_t* t){
  // Exchange the input and output scratchpad pointers (no data is copied);
  // after this, the most recent results are read as the next input.
  data_t* old_in = t->tmp_in;
  t->tmp_in  = t->tmp_out;
  t->tmp_out = old_in;
}
// This function determines whether or not the given number is a non-negative
// power of two.
//
// From: www.graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
//
// x: value to query.
//
// returns: 0 if _not_ a non-negative power of 2, 1 otherwise.
//
int is_power_of_two(int x){
  // A positive power of two has exactly one bit set, so x & (x - 1)
  // clears that bit and leaves zero; the explicit zero check rejects x == 0
  // (which would otherwise also satisfy the mask test).
  if(x == 0){
    return 0;
  }
  return (x & (x - 1)) == 0;
}
// This function computes a hash of the given uint32 number.
// (From http://www.concentric.net/~ttwang/tech/inthash.htm)
//
// x: number to hash.
//
// returns: uint32 hash of the given number.
//
uint32_t hash(uint32_t v){
  // Thomas Wang's 32-bit integer mix: six rounds alternating add/xor with
  // shifted copies so every input bit influences the whole word.
  v = (v + 0x7ed55d16U) + (v << 12);
  v = (v ^ 0xc761c23cU) ^ (v >> 19);
  v = (v + 0x165667b1U) + (v << 5);
  v = (v + 0xd3a2646cU) ^ (v << 9);
  v = (v + 0xfd7046c5U) + (v << 3);
  v = (v ^ 0xb55a4f09U) ^ (v >> 16);
  return v;
}
// This function computes the log2 of the given uint32_t number.
//
// From: graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn
//
// x: value to compute.
//
// returns: log2(x)
//
__host__ __device__ uint32_t ilog2(uint32_t x){
  // De Bruijn multiply+lookup log2. NOTE: this variant is only exact when
  // x is a power of two (which holds here: it is only called on N and P,
  // both validated as powers of two). The table is rebuilt on every call
  // (device local memory) -- cheap, but not free.
  int lookup_table[32] = {
     0,  1, 28,  2, 29, 14, 24,  3, 30, 22, 20, 15, 25, 17,  4,  8,
    31, 27, 13, 23, 21, 19, 16,  7, 26, 12, 18,  6, 11,  5, 10,  9 };
  return lookup_table[(uint32_t)(x*0x077CB531U) >> 27];
}
// This function bit-reverses the given index (represented as a m-bit binary
// number), and returns the resulting value as an integer.
//
// This is used in the final step of the FFT: due to the recursive nature of
// the factorization, the butterfly's outputs are always in bit-reversed order
// (when represented as log(N)-bit numbers, where N is the input size). So for
// instance for a butterfly of size 8, the 1st output of the butterfly actually
// maps to the 1 -> 001 -> 100 -> 4th output of the array; while the 3rd output
// of the butterfly maps to the 3 -> 011 -> 110 -> 6th output of the array; and
// so forth.
//
// The algorithm below comes from:
//
// Dietz, H. (2002) The Aggregate Magic Algorithms. University of Kentucky.
// http://aggregate.org/MAGIC/#Bit%20Reversal
//
// which decomposes the (m m-1 ... 1) permutation into log m cycles: first swap
// all adjacent bits, then swap every two bits, and so forth.
//
// Note, however, Dietz' particular algorithm is hard-wired to m=32-bits only:
// so in the example above, 3 is not mapped to 110 but to 1100 0000 0000 0000
// 0000 0000 0000 0000; so, at the end, I just right-shift the result by 32-m
// bits to obtain the desired value (in this case, 110 -> 6).
//
// x: Index to bit-reverse.
// m: How many bits to use when representing the index.
//
// returns: Bit-reversed integer value of x, when represented
// as a m-bit binary number.
//
uint32_t bit_reverse(uint32_t x, uint32_t m){
  // Full 32-bit reversal by swapping ever-larger groups (Dietz): adjacent
  // bits, then pairs, nibbles, bytes and finally the two half-words; the
  // closing shift drops the 32-m bits that fall outside the m-bit value.
  x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
  x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
  x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
  x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);
  x = (x >> 16) | (x << 16);
  return x >> (32 - m);
}
// This function starts the given tmr_t object.
//
// timer: (pointer to) tmr_t object to start.
//
// (From: http://stackoverflow.com/questions/7876624/timing-cuda-operations)
//
void timer_start(tmr_t* tm){
  cudaError_t ret; // return value of CUDA calls
  // Create a fresh pair of events and record the start on the default
  // stream. NOTE(review): the events allocated here are the caller's to
  // release -- they must be destroyed once the elapsed time has been read,
  // or each start/stop cycle leaks a pair of events.
  if(cuda_try(ret = cudaEventCreate(&tm->start)) ||
     cuda_try(ret = cudaEventCreate(&tm->stop)) ||
     cuda_try(ret = cudaEventRecord( tm->start, 0))){
    stderr_out("cudaEventCreate|Record error: %s\n", cudaGetErrorString(ret));
  }
}
// This function stops the given tmr_t object, and calculates the elapsed
// time since the last call to timer_start.
//
// timer: (pointer to) tmr_t object to stop.
//
// (From: http://stackoverflow.com/questions/7876624/timing-cuda-operations)
//
// This function stops the given tmr_t object, calculates the elapsed time
// since the last call to timer_start, and releases the CUDA events.
//
// timer: (pointer to) tmr_t object to stop.
//
void timer_stop(tmr_t* tm){
  cudaError_t ret; // return value of CUDA calls
  // Record the stop event, wait for it (which also waits for all work
  // previously queued on the default stream), then read the elapsed ms.
  if(cuda_try(ret = cudaEventRecord ( tm->stop, 0)) ||
     cuda_try(ret = cudaEventSynchronize( tm->stop)) ||
     cuda_try(ret = cudaEventElapsedTime(&tm->elapsed, tm->start, tm->stop))){
    stderr_out("cudaEventRecord|Synchronize|ElapsedTime error: %s\n",
               cudaGetErrorString(ret));
  }
  // timer_start() creates a fresh pair of events on every call; destroy
  // them here, once tm->elapsed has been computed, to avoid leaking a pair
  // of events per timing cycle.
  if(cuda_try(ret = cudaEventDestroy(tm->start)) ||
     cuda_try(ret = cudaEventDestroy(tm->stop))){
    stderr_out("cudaEventDestroy error: %s\n", cudaGetErrorString(ret));
  }
}
// This function returns the number of concurrent blocks in the GPU.
//
// returns: Number of concurrent blocks in the GPU (if any)
//
// This function returns the number of concurrent blocks in the GPU.
//
// returns: Number of concurrent blocks in the GPU, or 0 on error
//          or when no device is present.
//
uint32_t how_many_concurrent_blocks(){
  cudaError_t ret; // return value of CUDA calls
  int dev, devs; // number of devices
  cudaDeviceProp pr; // device properties
  uint32_t p; // number of concurrent blocks
  // Count how many devices are there. If err or no devices, set p=0 and print.
  if(cuda_try(ret = cudaGetDeviceCount(&devs)) || devs == 0){
    stderr_out("cudaGetDeviceCount error: %s\n", cudaGetErrorString(ret));
    return 0;
  }
  // Get the device properties of the last device (checking the return codes,
  // consistently with the rest of the file's CUDA error handling).
  dev = devs - 1;
  if(cuda_try(ret = cudaSetDevice(dev))){
    stderr_out("cudaSetDevice error: %s\n", cudaGetErrorString(ret));
    return 0;
  }
  if(cuda_try(ret = cudaGetDeviceProperties(&pr, dev))){
    stderr_out("cudaGetDeviceProperties error: %s\n", cudaGetErrorString(ret));
    return 0;
  }
  // Compute the number of concurrent blocks according to the formula
  //
  //   # of SMs x # of warp schedulers per SM
  //
  p = pr.multiProcessorCount * how_many_warp_schedulers(pr.major,pr.minor);
  return p;
}
// This function determines how many warp schedulers per SM are there in the
// GPU given ts major and minor architectural version.
//
// Adapted from helper_cuda.h in the CUDA SDK:
// (/usr/local/cuda/samples/common/inc/helper_cuda.h).
//
// major, minor: major and minor architecture version
//
// returns: number of warp schedulers per SM
//
// This function determines how many warp schedulers per SM are there in the
// GPU given its major and minor architectural version.
//
// Adapted from helper_cuda.h in the CUDA SDK:
// (/usr/local/cuda/samples/common/inc/helper_cuda.h).
//
// major, minor: major and minor architecture version
//
// returns: number of warp schedulers per SM, or 0 if the
//          architecture is unknown.
//
int how_many_warp_schedulers(int major, int minor){
  // Lookup tables mapping ((major << 4) + minor) to warp schedulers per SM.
  // Declared static const so they are built once, not on every call.
  static const int sm_version[12] = {
    0x10, 0x11, 0x12, 0x13, // Tesla   (G80/G8X/G9X/GT200 classes)
    0x20, 0x21,             // Fermi   (GF100/GF10x classes)
    0x30, 0x32, 0x35, 0x37, // Kepler  (GK10x/GK11x/GK21x classes)
    0x50, 0x52 };           // Maxwell (GM10x/GM20x classes)
  static const int schedulers[12] = {
    1, 1, 1, 1,             // Tesla: 1 warp scheduler per SM
    2, 2,                   // Fermi: 2 warp schedulers per SM
    4, 4, 4, 4,             // Kepler: 4 warp schedulers per SM
    4, 4 };                 // Maxwell: 4 warp schedulers per SM
  int version = (major << 4) + minor;
  int i;
  for(i = 0; i < 12; i++){
    if(sm_version[i] == version){
      return schedulers[i];
    }
  }
  // Unknown architecture
  return 0;
}
|
22,525 | #include <math.h>
#include <stdio.h>
__host__ void
mat_swap(double **A, double **B) {
    // Exchange the two matrix pointers; no element data is moved.
    double *held = *A;
    *A = *B;
    *B = held;
}
// One Jacobi sweep over a size x size grid:
//
//   NEW[i][j] = 0.25 * (4-neighbour sum of OLD + h^2 * f[i][j])
//
// Only interior points are written; the boundary rows/columns are left
// untouched. max_it is accepted for interface compatibility but unused here
// (iteration is driven by the host, which swaps OLD/NEW between launches).
//
// The original body had every launched thread redundantly recompute the
// entire grid; this version distributes the (size-2)^2 interior points over
// all threads with a grid-stride loop, producing the same result for any
// launch configuration (including <<<1,1>>>).
__global__ void
jacobian(double *OLD, double *NEW, double *f, int size, int max_it, \
double h) {
  int interior = size - 2;     // interior points per row/column
  if (interior <= 0) return;   // grids smaller than 3x3 have no interior
  long total  = (long)interior * interior;
  long stride = (long)blockDim.x * gridDim.x;
  for (long p = (long)blockIdx.x * blockDim.x + threadIdx.x;
       p < total; p += stride) {
    int i = (int)(p / interior) + 1;  // +1 skips the boundary
    int j = (int)(p % interior) + 1;
    NEW[i * size + j] = 0.25 * ( OLD[(i-1) * size + j] + OLD[(i+1) * size + j]
                               + OLD[i * size + (j-1)] + OLD[i * size + (j+1)]
                               + h * h * f[i * size + j]);
  }
}
|
22,526 | #include "includes.h"
/*
* Implementations
*/
// Criss-cross attention map backward pass w.r.t. the attention weights:
// accumulates dw over all batches and channels for one spatial position
// (x, y) and one of the len = height+width-1 criss-cross neighbours (z).
// Grid layout: (x, y) from blockIdx/threadIdx .xy, z from blockIdx.z.
__global__ void ca_map_backward_kernel_w(const float *dout, const float *weight, const float *g, float *dw, int num, int chn, int height, int width) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int sp = height * width;        // spatial size of one channel plane
  int len = height + width - 1;   // criss-cross neighbourhood size
  int z = blockIdx.z;
  if (x < width && y < height && z < height+width-1) {
    for (int batch = 0; batch < num; ++batch) {
      for (int plane = 0; plane < chn; ++plane) {
        float _dout = dout[(batch * chn + plane) * sp + y*width + x];
        if (z < width) {
          // Horizontal neighbour: position (i, y) in the same row.
          int i = z;
          float _g = g[(batch * chn + plane) * sp + y*width + i];
          dw[(batch * len + i) * sp + y*width + x] += _dout * _g;
        } else {
          // Vertical neighbour: row j in the same column, where the index
          // skips the row y itself (it is already covered by the
          // horizontal pass above).
          int i = z - width;
          int j = i<y ? i : i+1;
          float _g = g[(batch * chn + plane) * sp + j*width + x];
          dw[(batch * len + width + i) * sp + y*width + x] += _dout * _g;
        }
      }
    }
  }
}
extern "C"
// im2col: each thread fills one row of 'out' (one output pixel); its 'ow'
// columns are the C*kh*kw input values under that pixel's kernel window.
// 'oh' is the total number of output rows (N * oHeight * oWidth).
__global__ void im2col_gpu(float *x,float *out,int N,int C,int H,int W,int kh,int kw,int stride,int oHeight,int oWidth,int ow,int oh,int kSize)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard for the grid tail: launches are usually rounded up to a
    // full block, and without this guard the extra threads read and write
    // out of bounds. ('oh' is the same total used by im2col_gpuv2's loop.)
    if (i >= oh) return;
    int n = i / oHeight / oWidth;
    // NOTE(review): the division uses oHeight while the modulo uses oWidth;
    // this is only consistent when oHeight == oWidth -- confirm intent.
    int startH = (i - (n * oHeight * oWidth)) / oHeight * stride;
    int startW = (i - (n * oHeight * oWidth)) % oWidth * stride;
    for(int j = 0;j<ow;j++) {
        int c = j / kSize;            // input channel of this column
        int xSize = j - (c * kSize);  // offset inside the kh*kw window
        int xh = startH + xSize / kw;
        int xw = startW + xSize % kw;
        int xi = n * C * H * W + c * H * W + xh * W + xw;
        out[i * ow + j] = x[xi];      // use xi instead of recomputing it
    }
}
extern "C"
// Grid-stride variant of im2col_gpu: any launch configuration covers all
// 'oh' output rows, each thread handling rows index, index+s, index+2s, ...
__global__ void im2col_gpuv2(float *x,float *out,int N,int C,int H,int W,int kh,int kw,int stride,int oHeight,int oWidth,int ow,int oh,int kSize)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int s = blockDim.x * gridDim.x;   // total threads = stride between rows
    for(int i = index;i < oh;i += s){
        int n = i / oHeight / oWidth;
        // NOTE(review): division by oHeight vs modulo by oWidth is only
        // consistent when oHeight == oWidth -- confirm intent.
        int startH = (i - (n * oHeight * oWidth)) / oHeight * stride;
        int startW = (i - (n * oHeight * oWidth)) % oWidth * stride;
        for(int j = 0;j<ow;j++) {
            int c = j / kSize;            // input channel of this column
            int xSize = j - (c * kSize);  // offset inside the kh*kw window
            int xh = startH + xSize / kw;
            int xw = startW + xSize % kw;
            int xi = n * C * H * W + c * H * W + xh * W + xw;
            out[i * ow + j] = x[xi];      // use xi instead of recomputing it
        }
    }
}
extern "C"
// Placeholder/no-op kernel: computes a global thread id but performs no
// memory reads or writes (presumably used to smoke-test kernel launches --
// TODO confirm against the caller).
__global__ void test(int n,int *o)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
}
22,528 | #include "includes.h"
// Brute-force descriptor matching: for each of the M7W points of d_pts1
// handled by this block, find the best-scoring (max dot product) of the
// NPTS points in d_pts2, writing the score and index per point.
// Block shape is (M7W, M7H/M7R); descriptors are NDIM floats, processed
// as float4 vectors through two shared-memory tiles.
__global__ void Match7(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
  __shared__ float4 buffer1[M7W*NDIM/4]; //%%%%
  __shared__ float4 buffer2[M7H*NDIM/4];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int bp1 = M7W*blockIdx.x;
  // Stage this block's M7W query descriptors into shared memory. The
  // (d + j) % (NDIM/4) skew staggers each row's layout to spread shared
  // memory accesses across banks.
  for (int d=tx;d<NDIM/4;d+=M7W)
    for (int j=ty;j<M7W;j+=M7H/M7R)    //%%%%
      buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
  float max_score = 0.0f;
  int index = -1;
  // Sweep the reference set in tiles of M7H descriptors.
  for (int bp2=0;bp2<NPTS;bp2+=M7H) {
    for (int d=tx;d<NDIM/4;d+=M7W)
      for (int j=ty;j<M7H;j+=M7H/M7R)  //%%%%
        buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
    __syncthreads();
    // Each thread scores its query (column tx) against M7R references.
    float score[M7R];
    for (int dy=0;dy<M7R;dy++)
      score[dy] = 0.0f;
    for (int d=0;d<NDIM/4;d++) {
      float4 v1 = buffer1[tx*NDIM/4 + (d + tx)%(NDIM/4)]; // undo the skew
      for (int dy=0;dy<M7R;dy++) {
        float4 v2 = buffer2[(M7R*ty + dy)*(NDIM/4) + d];
        score[dy] += v1.x*v2.x;  score[dy] += v1.y*v2.y;
        score[dy] += v1.z*v2.z;  score[dy] += v1.w*v2.w;
      }
    }
    // Track the running best match for this thread.
    for (int dy=0;dy<M7R;dy++) {
      if (score[dy]>max_score) {
        max_score = score[dy];
        index = bp2 + M7R*ty + dy;
      }
    }
    __syncthreads(); // buffer2 is reloaded next iteration
  }
  // Final reduction over the M7H/M7R partial winners per query, reusing
  // buffer1's storage for scores and indices.
  float *scores = (float*)buffer1;
  int *indices = (int*)&scores[M7W*M7H/M7R];
  scores[ty*M7W + tx] = max_score;
  indices[ty*M7W + tx] = index;
  __syncthreads();
  if (ty==0) {
    max_score = scores[tx];
    index = indices[tx];
    for (int y=0;y<M7H/M7R;y++)
      if (scores[y*M7W + tx]>max_score) {
        max_score = scores[y*M7W + tx];
        index = indices[y*M7W + tx];
      }
    d_score[bp1 + tx] = max_score;
    d_index[bp1 + tx] = index;
  }
}
22,529 | #include <bits/stdc++.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include<time.h>
using namespace std;
#define ni 24 // number of neurons in input layer
#define nh 20 // number of neurons in hidden layer
#define no 4 // number of neurons in output layer
#define width 30 // width of the game boundary
#define height 20 // height of the game boundary
#define max_snake_length 100 // maximum length of the snake
#define population_size 4096
#define natural_selection_rate 0.4
#define mutation_rate 0.01
#define generations 10000
#define negative_reward -150
#define positive_reward 500
#define max_total_steps 1000
// randomly initialise neural network parameters to negative values
// Flip the sign of each pre-filled network parameter with probability 1/2,
// driven by one host-supplied random integer per parameter (odd -> keep,
// even -> negate).
__global__ void initialise_nn(float *nns, unsigned int *random_int){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (random_int[id] % 2 == 0)
        nns[id] = -nns[id];
}
// set the input for neural network, the input size is 24 i.e it looks for wall,
// it's body and fruit in all the 8 directions
// Builds the ni (24) input values for the network from the snake's view of
// the board: for each of 8 directions (up, down, right, left and the four
// diagonals) it encodes food visibility (0/1), own-body visibility (0/1)
// and the inverse distance to the wall.
//
// input:        output array of ni floats, overwritten here
// x, y:         head position
// fruitx/y:     fruit position
// tailx/taily:  body segment positions (first ntail entries valid)
//
__device__ void set_input(float input[], int x, int y, int fruitx, int fruity,
                            int tailx[], int taily[], int ntail){
    for(int i=0;i<ni;i++)
        input[i] = 0;
    // check up direction
    // check food
    if(fruitx == x && fruity < y)
        input[0] = 1;
    // check body
    for(int i=0;i<ntail;i++){
        if(tailx[i] == x && taily[i] < y){
            input[1] = 1;
            break;
        }
    }
    // check wall distance
    if(y != 0)
        input[2] = 1 / (float)y;
    // check down direction
    // check food
    if(fruitx == x && fruity > y)
        input[3] = 1;
    // check body
    for(int i=0;i<ntail;i++){
        if(tailx[i] == x && taily[i] > y){
            input[4] = 1;
            break;
        }
    }
    // check wall distance
    if(height-y != 0)
        input[5] = 1 / (float)(height-y);
    // check right direction
    // check food
    if(fruity == y && fruitx > x)
        input[6] = 1;
    // check body
    for(int i=0;i<ntail;i++){
        if(taily[i] == y && tailx[i] > x){
            input[7] = 1;
            break;
        }
    }
    // check wall distance
    // (fixed: was the integer division 1 / (width-x), which truncated to 0
    // whenever the wall was more than one cell away, unlike the other walls)
    if(width-x != 0)
        input[8] = 1 / (float)(width-x);
    // check left direction
    // check food
    if(fruity == y && fruitx < x)
        input[9] = 1;
    // check body
    for(int i=0;i<ntail;i++){
        if(taily[i] == y && tailx[i] < x){
            input[10] = 1;
            break;
        }
    }
    // check wall distance
    if(x != 0)
        input[11] = 1 / (float)x;
    //check north-east direction
    int tempx = x, tempy = y;
    bool found_food = false, found_body = false;
    // check food and body
    while(tempx < width && tempy > 0){
        tempx++;
        tempy--;
        if(!found_food && tempx == fruitx && tempy == fruity){
            input[12] = 1;
            found_food = true;
        }
        if(!found_body){
            for(int i=0;i<ntail;i++){
                if(tempx == tailx[i] && tempy == taily[i]){
                    input[13] = 1;
                    found_body = true;
                    break;
                }
            }
        }
        if(found_body && found_food)
            break;
    }
    // check wall distance (diagonal distance to the nearest NE wall)
    int min_value = min(width-x,y);
    float distance = sqrt(pow(min_value,2)*2);
    if(distance != 0)
        input[14] = 1 / distance;
    //check north-west direction
    tempx = x, tempy = y;
    found_food = false, found_body = false;
    // check food and body
    while(tempx > 0 && tempy > 0){
        tempx--;
        tempy--;
        if(!found_food && tempx == fruitx && tempy == fruity){
            input[15] = 1;
            found_food = true;
        }
        if(!found_body){
            for(int i=0;i<ntail;i++){
                if(tempx == tailx[i] && tempy == taily[i]){
                    input[16] = 1;
                    found_body = true;
                    break;
                }
            }
        }
        if(found_body && found_food)
            break;
    }
    // check wall distance
    min_value = min(x,y);
    distance = sqrt(pow((min_value),2)*2);
    if(distance != 0)
        input[17] = 1 / distance;
    //check south-west direction
    tempx = x, tempy = y;
    found_food = false, found_body = false;
    // check food and body
    while(tempx > 0 && tempy < height){
        tempx--;
        tempy++;
        if(!found_food && tempx == fruitx && tempy == fruity){
            input[18] = 1;
            found_food = true;
        }
        if(!found_body){
            for(int i=0;i<ntail;i++){
                if(tempx == tailx[i] && tempy == taily[i]){
                    input[19] = 1;
                    found_body = true;
                    break;
                }
            }
        }
        if(found_body && found_food)
            break;
    }
    // check wall distance
    min_value = min(x,height-y);
    distance = sqrt(pow((min_value),2)*2);
    if(distance != 0)
        input[20] = 1 / distance;
    //check south-east direction
    tempx = x, tempy = y;
    found_food = false, found_body = false;
    // check food and body
    while(tempx < width && tempy < height){
        tempx++;
        tempy++;
        if(!found_food && tempx == fruitx && tempy == fruity){
            input[21] = 1;
            found_food = true;
        }
        if(!found_body){
            for(int i=0;i<ntail;i++){
                if(tempx == tailx[i] && tempy == taily[i]){
                    input[22] = 1;
                    found_body = true;
                    break;
                }
            }
        }
        if(found_body && found_food)
            break;
    }
    // check wall distance
    min_value = min(width-x,height-y);
    distance = sqrt(pow((min_value),2)*2);
    if(distance != 0)
        input[23] = 1 / distance;
}
// function to calculate value of neuron in a layer during forward function
// Computes one output neuron of a dense layer: dot product of the input
// with the neuron's weight column, plus bias, through a logistic sigmoid.
//
// input:   previous-layer activations (len_i values)
// weight:  len_i x len_o weights, laid out with the len_o outputs adjacent
//          (hence the i*len_o + index stride)
// bias:    len_o bias values
// index:   which of the len_o output neurons this call computes
//
// returns: sigmoid(w . input + b), in (0, 1)
//
__device__ float forward(float input[], float weight[], float bias[], int len_i, int len_o, int index){
    float acc = 0.0f;
    for(int i=0;i<len_i;i++){
        acc += weight[i*len_o+index] * input[i];
    }
    acc += bias[index];
    // sigmoid; expf/1.0f keep the computation in single precision (the
    // original exp()/1.0 promoted every call to double on the device)
    return 1.0f / (1.0f + expf(-acc));
}
// play the game with each block corresponding to one neural network and each thread corresponding to a parameter of neural network
// Plays one snake game per block: the block's shared neural network (one
// parameter per thread) drives the snake until it dies or runs out of
// steps; thread 0 runs the game logic and writes the fitness.
__global__ void play_game(float *nns, float *fitness, unsigned int *random_int_fruitx, unsigned int *random_int_fruity,
            int parameter_size){
    int snake_id = blockIdx.x;
    int parameter_id = threadIdx.x;
    // neural network of a particular id
    extern __shared__ float nn[];
    nn[parameter_id] = nns[snake_id*parameter_size+parameter_id];
    __syncthreads();
    // weights and biases of the neural network (contiguous slices of nn)
    float *w1 = &nn[0];
    float *b1 = &nn[ni*nh];
    float *w2 = &nn[ni*nh+nh];
    float *b2 = &nn[ni*nh+nh+nh*no];
    /* setup the game */
    // NOTE(review): every thread in the block writes these shared scalars
    // with the same value before any divergence -- redundant but benign;
    // still, formally a data race (TODO: initialize from thread 0 + sync).
    // STOP: 0, LEFT: 1, RIGHT: 2, UP: 3, DOWN: 4
    // next direction to take
    __shared__ int dir;
    dir = 0;
    // next direction to take if the first value is not possible
    __shared__ int dir_next;
    dir_next = 0;
    // last direction taken
    __shared__ int last_dir;
    last_dir = 0;
    // position of head
    __shared__ int x;
    x = width/2;
    __shared__ int y;
    y = height/2;
    // position of fruit (drawn from pre-generated host random streams)
    __shared__ int fruitx;
    __shared__ int fruity;
    __shared__ int fruit_index;
    fruit_index = snake_id * max_snake_length;
    fruitx = random_int_fruitx[fruit_index] % width;
    fruity = random_int_fruity[fruit_index] % height;
    fruit_index++;
    //snake length
    __shared__ int ntail;
    ntail = 3;
    // array to store snake body
    __shared__ int tailx[max_snake_length];
    __shared__ int taily[max_snake_length];
    // local variables
    int total_steps = 200;
    float total_reward = 0;
    float reward = 0;
    int steps = 0;
    // array to store input, hidden and output layer
    __shared__ float input[ni];
    __shared__ float hidden_output[nh];
    __shared__ float output[no];
    // flag used to exit all the threads in a block
    __shared__ int break_flag;
    break_flag = 0;
    // play until the snake dies
    while(true){
        // set the input for the game
        set_input(input,x,y,fruitx,fruity,tailx,taily,ntail);
        // forward function for the first layer
        if(parameter_id < nh){
            hidden_output[parameter_id] = forward(input,w1,b1,ni,nh,parameter_id);
        }
        __syncthreads();
        // forward function for the second layer and thus get the output layer
        if(parameter_id < no){
            output[parameter_id] = forward(hidden_output,w2,b2,nh,no,parameter_id);
        }
        __syncthreads();
        // thread id = 0 executes the logic of the game
        if(parameter_id == 0){
            // find the two best directions to be taken
            // (NOTE(review): max_index/max_index1 are floats used as array
            // indices -- works for 0..3 but int would be the natural type)
            float max_value = output[0];
            float max_index = 0;
            for(int i=1;i<no;i++){
                if(output[i] > max_value){
                    max_value = output[i];
                    max_index = i;
                }
            }
            dir = max_index + 1;
            float max_value1 = INT16_MIN;
            float max_index1 = -1;
            for(int i=0;i<no;i++){
                if(i != max_index && output[i] > max_value1){
                    max_value1 = output[i];
                    max_index1 = i;
                }
            }
            dir_next = max_index1 + 1;
            // update the snake body (shift every segment one cell back)
            int prevx = tailx[0];
            int prevy = taily[0];
            int prev2x, prev2y;
            tailx[0] = x;
            taily[0] = y;
            for(int i=1;i<ntail;i++)
            {
                prev2x = tailx[i];
                prev2y = taily[i];
                tailx[i] = prevx;
                taily[i] = prevy;
                prevx = prev2x;
                prevy = prev2y;
            }
            // move snake in the next direction; a reversal of the previous
            // direction is illegal, in which case the second-best output
            // (dir_next) is used instead
            switch(dir)
            {
            case 1:
                if(last_dir != 2)
                    x--;
                else{
                    if(dir_next == 2)
                        x++;
                    else if(dir_next == 3)
                        y--;
                    else if(dir_next == 4)
                        y++;
                }
                break;
            case 2:
                if(last_dir != 1)
                    x++;
                else{
                    if(dir_next == 1)
                        x--;
                    else if(dir_next == 3)
                        y--;
                    else if(dir_next == 4)
                        y++;
                }
                break;
            case 3:
                if(last_dir != 4)
                    y--;
                else{
                    if(dir_next == 1)
                        x--;
                    else if(dir_next == 2)
                        x++;
                    else if(dir_next == 4)
                        y++;
                }
                break;
            case 4:
                if(last_dir != 3)
                    y++;
                else{
                    if(dir_next == 1)
                        x--;
                    else if(dir_next == 2)
                        x++;
                    else if(dir_next == 3)
                        y--;
                }
                break;
            }
            last_dir = dir;
            // snake hits the wall
            if(x >= width || x < 0 || y >= height || y < 0)
            {
                reward = negative_reward;
                break_flag = 1;
            }
            // snake hits its body
            for(int i =0; i<ntail;i++)
            {
                if(tailx[i]==x && taily[i]==y)
                {
                    reward = negative_reward;
                    break_flag = 1;
                }
            }
            // snake eats the fruit
            // (NOTE(review): fruit_index is not bounded; a snake eating
            // more than max_snake_length fruits would read past its slice
            // of the random streams -- TODO confirm array sizing)
            if(x==fruitx && y==fruity)
            {
                fruitx = random_int_fruitx[fruit_index] % width;
                fruity = random_int_fruity[fruit_index] % height;
                fruit_index++;
                ntail++;
                reward = positive_reward;
            }
            total_reward += reward;
            steps += 1;
            // NOTE(review): reward is only ever 0, negative_reward (-150)
            // or positive_reward (500), so this == -1 check never fires.
            if(reward == -1){
                break_flag = 1;
            }
            reward = 0;
            // update total steps the snake can take
            // NOTE(review): reward was just reset to 0 above, so this
            // condition is never true and the step budget never grows --
            // likely the check was meant to precede the reset.
            if(reward > 0)
                total_steps = (total_steps+100 > max_total_steps) ? max_total_steps : total_steps + 100;
            if(steps > total_steps){
                break_flag = 1;
            }
        }
        __syncthreads();
        // exit while loop for all the threads in the block if the snake dies
        if(break_flag)
            break;
    }
    __syncthreads();
    // update the fitness score for the game
    if(parameter_id == 0){
        fitness[snake_id] = total_reward + steps;
    }
}
// update the device array to store top neural networks which will be used for crossover
// Gathers the surviving networks: block b copies network indices[b] from
// the old population into slot b of the new one, one parameter per thread.
__global__ void select_top(float *nns, float *nns_new, int *indices){
    int dst = blockIdx.x * blockDim.x + threadIdx.x;
    int src = indices[blockIdx.x] * blockDim.x + threadIdx.x;
    nns_new[dst] = nns[src];
}
// intialise the device array for indices
// Fills the index array with the identity permutation 0,1,2,... so that a
// subsequent fitness sort keeps track of each network's original slot.
__global__ void intialise_indices(int *indices){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    indices[tid] = tid;
}
// crossover the top neural networks to generate new neural networks for the generation population
__global__ void crossover(float *nns, float *fitness, unsigned int *random_int1, unsigned int *random_int2, int top){
int snake_id = blockIdx.x;
int parameter_id = threadIdx.x;
// select parents using Roulette Wheel Selection method
int fitness_sum = 0;
for(int i=0;i<population_size;i++)
fitness_sum += fitness[i];
// select parent 1
int parent1 = 0;
if(fitness_sum != 0){
int rand_num = random_int1[snake_id] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent1 = i;
break;
}
}
}
// select parent 2
int parent2 = 0;
if(fitness_sum != 0){
int rand_num = random_int2[snake_id + blockDim.x] % fitness_sum;
int sum = 0;
for(int i=0;i<population_size;i++){
sum += fitness[i];
if(sum > rand_num){
parent2 = i;
break;
}
}
}
// child index
int child = top + snake_id;
// choose index of the parameter randomly
int id = blockIdx.x * blockDim.x + threadIdx.x;
int rand_num = random_int2[id];
// perform crossover to generate new neural network
if(rand_num%2 == 0){
nns[child * blockDim.x + parameter_id] = nns[parent1 * blockDim.x + parameter_id];
}
else{
nns[child * blockDim.x + parameter_id] = nns[parent2 * blockDim.x + parameter_id];
}
}
// mutate the neural network parameters based on mutation rate
__global__ void mutate(float *nns, float *random_float1, float *random_float2){
int id = blockIdx.x * blockDim.x + threadIdx.x;
// mutate only if random value is less than mutation rate
if(random_float1[id] < mutation_rate){
nns[id] += random_float2[id] / 5;
if(nns[id] > 1)
nns[id] = 1;
if(nns[id] < -1)
nns[id] = -1;
}
}
// Genetic-algorithm driver: evolves a population of neural networks playing
// snake on the GPU. Each generation: play all games in parallel, score them,
// keep the top fraction, breed and mutate the rest. Logs the best network's
// parameters to output.txt and per-generation timings to generation_time.txt.
int main(){
srand(time(NULL));
ofstream fout;
// file to store best neural network parameters
fout.open("output.txt");
ofstream ftime;
// file to store every generation time
ftime.open("generation_time.txt");
// write model parameters into the file
fout<<"n_input\t\t"<<ni<<endl;
fout<<"n_hidden\t"<<nh<<endl;
fout<<"n_output\t"<<no<<endl;
fout<<"height\t\t"<<height<<endl;
fout<<"width\t\t"<<width<<endl;
// number of parameters of neural network (weights + biases of both layers)
int parameter_size = ni*nh + nh + nh*no + no;
cout<<"Parameter size: "<<parameter_size<<endl;
// neural networks for device
float *dnns, *dnns_new;
// allocate memory for neural networks in device
cudaMalloc((void **)&dnns,population_size*parameter_size*sizeof(float));
cudaMalloc((void **)&dnns_new,population_size*parameter_size*sizeof(float));
curandGenerator_t prng;
// create pseudo random number generator
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_MT19937);
curandSetPseudoRandomGeneratorSeed(prng, 41ULL);
// initialise neural networks with uniform distribution
curandGenerateUniform(prng, dnns, population_size*parameter_size);
// create random number generator for integer values
unsigned int *random_int;
cudaMalloc((void**) &random_int,population_size*parameter_size*sizeof(int));
curandGenerate(prng,random_int,population_size*parameter_size);
// initialse the neural networks to have negative values also
initialise_nn<<<population_size,parameter_size>>>(dnns,random_int);
// device variable to store fitness score and their indices
float *dfitness;
int *dindices;
// fitness score on host
float *fitness = (float *) malloc(population_size*sizeof(float));
// fitness score and indices on device
cudaMalloc((void**) &dfitness,population_size*sizeof(float));
cudaMalloc((void**) &dindices,population_size*sizeof(int));
// thrust device pointer to fitness score and indices array
thrust::device_ptr<float> fitness_ptr(dfitness);
thrust::device_ptr<int> indices_ptr(dindices);
// random number generator used for generating indices of fruit
unsigned int *random_int_fruitx;
cudaMalloc((void**) &random_int_fruitx,population_size*max_snake_length*sizeof(int));
unsigned int *random_int_fruity;
cudaMalloc((void**) &random_int_fruity,population_size*max_snake_length*sizeof(int));
// random number generator used during crossover
unsigned int *random_int_crossover1;
cudaMalloc((void**) &random_int_crossover1,2*population_size*sizeof(int));
unsigned int *random_int_crossover2;
cudaMalloc((void**) &random_int_crossover2,population_size*parameter_size*sizeof(int));
// random number generator used during mutation
float *random_float_mutate1;
cudaMalloc((void**) &random_float_mutate1,population_size*parameter_size*sizeof(float));
float *random_float_mutate2;
cudaMalloc((void**) &random_float_mutate2,population_size*parameter_size*sizeof(float));
// local variables
float max_reward = 0;
float avg_reward = 0;
int max_index = 0;
float global_max_reward = 0;
int global_max_generation = 0;
float max_avg_reward = 0;
// array to store parameters of the best neural network
float *best_snake = (float *)malloc(parameter_size*sizeof(float));
// loop for number of generations
for(int k=0;k<generations;k++){
clock_t tStart = clock();
// intialise indices array corresponding to fitness array
// NOTE(review): when population_size is not a multiple of 1024 this launch
// rounds the thread count up, so the kernel must guard id < population_size
int num_threads = (population_size > 1024) ? 1024 : population_size;
int num_blocks = population_size/1024 + 1;
intialise_indices<<<num_blocks,num_threads>>>(dindices);
// create random number generator for integer values of fruit
curandGenerate(prng,random_int_fruitx,population_size*max_snake_length);
curandGenerate(prng,random_int_fruity,population_size*max_snake_length);
// play the games on GPU
play_game<<<population_size,parameter_size,parameter_size*sizeof(float)>>>(dnns,dfitness,random_int_fruitx,random_int_fruity,parameter_size);
// copy device fitness score to host
cudaMemcpy(fitness,dfitness,population_size*sizeof(float),cudaMemcpyDeviceToHost);
// find the index with maximum fitness score and also calculate average fitness score
// FIX: the accumulator previously started at 0 while the loop started at
// i=1, so fitness[0] was never counted in the average; seed it instead.
avg_reward = fitness[0];
max_reward = fitness[0];
max_index = 0;
for(int i=1;i<population_size;i++){
if(fitness[i] > max_reward){
max_reward = fitness[i];
max_index = i;
}
avg_reward += fitness[i];
}
avg_reward /= population_size;
double generation_time = (double)(clock() - tStart)/CLOCKS_PER_SEC;
ftime<<generation_time<<endl;
printf("generation: %d\tAverage fitness: %f\tMax reward: %f\tTime: %f\n",k+1,avg_reward,max_reward,generation_time);
// find the maximum fitness score among all the generations
if(max_reward >= global_max_reward){
global_max_reward = max_reward;
global_max_generation = k+1;
}
// copy parameters of neural network with maximum average fitness score among all the generations
if(avg_reward >= max_avg_reward){
max_avg_reward = avg_reward;
cudaMemcpy(best_snake,dnns+max_index*parameter_size,parameter_size*sizeof(float),cudaMemcpyDeviceToHost);
}
// number of neural networks passed on to next generation from current generation
int top = population_size * natural_selection_rate;
// sort the device fitness score array in descennding order along with the indices array
thrust::sort_by_key(fitness_ptr,fitness_ptr+population_size,indices_ptr,thrust::greater<float>());
// update device neural network array with top neural network parameters
select_top<<<top,parameter_size>>>(dnns,dnns_new,dindices);
// swap so dnns holds the survivors; the children overwrite the rest below
float *temp = dnns_new;
dnns_new = dnns;
dnns = temp;
// create random number generator for integer values used during crossover
curandGenerate(prng,random_int_crossover1,2*population_size);
curandGenerate(prng,random_int_crossover2,population_size*parameter_size);
// crossover the top neural networks to generate the remaining neural networks in the population
crossover<<<population_size-top,parameter_size>>>(dnns,dfitness,random_int_crossover1,random_int_crossover2,top);
// create random number generator for float values used during mutation
curandGenerateUniform(prng,random_float_mutate1,population_size*parameter_size);
curandGenerateNormal(prng,random_float_mutate2,population_size*parameter_size,0.0,1.0);
// mutate all neural network parameters in accordance to mutation rate
mutate<<<population_size,parameter_size>>>(dnns,random_float_mutate1,random_float_mutate2);
}
// write parameters of the best neural network into file
fout<<"Best neural network parameters: \n";
for(int i=0;i<parameter_size;i++)
fout<<best_snake[i]<<" ";
fout<<endl;
printf("Generation: %d\tGlobal max reward: %f\n",global_max_generation,global_max_reward);
fout.close();
ftime.close();
cudaFree(dnns);
cudaFree(dnns_new);
cudaFree(random_int);
cudaFree(dfitness);
cudaFree(dindices);
cudaFree(random_int_fruitx);
cudaFree(random_int_fruity);
cudaFree(random_int_crossover1);
cudaFree(random_int_crossover2);
cudaFree(random_float_mutate1);
cudaFree(random_float_mutate2);
free(fitness);
free(best_snake);
return 0;
}
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
// Convenient Types
typedef unsigned int uint;
typedef unsigned short ushort;
// CUDA external functions
extern "C" {
bool HL_kernelLaunch(ushort threadsCount, int myrank);
unsigned char** HL_initMaster(unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int numranks);
void HL_terminate();
void HL_setRow(unsigned char* buffer, int row);
}
// Result from last compute of world.
unsigned char* d_resultData=NULL;
// Current state of world.
unsigned char* d_data=NULL;
// Current width of world.
size_t g_worldWidth=0;
// Current height of world.
size_t g_worldHeight=0;
// Current data length (product of width and height+2)
size_t g_dataLength=0;
// Current array length (product of width and height)
size_t g_arrayLength=0;
// number of alive cells at 3x3 grid (excluding center)
// Sum the 8 neighbours of the cell at (x1, y1). The arguments are
// precomputed flat offsets: x0/x1/x2 are column offsets (left/self/right),
// y0/y1/y2 are row offsets already multiplied by the row width, so each
// neighbour is addressed as data[col + row].
__device__ static inline unsigned int HL_countAliveCells(const unsigned char* data,
size_t x0,
size_t x1,
size_t x2,
size_t y0,
size_t y1,
size_t y2)
{
unsigned int alive = 0;
alive += (uint)data[x0+y0] + (uint)data[x1+y0] + (uint)data[x2+y0]; // row above
alive += (uint)data[x0+y1]                     + (uint)data[x2+y1]; // same row (centre excluded)
alive += (uint)data[x0+y2] + (uint)data[x1+y2] + (uint)data[x2+y2]; // row below
return alive;
}
// Record the world dimensions and allocate the two managed world buffers.
// The buffers carry two ghost rows (one above, one below the world), hence
// g_dataLength = width * (height + 2).
static inline void HL_initialiaze( size_t worldWidth, size_t worldHeight )
{
g_worldWidth = worldWidth;
g_worldHeight = worldHeight;
g_dataLength = g_worldWidth * (g_worldHeight + 2);
g_arrayLength = g_worldWidth * g_worldHeight;
cudaMallocManaged(&d_data, (g_dataLength * sizeof(unsigned char)));
cudaMallocManaged(&d_resultData, (g_dataLength * sizeof(unsigned char)));
// FIX: cudaMallocManaged does NOT zero memory. The pattern initializers
// (corners, spinner, replicator, "all zeros") only set live cells and rely
// on everything else being 0, so clear both buffers explicitly.
cudaMemset(d_data, 0, g_dataLength * sizeof(unsigned char));
cudaMemset(d_resultData, 0, g_dataLength * sizeof(unsigned char));
}
// Pattern 0: an all-dead world. Intentionally a no-op — the world is
// assumed to already be all zeros after allocation.
// NOTE(review): the original comment referred to calloc, but the buffers are
// allocated with cudaMallocManaged, which does not guarantee zeroed memory;
// confirm the allocation is cleared before relying on this.
static inline void HL_initAllZeros( size_t worldWidth, size_t worldHeight )
{
// calloc init's to all zeros
}
// Pattern 1: set every world cell (the rows between the two ghost rows) alive.
static inline void HL_initAllOnes( size_t worldWidth, size_t worldHeight )
{
const size_t first = g_worldWidth;                // skip the top ghost row
const size_t last  = g_dataLength - g_worldWidth; // stop before the bottom ghost row
for (size_t i = first; i < last; ++i)
d_data[i] = 1;
}
// Pattern 2: light up columns 10..19 of data-buffer row 10.
static inline void HL_initOnesInMiddle( size_t worldWidth, size_t worldHeight )
{
const size_t rowStart = 10 * g_worldWidth;
// walk the whole row, switching on only the 10-cell window
for (size_t i = rowStart; i < rowStart + g_worldWidth; ++i)
{
const bool inWindow = (i >= rowStart + 10) && (i < rowStart + 20);
if (inWindow)
d_data[i] = 1;
}
}
// Pattern 3: one live cell in each corner of the global (multi-rank) world.
// The first rank owns the upper corners, the last rank the lower corners.
static inline void HL_initOnesAtCorners( size_t worldWidth, size_t worldHeight, int myrank, int numranks )
{
const size_t firstRow = worldWidth;               // row 1: first row after the top ghost row
const size_t lastRow  = worldWidth * worldHeight; // start of the last world row
if (myrank == 0) {
d_data[firstRow] = 1;                         // upper left
d_data[firstRow + worldWidth - 1] = 1;        // upper right
}
if (myrank == numranks - 1) {
d_data[lastRow] = 1;                          // lower left
d_data[lastRow + worldWidth - 1] = 1;         // lower right
}
}
// Pattern 4: a three-cell spinner in the upper-left corner of rank 0's world
// (the third cell sits at the row's right edge and wraps around horizontally).
static inline void HL_initSpinnerAtCorner( size_t worldWidth, size_t worldHeight, int myrank, int numranks )
{
if (myrank != 0)
return;                              // spinner lives on the first rank only
const size_t row1 = worldWidth;          // first world row, after the ghost row
d_data[row1] = 1;                        // upper left
d_data[row1 + 1] = 1;                    // its right neighbour
d_data[row1 + worldWidth - 1] = 1;       // upper right (wraps to the left cell)
}
// Pattern 5: an L-shaped HighLife replicator seed placed on the middle rank:
// three cells to the right of (x, y) and three cells below it.
static inline void HL_initReplicator( size_t worldWidth, size_t worldHeight, int myrank, int numranks )
{
if (myrank != numranks / 2)
return;                                   // only the middle rank hosts the seed
const size_t x = worldWidth / 2;
const size_t y = 1;
for (size_t dx = 1; dx <= 3; ++dx)            // horizontal arm
d_data[x + y * worldWidth + dx] = 1;
for (size_t dy = 1; dy <= 3; ++dy)            // vertical arm
d_data[x + (y + dy) * worldWidth] = 1;
}
// Select a CUDA device for this rank (round-robin over the visible devices),
// allocate the managed world buffers, and seed the world with the requested
// pattern (0..5). Exits the process on any CUDA error or unknown pattern.
// Returns the address of the current-world buffer pointer, so the caller
// keeps seeing the right buffer after HL_swap exchanges the pointers.
unsigned char** HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight, int myrank, int numranks )
{
int cudaDeviceCount = -1;
cudaError_t cE = cudaSuccess;
if ((cE = cudaGetDeviceCount(&cudaDeviceCount)) != cudaSuccess) {
printf("Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount);
exit(-1);
}
// spread ranks across devices round-robin
if ((cE = cudaSetDevice(myrank % cudaDeviceCount)) != cudaSuccess) {
printf("Unable to have rank %d set to cuda device %d, error is %d\n", myrank, (myrank % cudaDeviceCount), cE);
exit(-1);
}
HL_initialiaze(worldWidth, worldHeight);
switch(pattern)
{
case 0:
HL_initAllZeros( worldWidth, worldHeight );
break;
case 1:
HL_initAllOnes( worldWidth, worldHeight );
break;
case 2:
HL_initOnesInMiddle( worldWidth, worldHeight );
break;
case 3:
HL_initOnesAtCorners( worldWidth, worldHeight, myrank, numranks );
break;
case 4:
HL_initSpinnerAtCorner( worldWidth, worldHeight, myrank, numranks );
break;
case 5:
HL_initReplicator( worldWidth, worldHeight, myrank, numranks );
break;
default:
printf("Pattern %u has not been implemented \n", pattern);
exit(-1);
}
return &d_data;
}
// Exchange two world-buffer pointers (used to flip current/result after a step).
static inline void HL_swap( unsigned char **pA, unsigned char **pB)
{
unsigned char *hold = *pB;
*pB = *pA;
*pA = hold;
}
// Set a specific row of data
// Overwrite row `row` of the managed world buffer with `buffer` (host data);
// used to fill the ghost rows with neighbour-rank data between steps.
void HL_setRow(unsigned char* buffer, int row) {
unsigned char* dst = d_data + (size_t)row * g_worldWidth;
for (size_t col = 0; col < g_worldWidth; ++col)
dst[col] = buffer[col];
}
// Free CUDA memory
void HL_terminate() {
cudaFree(d_data);
cudaFree(d_resultData);
}
// CUDA kernel
// One HighLife step, one thread per world cell (ghost rows excluded).
// `index` enumerates the worldWidth x worldHeight interior; y is shifted by
// +1 so the top ghost row is read but never written. Columns wrap
// horizontally; vertically the ghost rows supply the neighbour values.
// HighLife rules (B36/S23): survive on 2 or 3 neighbours, born on 3 or 6.
__global__ void HL_kernel(unsigned char* d_data, unsigned int worldWidth, unsigned int worldHeight, unsigned char* d_resultData) {
unsigned int index = blockIdx.x *blockDim.x + threadIdx.x;
// FIX: guard the tail so launches that round the thread count up cannot
// touch cells outside the world.
if (index >= worldWidth * worldHeight)
return;
size_t x = index % worldWidth;
size_t y = (index / worldWidth) + 1;   // +1 skips the top ghost row
// row offsets, already multiplied by the row width; y-1 and y+1 stay
// inside the padded buffer, so the modulo is a safety wrap only
size_t y0 = ((y + (worldHeight + 2) - 1) % (worldHeight + 2)) * worldWidth;
size_t y1 = y * worldWidth;
size_t y2 = ((y + 1) % (worldHeight + 2)) * worldWidth;
// column offsets with horizontal wrap-around
size_t x1 = x;
size_t x0 = (x1 + worldWidth - 1) % worldWidth;
size_t x2 = (x1 + 1) % worldWidth;
// count alive cells around the current square
uint count = HL_countAliveCells(d_data, x0, x1, x2, y0, y1, y2);
unsigned char alive;
if (d_data[y1 + x] == 1)
alive = (count == 2 || count == 3) ? 1 : 0;   // survival
else
alive = (count == 3 || count == 6) ? 1 : 0;   // birth
d_resultData[y1 + x] = alive;
// FIX: removed the trailing __syncthreads() — threads exchange no data
// here, so the barrier was pure overhead.
}
// Launch the kernel for a number of iterations
// Run one HighLife step: launch the kernel over the world interior, swap the
// current/result buffers, then clear the two ghost rows on the host (they
// are refilled from neighbour ranks via HL_setRow before the next step).
bool HL_kernelLaunch(ushort threadsCount, int myrank)
{
dim3 threadsPerBlock(threadsCount);
// NOTE(review): integer division — if threadsCount does not divide
// g_arrayLength, the last partial block of cells is never computed.
dim3 blocksPerGrid(g_arrayLength / threadsCount);
HL_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_data, g_worldWidth, g_worldHeight, d_resultData);
cudaDeviceSynchronize(); // managed memory: finish the kernel before host access
HL_swap(&d_resultData, &d_data);
// Clear ghost rows (host writes to managed memory)
for (int i = 0; i < g_worldWidth; i++) {
d_data[i] = 0;
d_data[(g_worldWidth * (g_worldHeight + 1)) + i] = 0;
}
cudaDeviceSynchronize();
return true;
}
#include "includes.h"
// Hillis-Steele inclusive prefix sum over one block's segment, computed in
// dynamic shared memory (launch with blockDim.x * sizeof(unsigned int)).
// NOTE(review): as the author's comment says, this is only set up for a
// single block — the cross-block carry below reads d_out of the previous
// block with no grid-wide synchronisation, so with multiple blocks the
// result depends on block scheduling order.
// NOTE(review): within each pass, segment[thread_x - i] may be written by
// another thread while this thread reads it (no sync between read and
// write, no double buffer) — confirm with racecheck.
__global__ void inclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
//Hillis Steele implementation
//NOTE: right now, this is only set up for 1 block of 1024 threads
int abs_x = threadIdx.x + blockIdx.x * blockDim.x;
int thread_x = threadIdx.x;
extern __shared__ unsigned int segment[];
segment[thread_x] = d_in[abs_x];
//d_out[thread_x] = d_in[thread_x];
__syncthreads();
// doubling stride: after pass with stride i, each element holds the sum
// of the elements up to 2*i positions back
for (unsigned int i = 1; i < blockDim.x; i <<= 1)
{
if (thread_x >= i)
{
//d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i];
segment[thread_x] = segment[thread_x] + segment[thread_x - i];
}
__syncthreads();
}
//this happens in different blocks, so no need to syncthreads()
if (blockIdx.x > 0)
{
//carry over the result of the last segment
segment[thread_x] = segment[thread_x] + d_out[blockDim.x * (blockIdx.x - 1)];
}
d_out[abs_x] = segment[thread_x];
}
#include <cuda.h>
#include <stdio.h>
#include <malloc.h>
// Append `Matrix` (row x col, row-major) to result_mult.csv as comma-
// separated values, one matrix row per line, followed by a blank line.
// Exits the process if the file cannot be opened.
void save_matriz(float *Matrix, int row, int col){
FILE *f = fopen("result_mult.csv", "a");
if (f == NULL){
printf("File error\n");
exit(-1);
}
for (int r = 0; r < row; r++) {
for (int c = 0; c < col; ++c) {
// last column gets no trailing comma
const char *fmt = (c == col - 1) ? "%.2f" : "%.2f, ";
fprintf(f, fmt, Matrix[r * col + c]);
}
fprintf(f, "\n");
}
fprintf(f, "\n");
fclose(f);
}
// Pretty-print a rows x cols row-major matrix to stdout, framed by rules.
__host__
void print(float *M, int rows, int cols){
printf("\n");
printf("----------------------------------------\n");
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++)
printf("%.2f ", M[r * cols + c]);
printf("\n");
}
printf("----------------------------------------\n");
printf("\n");
}
// Naive GEMM: each thread computes one element of the rowM1 x colM2 result,
// iterating the shared dimension (rowM2 == colM1).
__global__ void matrixMultGPU(float *d_matrix1, float *d_matrix2, float *d_MatrixR, int rowM1 , int rowM2 , int colM1 , int colM2 ) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
if (row >= rowM1 || col >= colM2)
return;                           // guard threads outside the result matrix
float acc = 0.0f;
for (int k = 0; k < rowM2; ++k)
acc += d_matrix1[row * colM1 + k] * d_matrix2[k * colM2 + col];
d_MatrixR[row * colM2 + col] = acc;
}
// Fill M (rows x cols, row-major) by scanning comma-separated floats from
// `archivo`. NOTE: this function closes the stream before returning, so the
// caller must not reuse the FILE* afterwards.
__host__
void read_matrix_from_file(float *M, FILE *archivo, int rows, int cols){
for (int i = 0; i < rows; ++i){
for (int j = 0; j < cols; ++j){
// NOTE(review): the fscanf result is not checked; a short or
// malformed file leaves trailing elements uninitialised
fscanf(archivo, "%f,", &M[i * cols + j]);
}
}
fclose(archivo);
return;
}
// Returns true when the matrices CANNOT be multiplied, i.e. the inner
// dimensions (columns of M1, rows of M2) differ.
bool validate(int colM1 ,int rowM2){
return colM1 != rowM2;
}
// Driver: read two matrices from the CSV-like files given as argv[1]/argv[2]
// (first two ints are rows/cols), multiply them on the GPU, print all three
// matrices and append the product to result_mult.csv.
int main(int argc, char** argv){
if (argc != 3){
printf("agregue los archivos como parametro\n");
return 1;
}
float *h_matrix1, *h_matrix2, *h_MatrixR;
int rowM1 , rowM2 , colM1 , colM2;
cudaError_t error = cudaSuccess;
FILE *file_1 , *file_2;
file_1 = fopen(argv[1], "r");
file_2 = fopen(argv[2], "r");
// FIX: the original never checked fopen, so a bad path crashed in fscanf.
if (file_1 == NULL || file_2 == NULL){
printf("Error abriendo los archivos de entrada\n");
return 1;
}
fscanf(file_1, "%d", &rowM1);
fscanf(file_1, "%d", &colM1);
fscanf(file_2, "%d", &rowM2);
fscanf(file_2, "%d", &colM2);
if (validate(colM1,rowM2)){
printf("Las matrices son incompatibles y no se pueden multiplicar");
return 1;
}
// FIX: byte counts were stored in float variables; use size_t.
size_t sizeM1 = rowM1 * colM1 * sizeof(float);
size_t sizeM2 = rowM2 * colM2 * sizeof(float);
size_t sizeMR = rowM1 * colM2 * sizeof(float);
h_matrix1 = (float*)malloc(sizeM1);
h_matrix2 = (float*)malloc(sizeM2);
h_MatrixR = (float*)malloc(sizeMR);
read_matrix_from_file(h_matrix1, file_1, rowM1, colM1);
read_matrix_from_file(h_matrix2, file_2, rowM2, colM2);
float *d_matrix1, *d_matrix2, *d_MatrixR;
error = cudaMalloc ((void **) &d_matrix1, sizeM1);
if (error != cudaSuccess){
printf("Error solicitando memoria en la GPU para d_matrix1\n");
exit(-1);
}
error = cudaMalloc ((void **) &d_matrix2, sizeM2);
if (error != cudaSuccess){
printf("Error solicitando memoria en la GPU para d_matrix2\n");
exit(-1);
}
error = cudaMalloc ((void **) &d_MatrixR, sizeMR);
if (error != cudaSuccess){
printf("Error solicitando memoria en la GPU para d_MatrixR\n");
exit(-1);
}
cudaMemcpy(d_matrix1, h_matrix1, sizeM1, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrix2, h_matrix2, sizeM2, cudaMemcpyHostToDevice);
// 16x16 thread blocks, grid rounded up to cover the whole result
dim3 bloques(ceil(colM2/16.0),ceil(rowM1/16.0),1);
dim3 hilos(16,16,1);
matrixMultGPU<<<bloques,hilos>>>(d_matrix1,d_matrix2,d_MatrixR, rowM1, rowM2 , colM1 , colM2);
cudaMemcpy(h_MatrixR,d_MatrixR,sizeMR,cudaMemcpyDeviceToHost);
printf("Matrix 1");
print(h_matrix1, rowM1 , colM1);
printf("Matrix 2");
print(h_matrix2, rowM2 , colM2);
printf("Matrix Resultado");
print(h_MatrixR, rowM1 , colM2);
save_matriz(h_MatrixR , rowM1, colM2);
cudaFree(d_matrix1); cudaFree(d_matrix2); cudaFree(d_MatrixR);
free(h_matrix1); free(h_matrix2); free(h_MatrixR);
}
#include <cuda_runtime.h>
#include <stdio.h>
// Query and report the number of CUDA-capable devices.
// FIX: the original computed deviceCount and the error code but used
// neither; report the result instead of silently discarding it.
int main(int argc, char** argv) {
printf("%s Starting...\n", argv[0]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
return 1;
}
printf("Detected %d CUDA capable device(s)\n", deviceCount);
return 0;
}
22,534 |
// Babak Poursartip
// 09/28/2020
// section 2: video 20
#include <iostream>
// Print, for every thread, its global id, warp id within the block, and the
// global block id of a 2D (gridDim.x x gridDim.y) grid of 1D blocks.
// FIX: the global id formula added blockIdx.y instead of scaling it by the
// number of threads per grid row; gid is now
//   blockIdx.y * (gridDim.x * blockDim.x) + blockIdx.x * blockDim.x + threadIdx.x
__global__ void print_details_of_warps() {
int gid = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x +
threadIdx.x;
int warp_id = threadIdx.x / 32;   // warps are 32 threads wide
int gbid = blockIdx.y * gridDim.x + blockIdx.x;
printf(" tid: %d, bid.x: %2d, bid.y: %d, gid: %3d, warp_id: %d, gbid: %d\n",
threadIdx.x, blockIdx.x, blockIdx.y, gid, warp_id, gbid);
}
// Launch the warp-details kernel on a 2x2 grid of 42-thread blocks (42 is
// deliberately not a multiple of 32, so the last warp is partial).
int main() {
printf(" starts ...\n");
const dim3 blockDims(42);
const dim3 gridDims(2, 2);
print_details_of_warps<<<gridDims, blockDims>>>();
cudaDeviceSynchronize();   // wait so all device printf output is flushed
printf(" finished \n");
cudaDeviceReset();
return EXIT_SUCCESS;
}
#include "includes.h"
// Binary (XNOR-net style) GEMM on bit-packed operands: A is m x n words,
// B is n x k words, each word packing 32 binary weights. popcount(a ^ b)
// counts bit disagreements between two 32-bit columns.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE); the tile loop iterates
// n / BLOCK_SIZE times, so any remainder of n is dropped — the caller must
// pad n to a multiple of BLOCK_SIZE.
__global__ void xnor_gemm(unsigned int* A, unsigned int* B, float* C, int m, int n, int k) {
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread block computes one sub-matrix Csub of C
float* Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol];
// Shared memory used to store Asub and Bsub respectively
__shared__ unsigned int As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Each thread computes one element of Csub
// by accumulating results into Cvalue
// block_size = 16 -> 256 threads, one per Csub element
unsigned int Cvalue = 0;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int i = 0; i < (n / BLOCK_SIZE); ++i) {
// Get sub-matrix Asub of A
unsigned int* Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i];
// Get sub-matrix Bsub of B
unsigned int* Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[row*n+col];
Bs[row][col] = Bsub[row*k+col];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together: XOR + popcount counts mismatching
// bits between A's row word and B's column word
for (int j = 0; j < BLOCK_SIZE; ++j) Cvalue += __popc(As[row][j]^Bs[j][col]);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory; each thread writes one element.
// -(2*Cvalue - 32*n) == 32*n - 2*popcount maps the mismatch count back to
// a +/-1 dot product (matches minus mismatches over 32*n bits).
if(col + blockCol* BLOCK_SIZE< k && row + blockRow* BLOCK_SIZE< m) Csub[row*k+col] = -(2*(float)Cvalue-32*n);
}
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <chrono>
#include <cstdlib>
#include <cmath>
// the max number of (x,y) threads is 1024
// which is 1024 = 32 x 32, so 0 <= threadIdx.x < 32 and 0 <= threadIdx.y < 32
constexpr unsigned int THREAD_COUNT = 32;
#define TILE_WIDTH 32
// Fill a width x width row-major matrix with uniform random floats in (0, 1],
// one thread per element.
// FIX: the original seeded curand with i + j, so every element on the same
// anti-diagonal received an identical "random" value. Use a fixed seed with
// a unique subsequence per element instead, which also makes runs
// reproducible.
__global__ void kernel_fill_random(float* matrix, const unsigned int width) {
// element index
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= width) return;
curandState s;
curand_init(1234ULL, (unsigned long long)i * width + j, 0, &s);
matrix[i * width + j] = curand_uniform(&s);
}
// P = M x N for square width x width row-major matrices: one thread per
// output element, guarded against the ragged last blocks.
__global__ void kernel_naive(const float* M, const float* N, float* P, const unsigned int width) {
const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= width || col >= width)
return;
float acc = 0;
for (unsigned int k = 0; k < width; ++k)
acc += M[row * width + k] * N[k * width + col];
P[row * width + col] = acc;
}
// Tiled P = M x N: each block stages TILE_WIDTH x TILE_WIDTH tiles of M and
// N into shared memory, zero-padding out-of-range elements so the inner
// product needs no guards. Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void kernel_tiled(const float* M, const float* N, float* P, const int width) {
__shared__ float Ms[TILE_WIDTH][TILE_WIDTH];
__shared__ float Ns[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float result = 0;
// FIX: compute the tile count with integer ceil-division; the original
// used std::ceil with float casts in device code, which loses precision
// for large widths and leans on host <cmath> overloads.
const unsigned int TILE_COUNT = (width + TILE_WIDTH - 1) / TILE_WIDTH;
for (int m = 0; m < TILE_COUNT; ++m) {
// stage one tile of M (row-major) and N, padding with zeros
if (m * TILE_WIDTH + tx < width && row < width) {
Ms[ty][tx] = M[row * width + (m * TILE_WIDTH + tx)];
} else {
Ms[ty][tx] = 0.0;
}
if (m * TILE_WIDTH + ty < width && col < width) {
Ns[ty][tx] = N[col + (m * TILE_WIDTH + ty) * width];
} else {
Ns[ty][tx] = 0.0;
}
__syncthreads();   // both tiles fully loaded before use
for (int k = 0; k < TILE_WIDTH; ++k) {
result += Ms[ty][k] * Ns[k][tx];
}
__syncthreads();   // done reading before the next iteration overwrites
}
if (row < width && col < width) {
P[row * width + col] = result;
}
}
// Launch the naive kernel over the whole matrix and return the elapsed wall
// time in seconds (launch + completion, measured on the host).
double naive(const float* d_M, const float* d_N, float* d_P, const unsigned int width) {
const unsigned int nBlocks = (unsigned int)std::ceil((float)width/(float)THREAD_COUNT);
const dim3 grid(nBlocks, nBlocks);
const dim3 block(THREAD_COUNT, THREAD_COUNT);
const auto t0 = std::chrono::high_resolution_clock::now();
kernel_naive<<<grid, block>>>(d_M, d_N, d_P, width);
cudaDeviceSynchronize();   // include kernel completion in the timing
const auto t1 = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::duration<double> >(t1 - t0).count();
}
// Launch the tiled kernel over the whole matrix and return the elapsed wall
// time in seconds (launch + completion, measured on the host).
double tiled(const float* d_M, const float* d_N, float* d_P, const unsigned int width) {
const unsigned int nBlocks = (unsigned int)std::ceil((float)width/(float)TILE_WIDTH);
const dim3 grid(nBlocks, nBlocks);
const dim3 block(TILE_WIDTH, TILE_WIDTH);
const auto t0 = std::chrono::high_resolution_clock::now();
kernel_tiled<<<grid, block>>>(d_M, d_N, d_P, width);
cudaDeviceSynchronize();   // include kernel completion in the timing
const auto t1 = std::chrono::high_resolution_clock::now();
return std::chrono::duration_cast<std::chrono::duration<double> >(t1 - t0).count();
}
// Read exactly `size` bytes of raw data from `filepath` into `matrix`.
// Verifies the file length matches `size` first; exits the process on any
// open/size/read failure.
void file_load(const char* filepath, float* matrix, const size_t size) {
FILE* file = fopen(filepath, "rb");
if (!file) {
printf(">>>ERROR: opening %s\n", filepath);
exit(1);
}
// determine the file length by seeking to the end
fseek(file, 0, SEEK_END);
size_t result = ftell(file);
rewind(file);
if (size != result) {
printf(">>>ERROR: wrong filesize %ld\n", size);
exit(1);
}
result = fread(matrix, 1, size, file);
if (result != size) {
printf(">>>ERROR: reading %s\n", filepath);
exit(1);
}
fclose(file);
}
// Write `size` bytes from `matrix` to `filepath`; exits the process on any
// open/write failure.
void file_save(float* matrix, const char* filepath, const size_t size) {
FILE* file = fopen(filepath, "wb");
if (!file) {
printf(">>>ERROR: opening %s\n", filepath);
exit(1);
}
const size_t written = fwrite(matrix, 1, size, file);
if (written != size) {
printf(">>>ERROR: writing %s\n", filepath);
exit(1);
}
fclose(file);
}
// Element-wise exact comparison of two width x width float matrices.
// Prints the first mismatching flat index and returns false; true otherwise.
bool is_same(const float* matrix1, const float* matrix2, const size_t width) {
const size_t count = width * width;
for (size_t i = 0; i < count; ++i) {
if (matrix1[i] == matrix2[i])
continue;
printf("position: %ld\n", i);
return false;
}
return true;
}
// Driver: load two width x width float matrices from binary files, multiply
// them on the GPU with both the naive and the tiled kernel, verify the two
// results match exactly, and save the product to a third file.
// Usage: prog <M file> <N file> <P out file> <width>
int main(int argc, char** argv) {
if (argc != 5) { return 0; }
const char* filepath_M = argv[1];
const char* filepath_N = argv[2];
const char* filepath_P = argv[3];
const unsigned int width = atoi(argv[4]);
const size_t size = width * width * sizeof(float);
float* M = (float*)malloc(size);
float* N = (float*)malloc(size);
float* P1 = (float*)malloc(size);
float* P2 = (float*)malloc(size);
printf("> COMPLETE: malloc on host\n");
file_load(filepath_M, M, size);
printf("> COMPLETE: load %s\n", filepath_M);
file_load(filepath_N, N, size);
printf("> COMPLETE: load %s\n", filepath_N);
float *d_M, *d_N, *d_P;
cudaMalloc(&d_M, size); cudaMalloc(&d_N, size); cudaMalloc(&d_P, size);
cudaDeviceSynchronize();
printf("> COMPLETE: malloc on device\n");
cudaMemcpy(d_M, M, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, N, size, cudaMemcpyHostToDevice);
printf("> COMPLETE: memcpy to device\n");
double elapsed_naive = naive(d_M, d_N, d_P, width);
printf(">>>RESULT: naive elapsed time: %8lf (sec)\n", elapsed_naive);
cudaMemcpy(P1, d_P, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// d_P is reused: the tiled result overwrites the naive result on device
double elapsed_tiled = tiled(d_M, d_N, d_P, width);
printf(">>>RESULT: elapsed time: %8lf (sec)\n", elapsed_tiled);
cudaMemcpy(P2, d_P, size, cudaMemcpyDeviceToHost);
// NOTE(review): exact float equality between the two kernels is assumed to
// hold because both accumulate in the same k order — confirm on target HW
if (is_same(P1, P2, width)) {
printf("> COMPLETE: no difference between results of naive and tiled method\n");
} else {
printf(">>>ERROR: difference between results of naive and tiled method\n");
exit(1);
}
file_save(P2, filepath_P, size);
printf("> COMPLETE: save result matrix on %s\n", filepath_P);
// free memory on device
cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
// free memory on host
free(M); free(N); free(P1); free(P2);
}
#include <math.h>
#include <float.h>
#include <cuda.h>
// One Jacobi relaxation step on the (N-2)x(N-2) interior of an NxN grid:
// g = 4-point average of h, plus a per-block reduction of the squared
// per-cell change into residual[] (one partial sum per block).
// Launch with dynamic shared memory of blockDim.x*blockDim.y floats; the
// reduction assumes the per-block thread count is a power of two.
// FIXES vs. original: the shared slot was never initialised
// ("v_reduction[red_ix];" was a no-op expression, leaving garbage for
// threads past the interior), __syncthreads() sat inside a divergent
// branch, and the reduction size used blockDim.x twice.
__global__ void gpu_Heat (float *h, float *g, int N, float *residual) {
extern __shared__ float v_reduction[]; // per-block partial sums
int block_id = (blockIdx.x + blockIdx.y*gridDim.x);
int t_id = block_id*blockDim.x*blockDim.y + threadIdx.x + threadIdx.y*blockDim.x;
// transposed in-block index; a bijection as long as the block is square
int red_ix = threadIdx.x*blockDim.x + threadIdx.y;
v_reduction[red_ix] = 0.0f;   // FIX: zero so out-of-range threads contribute 0
// Threads are created in excess; only those mapping to an interior cell work.
if(t_id < (N-2)*(N-2)){
// Map the compact interior index onto the full NxN matrix:
int mod_id = N              // first row of full matrix
+ (t_id/(N-2))*N            // y-coord in small matrix times full row
+ 1                         // first interior element of the row
+ (t_id%(N-2));             // x-coord in small matrix
g[mod_id] = 0.25f*( h[mod_id-N]   // up
+ h[mod_id+1]                     // right
+ h[mod_id+N]                     // down
+ h[mod_id-1]                     // left
);
float diff = g[mod_id]-h[mod_id];
v_reduction[red_ix] = diff*diff;
}
__syncthreads();   // FIX: barrier moved out of the divergent branch
// Tree reduction over the whole block (power-of-two thread count assumed).
for(int s = (blockDim.x*blockDim.y)>>1; s>0; s>>=1){
if(red_ix < s) v_reduction[red_ix] += v_reduction[red_ix+s];
__syncthreads();
}
// NOTE(review): this indexing transposes block coordinates and assumes a
// square grid (gridDim.x == gridDim.y) — confirm against the host reduction.
if(red_ix == 0) residual[blockIdx.x*gridDim.x+blockIdx.y] = v_reduction[0];
}
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
int seed;
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
// One pass of a pairwise XOR reduction. Each thread owns a window of
// `cskip` elements [start, end]; it folds the value at one end of its
// window into the other, so after repeated launches with doubling cskip the
// array collapses toward da[0]. The value -1 marks a slot already consumed
// in an earlier pass.
// NOTE(review): -1 doubles as a sentinel, so an input element equal to -1
// would be misinterpreted; the host fills the array with random(), which is
// non-negative per POSIX, so this cannot occur here.
__global__ void XOR(int *da,int cskip, int num)
{
long long int thread_no=blockDim.x * blockIdx.x + threadIdx.x;
long long int start= thread_no*cskip;
if(start>=num)return;                  // window starts past the data
long long int end=start+cskip-1;
if(end>=num){end=num-1;}               // clamp the final partial window
if(start==end)return;                  // single element: nothing to fold
if(da[end]!=-1){
// odd threads fold into the right end, even threads into the left
if(thread_no%2!=0){da[end]=da[end]^da[start];da[start]=-1;}
else {da[start]=da[start]^da[end];da[end]=-1;}
}
else {
// right end already consumed: just move/mark the surviving value
if(thread_no%2!=0){da[end]=da[start];da[start]=-1;}
else {da[end]=-1;}
}
}
// Host driver: fill an array with `num` random ints (argv[1] = count,
// argv[2] = RNG seed), then repeatedly launch the XOR kernel with a
// doubling window size until the whole array has been folded, and print the
// XOR of all elements (left in da[0]).
int main(int argc,char** argv){
int *ar,num;
if(argc!=3){
printf("Invalid number of Arguments");
exit(-1);
}
num=atoi(argv[1]);
seed=atoi(argv[2]);
if(num<=0){
printf("Invalid Number");
exit(-1);
}
ar=(int*)malloc(num*sizeof(int));
if(!ar){
perror("malloc");
exit(-1);
}
srand(seed);
for(int i=0;i<num;i++){
ar[i]=random();   // non-negative, so the kernel's -1 sentinel is safe
}
int skip=2;
int size=num*sizeof(int);
int xor_output;
int*dA;
cudaMalloc(&dA,size);
CUDA_ERROR_EXIT("cudaMalloc");
//int*cskip;
//cudaMalloc(&cskip,sizeof(int));
//CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(dA, ar, size, cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("memcpy1");
//cudaMemcpy(&xor_output,&dA[0],sizeof(int), cudaMemcpyDeviceToHost);
//printf("%d\n",xor_output);
//cudaMemcpy(cskip,&skip, sizeof(int), cudaMemcpyHostToDevice);
//CUDA_ERROR_EXIT("memcpy1");
// each pass halves the live slots; loop until the window covers the array
while(skip/2<num){
int num_threads=num/skip;
if(num%skip)num_threads++;          // ceil(num / skip) windows
int num_blocks=num_threads/1024;
if(num_threads%1024)num_blocks++;   // ceil(threads / 1024) blocks
XOR<<<num_blocks, 1024 >>>(dA,skip,num);
CUDA_ERROR_EXIT("kernel invocation");
skip*=2;
}
cudaMemcpy(&xor_output,&dA[0],sizeof(int), cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy2");
printf("%d\n",xor_output);
cudaFree(dA);
free(ar);
return 0;
}
#include<iostream>
#include<stdlib.h>
#include<math.h>
#define MAX_THREADS 512
typedef struct _matrix {
int xDim;
int yDim;
int *vals;
} matrix;
// For result cell group (row, col): multiply a[row][col] by every element of
// row `col` of b, scattering the products into resultMatrix with a
// grid-stride loop. resultMatrix must be a.xDim * b.xDim * a.yDim long.
__global__
void doMultiplications(_matrix *a, _matrix *b, int* resultMatrix,int row, int col){
const int first = blockIdx.x * blockDim.x + threadIdx.x;
const int step  = blockDim.x * gridDim.x;
const int base  = row * a->xDim * b->xDim + col;   // start of this (row,col) group
const int lhs   = a->vals[row * a->xDim + col];    // invariant left factor
// std::cout << "Doing request, Start index for result array:" << startIndex << ". Doing row:" << row << ", " << col << std::endl;
for (int i = first; i < b->xDim; i += step) {
resultMatrix[base + i * a->xDim] = lhs * b->vals[col * b->xDim + i];
}
}
// Kernel: for each output cell of c, sum its a->xDim elementary products
// from multVals (laid out as cell_index * a->xDim + k) into c->vals.
// Grid-stride loop over output cells.  Parameter b is unused but kept for
// interface compatibility.  c->vals is accumulated with +=, so the caller
// must zero it before launch.
__global__
void doAdds(_matrix *a, _matrix *b, int* multVals, _matrix *c){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int numToDo = a->xDim;   // elementary products per output cell
    int matrixDimensions = c->xDim * c->yDim;
    // Loop variable renamed (was `int a`): it shadowed the matrix
    // parameter `a`, which is confusing and error-prone.
    for(int cell = index; cell < matrixDimensions; cell += stride){
        for(int i = 0; i < numToDo; i++){
            c->vals[cell] += multVals[cell * numToDo + i];
        }
    }
}
// Print `size` ints as "[v0, v1, ... ]" followed by a newline.
void printArray(int *vals, int size){
    std::cout << "[";
    for(int idx = 0; idx != size; ++idx)
        std::cout << vals[idx] << ", ";
    std::cout << "]" << std::endl;
}
// Print matrix m row by row, one "[ v v v ]" line per row, then a blank line.
void printMatrix(_matrix *m){
    for(int row = 0; row < m->yDim; row++){
        std::cout << "[ ";
        for(int col = 0; col < m->xDim; col++)
            std::cout << m->vals[row * m->xDim + col] << " ";
        std::cout << "]" << std::endl;
    }
    std::cout << std::endl;
}
// Multiply a (a->yDim x a->xDim) by b (b->xDim wide); returns a newly
// managed-allocated matrix c of size a->yDim x b->xDim.  Stage 1 launches
// one kernel per input cell of a to compute all elementary products;
// stage 2 sums the products per output cell.  Caller frees c with
// deleteMatrix().
_matrix *multiplyMatrices(_matrix *a, _matrix *b){
    int *multiplications;
    // One slot per elementary product: a->xDim products for each of the
    // a->yDim * b->xDim output cells.
    int multiplicationSize = a->xDim * b->xDim * a->yDim;
    cudaMallocManaged(&multiplications,multiplicationSize * sizeof(int));
    // Round the thread count up to a multiple of 32 (warp size).
    int threads = (b->xDim / 32 + 1) * 32;
    int blocks = 1;
    if(threads > MAX_THREADS){
        blocks = threads/MAX_THREADS + 1;
        threads = MAX_THREADS;
    }
    std::cout << "Determined that the matrices needs " << blocks << " blocks and " << threads << " threads." << std::endl;
    std::cout << "Sending multiplication requests..." << std::endl;
    int rows = a->yDim;
    int cols = a->xDim;
    for(int i = 0; i < rows; i++){
        for(int j = 0; j < cols; j++){
            doMultiplications<<<blocks,threads>>>(a,b,multiplications,i,j);
        }
    }
    std::cout << "Sent multiplication requests." << std::endl;
    matrix *c;
    cudaMallocManaged(&c,sizeof(matrix));
    cudaDeviceSynchronize();
    std::cout << "Finished multiplication operations." << std::endl;
    c->yDim = a->yDim;
    c->xDim = b->xDim;
    int matrixSize = c->yDim * c->xDim;
    cudaMallocManaged(&c->vals,sizeof(int) * matrixSize);
    // BUG FIX: cudaMallocManaged does not zero-initialize, and doAdds
    // accumulates with +=, so the result buffer must start at 0.
    cudaMemset(c->vals, 0, sizeof(int) * matrixSize);
    threads = (matrixSize/32 +1)* 32;
    blocks = 1;
    if(threads > MAX_THREADS){
        blocks = threads/MAX_THREADS + 1;
        threads = MAX_THREADS;
    }
    std::cout << "Determined that the matrix needs: " << blocks << " blocks and " << threads << " threads to add. Needs a total of " << matrixSize << " add operations." << std::endl;
    doAdds<<<blocks,threads>>>(a,b,multiplications,c);
    cudaDeviceSynchronize();
    std::cout << "Finished all adds." << std::endl;
    cudaFree(multiplications);
    return c;
}
// Allocate (managed) an x-by-y matrix and fill it with random ints in [0, 5).
_matrix *generateRandomMatrix(int x, int y){
    int *vals;
    cudaMallocManaged(&vals, sizeof(int) * x * y);
    for(int row = 0; row < y; ++row)
        for(int col = 0; col < x; ++col)
            vals[row * x + col] = rand() % 5;
    matrix *m;
    cudaMallocManaged(&m, sizeof(matrix));
    m->xDim = x;
    m->yDim = y;
    m->vals = vals;
    return m;
}
// Release a matrix allocated by this module (values buffer, then the header).
void deleteMatrix(_matrix *mat){
    cudaFree(mat->vals);
    cudaFree(mat);
}
// Multiply two random 450x450 matrices on the GPU and release everything.
int main(void){
    matrix *lhs = generateRandomMatrix(450,450);
    matrix *rhs = generateRandomMatrix(450,450);
    matrix *product = multiplyMatrices(lhs,rhs);
    deleteMatrix(lhs);
    deleteMatrix(rhs);
    deleteMatrix(product);
    return 0;
}
22,540 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Mandelbrot escape-time iteration: starting from z = c, iterate
// z := z^2 + c and return how many iterations (capped at max) pass before
// |z|^2 exceeds 4.
__device__ int diverge_gpu(float c_re, float c_im, int max) {
    float re = c_re;
    float im = c_im;
    int iter = 0;
    while (iter < max) {
        if (re * re + im * im > 4.f)
            break;
        float next_re = re * re - im * im;
        float next_im = 2.f * re * im;
        re = c_re + next_re;
        im = c_im + next_im;
        ++iter;
    }
    return iter;
}
// One thread per pixel (flat 1-D launch): pixel (thisX, thisY) of a
// resX x resY image; c[index] receives its Mandelbrot iteration count.
__global__ void mandelKernel(int *c, float lowerX, float lowerY, float stepX, float stepY, int resX, int resY, int maxIterations) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard the grid tail: launches rounded up to the block size spawn
    // threads past the last pixel, which previously wrote out of bounds.
    if (index >= resX * resY)
        return;
    int thisX = index % resX;
    int thisY = index / resX;
    float x = lowerX + thisX * stepX;
    float y = lowerY + thisY * stepY;
    c[index] = diverge_gpu(x, y, maxIterations);
}
// Host front-end: allocates the device image buffer, launches mandelKernel
// over all resX*resY pixels, and copies the iteration counts into img.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    int size = resX * resY;
    int *d_c;
    int block_size = 1024;
    // BUG FIX: size/block_size truncated, so up to block_size-1 tail pixels
    // were never computed.  Round the grid up instead.
    int grid_size = (size + block_size - 1) / block_size;
    // Plain linear allocation: the kernel indexes c[] linearly, so the old
    // pitched allocation (whose pitch was ignored) was inconsistent with the
    // linear copy-back.  Allocate one slot per launched thread so even an
    // unguarded kernel tail stays inside the buffer.
    cudaMalloc(&d_c, (size_t)grid_size * block_size * sizeof(int));
    mandelKernel<<<grid_size, block_size>>>(d_c, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
    cudaDeviceSynchronize();
    // Copy results straight into the caller's buffer; the intermediate
    // host staging array added nothing.
    cudaMemcpy(img, d_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_c);
}
|
22,541 | #include "includes.h"
// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; threads past the tail do nothing.
__global__ void vectorAddition (float *a, float *b, float *c, int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
22,542 | #include "use_matrix.cuh"
// Entry point: all work is delegated to use_matrix() (see use_matrix.cuh).
int main()
{
use_matrix();
return 0;
}
22,543 | #include "includes.h"
// Radix-sort helper for bit `pass`: for each element, count how many earlier
// elements *within its block* have bit `pass` clear (an exclusive count,
// written to output[mid]); each block's total count of clear-bit elements is
// written to output_block[blockIdx.x].  Requires blockDim.x == BLOCK_SIZE.
// Out-of-range lanes are padded with 0xFFFFFFFF so every bit reads as set
// and they contribute nothing to the counts.
__global__ void scan_sum_kernel(unsigned int* input_vals, unsigned int pass, unsigned int * output, unsigned int* output_block, unsigned int size, unsigned int block_num) {
unsigned int tid = threadIdx.x;
unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ unsigned int shared_input_vals[BLOCK_SIZE];
__shared__ unsigned int shared_output[BLOCK_SIZE];
// Stage this block's slice of the input; pad the tail with all-ones keys.
if (mid >= size) {
shared_input_vals[tid] = 0xFFFFFFFF;
} else {
shared_input_vals[tid] = input_vals[mid];
}
__syncthreads();
// Seed the scan with a SHIFTED predicate: lane tid holds 1 iff the
// *previous* element's bit is clear (lane 0 has no predecessor).  An
// inclusive scan of this yields an exclusive count per lane.
if (tid == 0 || ((shared_input_vals[tid - 1] >> pass) & 0x01)) {
shared_output[tid] = 0;
} else {
shared_output[tid] = 1;
}
__syncthreads();
// Hillis-Steele inclusive scan.  The read and the add are separated by
// barriers so no lane overwrites a value another lane still needs.
for (unsigned int i = 1; i < BLOCK_SIZE; i <<= 1) {
unsigned int val = 0;
if (tid >= i) {
val = shared_output[tid - i];
}
__syncthreads();
shared_output[tid] += val;
__syncthreads();
}
if (mid < size) {
output[mid] = shared_output[tid];
// The block's last valid lane publishes the block total, adding its own
// element if that element's bit is clear (the shifted seed excluded it).
if ((mid == size - 1) || (tid == BLOCK_SIZE-1)) {
output_block[blockIdx.x] = shared_output[tid];
if (!((shared_input_vals[tid] >> pass) & 0x01)) {
output_block[blockIdx.x] += 1;
}
}
}
__syncthreads();
}
22,544 | #include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
// Compare hostRef and gpuRef element-wise; print the first element whose
// difference exceeds a small absolute tolerance, or report a full match.
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8;
    bool match = true;
    for (int i = 0; i < N; ++i) {
        // fabs, not abs: without <cmath>'s overloads in scope, abs() is the
        // integer version and truncates any |difference| < 1 to zero,
        // silently hiding mismatches.
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = false;
            printf("Array do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match){
        printf("Array match.\n\n");
    }
}
// Fill ip with `size` pseudo-random floats in [0, 25.5], seeding rand()
// from the current wall-clock time.
void initialData(float *ip, int size){
    srand((unsigned) time(NULL));
    for (int k = 0; k < size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
// Fill ip with the identity sequence 0, 1, ..., size-1.
void initialInt(int *ip, int size){
    int k = 0;
    while (k < size) {
        ip[k] = k;
        ++k;
    }
}
// CPU reference implementation: C[i] = A[i] + B[i] for all N elements.
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    for (int idx = 0; idx != N; ++idx)
        C[idx] = A[idx] + B[idx];
}
// GPU element-wise add, one thread per element with a tail guard.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Print an ny-by-nx int matrix, %3d-formatted, preceded by its dimensions.
void printMatrix(int *C, const int nx, const int ny){
    printf("\nMatrix: (%d,%d)\n", nx, ny);
    for (int row = 0; row < ny; ++row) {
        for (int col = 0; col < nx; ++col) {
            printf("%3d", C[row * nx + col]);
        }
        printf("\n");
    }
    printf("\n");
}
// Debug kernel: each thread prints its thread/block ids, its (ix, iy) matrix
// coordinate, the flattened row-major index, and the value it reads from A.
// Assumes the grid covers exactly nx x ny elements (no bounds guard).
__global__ void printThreadIndex(int *A, const int nx, const int ny){
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Row-major flatten: element (ix, iy) lives at iy * nx + ix.
unsigned int idx = iy * nx + ix;
printf("thread_id (%d,%d) block_id (%d,%d) coordinate(%d,%d)"
"global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x,
blockIdx.y, ix, iy, idx, A[idx]);
}
// Demo driver: builds an 8x6 int matrix (values = their own indices), prints
// it on the host, then launches printThreadIndex so every GPU thread reports
// the element it maps to.
int main() {
    int dev = 0;
    struct cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    // Element type is int: sizing with sizeof(float) only worked by the
    // accident that sizeof(float) == sizeof(int).
    size_t nBytes = nxy * sizeof(int);
    int *h_A = (int *)malloc(nBytes);
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);
    int *d_A;
    cudaMalloc((void **)&d_A, nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    // 4x2 thread blocks tiled (round-up) over the nx x ny matrix.
    dim3 block(4, 2);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printThreadIndex<<<grid, block>>>(d_A, nx, ny);
    cudaDeviceSynchronize();
    cudaFree(d_A);
    free(h_A);
    cudaDeviceReset();
    return 0;
}
22,545 | ///
/// Useful Functions and Types
///
// Geometry primitives: coordinates and vectors share float3 storage; a
// pMatrix3x3 stores its three rows.
typedef float3 pCoor;
typedef float3 pVect;
struct pMatrix3x3 { float3 r0, r1, r2; };
// Drop the w component of a float4.
__device__ float3
make_float3(float4 f4){return make_float3(f4.x,f4.y,f4.z);}
__device__ float3 m3(float4 a){ return make_float3(a); }
__device__ float3 xyz(float4 a){ return m3(a); }
// Widen a float3 to a float4 with an explicit w.
__device__ float4 m4(float3 v, float w) { return make_float4(v.x,v.y,v.z,w); }
// Component-wise vector arithmetic.
__device__ pVect operator +(pVect a,pVect b)
{ return make_float3(a.x+b.x,a.y+b.y,a.z+b.z); }
__device__ pVect operator -(pVect a,pVect b)
{ return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); }
__device__ pVect operator -(float4 a,float4 b)
{ return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); }
__device__ pVect operator -(pCoor a,float4 b)
{ return make_float3(a.x-b.x,a.y-b.y,a.z-b.z); }
// Scalar and component-wise (Hadamard) products.
__device__ pVect operator *(float s, pVect v)
{return make_float3(s*v.x,s*v.y,s*v.z);}
__device__ float4 operator *(float s, float4 v)
{return make_float4(s*v.x,s*v.y,s*v.z,s*v.w);}
__device__ pVect operator *(pVect u, pVect v)
{return make_float3(u.x*v.x,u.y*v.y,u.z*v.z);}
__device__ pVect operator -(pVect v) { return make_float3(-v.x,-v.y,-v.z); }
__device__ float3 operator -=(float3& a, pVect b) {a = a - b; return a;}
__device__ float3 operator +=(float3& a, pVect b) {a = a + b; return a;}
__device__ float3 operator *=(float3& a, float b)
{ a.x *= b; a.y *= b; a.z *= b; return a;}
// Normalized direction bundled with its (squared) length.
struct pNorm {
pVect v;
float mag_sq, magnitude;
};
__device__ pVect operator *(float s, pNorm n) { return s * n.v;}
// Make a Coordinate
__device__ pCoor
mc(float x, float y, float z){ return make_float3(x,y,z); }
__device__ pCoor mc(float4 c){ return make_float3(c.x,c.y,c.z); }
// Copy a float4 into a float3 / a float3 into a float4 (w defaults to 1).
__device__ void set_f3(float3& a, float4 b){a.x = b.x; a.y = b.y; a.z = b.z;}
__device__ void set_f4(float4& a, float3 b)
{a.x = b.x; a.y = b.y; a.z = b.z; a.w = 1;}
__device__ void set_f4(float4& a, float3 b, float c)
{a.x = b.x; a.y = b.y; a.z = b.z; a.w = c;}
// Make a Vector
__device__ pVect
mv(float x, float y, float z){ return make_float3(x,y,z); }
__device__ pVect mv(float3 a, float3 b) { return b-a; }
__device__ pVect mv(float a) { return make_float3(a,a,a); }
// Dot products (4- and 3-component), squared magnitude, length, normalize.
__device__ float dot(float4 a, float4 b)
{ return a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w;}
__device__ float dot(pVect a, pVect b){ return a.x*b.x + a.y*b.y + a.z*b.z;}
__device__ float dot(pVect a, pNorm b){ return dot(a,b.v); }
__device__ float dot(pNorm a, pVect b){ return dot(a.v,b); }
__device__ float dot3(float4 a, float4 b){ return dot(m3(a),m3(b)); }
__device__ float mag_sq(pVect v){ return dot(v,v); }
__device__ float length(pVect a) {return sqrtf(mag_sq(a));}
__device__ pVect normalize(pVect a) { return rsqrtf(mag_sq(a))*a; }
// Make a Normal (a structure containing a normalized vector and length).
// A zero vector yields a zero direction with zero magnitude.
__device__ pNorm mn(pVect v)
{
pNorm n;
n.mag_sq = mag_sq(v);
if ( n.mag_sq == 0 )
{
n.magnitude = 0;
n.v.x = n.v.y = n.v.z = 0;
}
else
{
const float rsq = 1.0f/sqrtf(n.mag_sq);
// 1/rsq == sqrtf(mag_sq): the vector's true length.
n.magnitude = 1.0f/rsq;
n.v = rsq * v;
}
return n;
}
__device__ pNorm mn(float4 a, float4 b) {return mn(b-a);}
__device__ pNorm mn(pCoor a, pCoor b) {return mn(b-a);}
__device__ pNorm mn(float x, float y, float z) {return mn(mv(x,y,z));}
// NOTE(review): the two overloads below trust the caller's direction and
// magnitude and leave mag_sq uninitialized — confirm callers never read it.
__device__ pNorm mn(float4 v4)
{ pNorm n; n.v = m3(v4); n.magnitude = v4.w; return n; }
__device__ pNorm mn(float3 v3, float mag)
{ pNorm n; n.v = v3; n.magnitude = mag; return n; }
// The unary - operator doesn't seem to work when used in an argument.
__device__ pNorm operator -(pNorm n)
{
pNorm m;
m.magnitude = n.magnitude;
m.mag_sq = n.mag_sq;
m.v = -n.v;
return m;
}
// Quaternion: vector part v, scalar part w.
struct pQuat {
float3 v;
float w;
};
// Make Quaternion encoding a rotation of `angle` radians about `axis`
// (half-angle construction; axis must be normalized).
__device__ pQuat mq(pNorm axis, float angle)
{
pQuat q;
q.v = __sinf(angle/2) * axis.v;
q.w = __cosf(angle/2);
return q;
}
// Reinterpret a float4 (x,y,z,w) as a quaternion, component for component.
__device__ pQuat cast_quat(float4 v)
{
pQuat q;
q.v.x = v.x;
q.v.y = v.y;
q.v.z = v.z;
q.w = v.w;
return q;
}
__device__ pQuat cq(float4 v){ return cast_quat(v); }
// Scale a quaternion to unit length.
__device__ pQuat quat_normalize(pQuat q)
{
float len_sq = dot(q.v,q.v) + q.w * q.w;
float norm_factor = 1.0f / sqrtf(len_sq);
pQuat r;
r.v = norm_factor * q.v;
r.w = norm_factor * q.w;
return r;
}
// Make float4
__device__ float4 c4(pQuat q){ return make_float4(q.v.x,q.v.y,q.v.z,q.w); }
__device__ float4 m4(pNorm v, float w) { return m4(v.v,w); }
// Component-wise abs and the min/max/sum over a vector's components.
__device__ pVect fabs(pVect v){ return mv(fabs(v.x),fabs(v.y),fabs(v.z)); }
__device__ float min(pVect v){ return min(min(v.x,v.y),v.z); }
__device__ float max(pVect v){ return max(max(v.x,v.y),v.z); }
__device__ float sum(pVect v){ return v.x+v.y+v.z; }
// Cross Product of Two Vectors
__device__ float3
cross(float3 a, float3 b)
{
return make_float3
( a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x );
}
__device__ pVect cross(pVect a, pNorm b){ return cross(a,b.v); }
__device__ pVect cross(pNorm a, pVect b){ return cross(a.v,b); }
__device__ pVect crossf3(float4 a, float4 b) { return cross(m3(a),m3(b)); }
// Cross Product of Vectors Between Coordinates: (a-b) x (c-b).
__device__ float3
cross3(float3 a, float3 b, float3 c)
{
float3 ab = a - b;
float3 cb = c - b;
return cross(ab,cb);
}
__device__ pVect cross3(pVect a, pVect b, pNorm c) { return cross3(a,b,c.v); }
// Hamilton product of two quaternions (composition of rotations).
__device__ pQuat quat_mult(pQuat a, pQuat b)
{
float w = a.w * b.w - dot(a.v,b.v);
float3 v = a.w * b.v + b.w * a.v + cross(a.v,b.v);
pQuat q;
q.w = w;
q.v = v;
return q;
};
__device__ pQuat operator * (pQuat q, pQuat v)
{ return quat_mult(q,v); }
// Build the rotation matrix for a rotation of `theta` radians about the unit
// axis u (Rodrigues' rotation formula), writing the three rows of m.
// u must be normalized.
__device__ void
pMatrix_set_rotation(pMatrix3x3& m, pVect u, float theta)
{
  const float cos_theta = __cosf(theta);
  // BUG FIX: take sin(theta) directly.  The previous
  // sqrtf(1 - cos_theta*cos_theta) form always yields a non-negative value,
  // so any theta with sin(theta) < 0 (e.g. negative angles) produced the
  // INVERSE rotation.
  const float sin_theta = __sinf(theta);
  m.r0.x = u.x * u.x + cos_theta * ( 1 - u.x * u.x );
  m.r0.y = u.x * u.y * ( 1 - cos_theta ) - u.z * sin_theta;
  m.r0.z = u.z * u.x * ( 1 - cos_theta ) + u.y * sin_theta;
  m.r1.x = u.x * u.y * ( 1 - cos_theta ) + u.z * sin_theta;
  m.r1.y = u.y * u.y + cos_theta * ( 1 - u.y * u.y );
  m.r1.z = u.y * u.z * ( 1 - cos_theta ) - u.x * sin_theta;
  m.r2.x = u.z * u.x * ( 1 - cos_theta ) - u.y * sin_theta;
  m.r2.y = u.y * u.z * ( 1 - cos_theta ) + u.x * sin_theta;
  m.r2.z = u.z * u.z + cos_theta * ( 1 - u.z * u.z );
}
// Set matrix m to a rotation matrix based on quaternion q
// (standard unit-quaternion-to-matrix expansion; q must be normalized).
__device__ void
pMatrix_set_rotation(pMatrix3x3& m, float4 q)
{
m.r0.x = 1.f - 2.f * q.y * q.y - 2.f * q.z * q.z;
m.r0.y = 2.f * q.x * q.y - 2.f * q.w * q.z;
m.r0.z = 2.f * q.x * q.z + 2.f * q.w * q.y;
m.r1.x = 2.f * q.x * q.y + 2.f * q.w * q.z;
m.r1.y = 1.f - 2.f * q.x * q.x - 2.f * q.z * q.z;
m.r1.z = 2.f * q.y * q.z - 2.f * q.w * q.x;
m.r2.x = 2.f * q.x * q.z - 2.f * q.w * q.y;
m.r2.y = 2.f * q.y * q.z + 2.f * q.w * q.x;
m.r2.z = 1.f - 2.f * q.x * q.x - 2.f * q.y * q.y;
}
// Convenience overload for the pQuat representation.
__device__ void
pMatrix_set_rotation(pMatrix3x3& m, pQuat q)
{
pMatrix_set_rotation(m,c4(q));
}
// Matrix * vector: each result component is a row dotted with the vector.
__device__ float3 operator *(pMatrix3x3 m, float3 coor)
{ return make_float3(dot(m.r0,coor), dot(m.r1,coor), dot(m.r2,coor)); }
|
22,546 | #include <thrust/transform.h>
// Intentionally empty: exists only to verify the translation unit builds
// against the Thrust headers included above.
void test() {
}
22,547 | #include <iostream>
#include <cuda.h>
// CUDA smoke test: report the device count, then exercise cudaMalloc and
// cudaFree on a small buffer, printing each call's error string.
int main()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    std::cout << deviceCount << std::endl;
    double *buf;
    cudaError_t status = cudaMalloc(&buf, 100*sizeof(double));
    std::cout << cudaGetErrorString(status) << std::endl;
    status = cudaFree(buf);
    std::cout << cudaGetErrorString(status) << std::endl;
}
|
22,548 | // Include packages and also CUDA packages
#include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Result from last compute of world.
// Result from last compute of world (the next-generation buffer).
unsigned char *g_resultData=NULL;
// Current state of world (one cell per byte: 0 = dead, 1 = alive).
unsigned char *g_data=NULL;
// Current width of world.
size_t g_worldWidth=0;
/// Current height of world.
size_t g_worldHeight=0;
/// Current data length (product of width and height)
size_t g_dataLength=0; // g_worldWidth * g_worldHeight
// Allocate managed world buffers of worldWidth x worldHeight cells, all dead.
static inline void HL_initAllZeros( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = worldWidth * worldHeight;
    size_t bytes = g_dataLength * sizeof(unsigned char);
    // Managed allocations so both host init code and the kernel touch them.
    cudaMallocManaged(&g_data, bytes);
    cudaMemset(g_data, 0, bytes);
    cudaMallocManaged(&g_resultData, bytes);
    cudaMemset(g_resultData, 0, bytes);
}
// Allocate the world buffers and set every cell alive.
static inline void HL_initAllOnes( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    cudaMallocManaged(&g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (g_dataLength * sizeof(unsigned char)));
    // size_t index: an int counter overflows (and is a signed/unsigned
    // comparison) once the world exceeds 2^31 cells.
    for( size_t i = 0; i < g_dataLength; i++)
    {
        g_data[i] = 1;
    }
    cudaMallocManaged(&g_resultData, (g_dataLength * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (g_dataLength * sizeof(unsigned char)));
}
// Allocate the world buffers and set alive a 10-cell horizontal run on
// row 10 (columns 10..19).  All other cells start dead.
static inline void HL_initOnesInMiddle( size_t worldWidth, size_t worldHeight )
{
int i;
g_worldWidth = worldWidth;
g_worldHeight = worldHeight;
g_dataLength = g_worldWidth * g_worldHeight;
cudaMallocManaged(&g_data, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_data, 0, (g_dataLength * sizeof(unsigned char)));
// Walk row 10 only; light the cells whose column falls in [10, 20).
for( i = 10*g_worldWidth; i < 11*g_worldWidth; i++)
{
if( (i >= ( 10*g_worldWidth + 10)) && (i < (10*g_worldWidth + 20)))
{
g_data[i] = 1;
}
}
cudaMallocManaged(&g_resultData, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (g_dataLength * sizeof(unsigned char)));
}
// Allocate the world buffers and set the four corner cells alive.
static inline void HL_initOnesAtCorners( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    cudaMallocManaged(&g_data, (g_dataLength * sizeof(unsigned char)));
    cudaMemset(g_data, 0, (g_dataLength * sizeof(unsigned char)));
    g_data[0] = 1;                                           // upper left
    g_data[worldWidth-1] = 1;                                // upper right
    // BUG FIX: the last row starts at (worldHeight-1)*worldWidth.  The old
    // worldHeight*(worldWidth-1) expression only coincides for square
    // worlds; for rectangular worlds it hit the wrong cell, and for
    // width > height+1 the lower-right index went past the buffer.
    g_data[(worldHeight-1) * worldWidth] = 1;                // lower left
    g_data[(worldHeight-1) * worldWidth + worldWidth-1] = 1; // lower right
    cudaMallocManaged(&g_resultData, (g_dataLength * sizeof(unsigned char)));
    cudaMemset(g_resultData, 0, (g_dataLength * sizeof(unsigned char)));
}
// Allocate the world buffers and place a three-cell "spinner" straddling the
// top-left corner: (0,0), (1,0), and — via the toroidal wrap — (width-1, 0).
static inline void HL_initSpinnerAtCorner( size_t worldWidth, size_t worldHeight )
{
g_worldWidth = worldWidth;
g_worldHeight = worldHeight;
g_dataLength = g_worldWidth * g_worldHeight;
cudaMallocManaged(&g_data, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_data, 0, (g_dataLength * sizeof(unsigned char)));
g_data[0] = 1; // upper left
g_data[1] = 1; // upper left +1
g_data[worldWidth-1]=1; // upper right
cudaMallocManaged(&g_resultData, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (g_dataLength * sizeof(unsigned char)));
}
// Allocate the world buffers and place the HighLife "replicator" seed near
// the world center (worldWidth/2, worldHeight/2): a three-cell horizontal
// arm and a three-cell vertical arm.
static inline void HL_initReplicator( size_t worldWidth, size_t worldHeight )
{
size_t x, y;
g_worldWidth = worldWidth;
g_worldHeight = worldHeight;
g_dataLength = g_worldWidth * g_worldHeight;
cudaMallocManaged(&g_data, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_data, 0, (g_dataLength * sizeof(unsigned char)));
x = worldWidth/2;
y = worldHeight/2;
// Horizontal arm on row y, columns x+1..x+3.
g_data[x + y*worldWidth + 1] = 1;
g_data[x + y*worldWidth + 2] = 1;
g_data[x + y*worldWidth + 3] = 1;
// Vertical arm in column x, rows y+1..y+3.
g_data[x + (y+1)*worldWidth] = 1;
g_data[x + (y+2)*worldWidth] = 1;
g_data[x + (y+3)*worldWidth] = 1;
cudaMallocManaged(&g_resultData, (g_dataLength * sizeof(unsigned char)));
cudaMemset(g_resultData, 0, (g_dataLength * sizeof(unsigned char)));
}
// Dispatch world initialization by pattern id (0-5); exits on unknown ids.
static inline void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight )
{
    switch(pattern)
    {
    case 0: HL_initAllZeros( worldWidth, worldHeight );      break;
    case 1: HL_initAllOnes( worldWidth, worldHeight );       break;
    case 2: HL_initOnesInMiddle( worldWidth, worldHeight );  break;
    case 3: HL_initOnesAtCorners( worldWidth, worldHeight ); break;
    case 4: HL_initSpinnerAtCorner( worldWidth, worldHeight ); break;
    case 5: HL_initReplicator( worldWidth, worldHeight );    break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
// Exchange the two world buffers by swapping the pointers they hang from.
static inline void HL_swap( unsigned char **pA, unsigned char **pB)
{
    unsigned char *held = *pA;
    *pA = *pB;
    *pB = held;
}
/*
// Don't Modify this function or your submitty autograding will not work
static inline void HL_printWorld(size_t iteration)
{
int i, j;
printf("Print World - Iteration %lu \n", iteration);
for( i = 0; i < g_worldHeight; i++)
{
printf("Row %2d: ", i);
for( j = 0; j < g_worldWidth; j++)
{
printf("%u ", (unsigned int)g_data[(i*g_worldWidth) + j]);
}
printf("\n");
}
printf("\n\n");
}*/
// HighLife step kernel.  Each thread processes whole ROWS of the toroidal
// world in a grid-stride loop: for each of its rows it walks every column,
// sums the eight wrap-around neighbors, and writes the next generation into
// d_resultData (B36/S23: born with 3 or 6 neighbors, survives with 2 or 3).
__global__ void HL_kernel(unsigned char* d_data, unsigned int worldWidth, unsigned int worldHeight, unsigned char* d_resultData){
    size_t row;
    // BUG FIX: the loop variable is a ROW index, so the bound must be
    // worldHeight.  The old bound of worldWidth*worldHeight made
    // y1 = row*worldWidth run far past the end of both buffers.
    for(row = blockIdx.x * blockDim.x + threadIdx.x; row < worldHeight; row += blockDim.x * gridDim.x){
        // Offsets of the row above (y0), this row (y1), and below (y2),
        // wrapping top/bottom edges toroidally.
        size_t y0 = ((row + worldHeight - 1) % worldHeight) * worldWidth;
        size_t y1 = row * worldWidth;
        size_t y2 = ((row + 1) % worldHeight) * worldWidth;
        for (unsigned int x = 0; x < worldWidth; ++x){
            // Columns left (x0), current (x1), right (x2), wrapping edges.
            unsigned int x1 = x;
            unsigned int x0 = (x1 + worldWidth - 1) % worldWidth;
            unsigned int x2 = (x1 + 1) % worldWidth;
            int is_alive = d_data[x1+y1];
            // Sum the 8 neighbors (cells are 0 or 1).
            int num_alive = d_data[x0+y0] + d_data[x1+y0] + d_data[x2+y0]
                          + d_data[x0+y1] + d_data[x2+y1]
                          + d_data[x0+y2] + d_data[x1+y2] + d_data[x2+y2];
            if (is_alive){
                // Live cell survives only with 2 or 3 live neighbors.
                d_resultData[x1+y1] = (num_alive == 2 || num_alive == 3) ? 1 : 0;
            } else {
                // HighLife birth rule: 3 or 6 neighbors resurrect a cell.
                d_resultData[x1+y1] = (num_alive == 3 || num_alive == 6) ? 1 : 0;
            }
        }
    }
    // No trailing __syncthreads(): threads share no intermediate state, and
    // cross-block completion is enforced by the host's cudaDeviceSynchronize().
}
// Run `iterationsCount` HighLife generations on the GPU, ping-ponging the
// two buffers between steps.  threadsCount is threads per block.
bool HL_kernelLaunch(unsigned char** d_data, unsigned char** d_resultData, size_t worldWidth, size_t worldHeight, size_t iterationsCount, ushort threadsCount){
    size_t i;
    size_t cells = worldHeight * worldWidth;
    // BUG FIX: round the block count up (plain division truncated) and never
    // launch an empty grid — a zero block count is an invalid configuration
    // and silently did no work for small worlds.
    size_t block_count = (cells + threadsCount - 1) / threadsCount;
    if (block_count == 0) block_count = 1;
    for(i = 0; i < iterationsCount; i++){
        HL_kernel<<<block_count, threadsCount>>>(*d_data, worldWidth, worldHeight, *d_resultData);
        // The next generation reads what this one wrote, so wait for it.
        cudaDeviceSynchronize();
        // The freshly written buffer becomes the next input.
        HL_swap(d_data, d_resultData);
    }
    cudaDeviceSynchronize();
    return true;
}
// Entry: parse <pattern> <world size> <iterations> <threads per block>,
// initialize the square world, and run the simulation on the GPU.
int main(int argc, char *argv[])
{
    printf("This is the HighLife running in Parallel on a GPU.\n");
    if( argc != 5 )
    {
        printf("HighLife requires 4 arguments, 1st is pattern number, 2nd the sq size of the world, 3rd is the number of iterations, and 4th is the thread count, e.g. ./highlife 0 64 2 32 \n");
        exit(-1);
    }
    unsigned int pattern = atoi(argv[1]);
    unsigned int worldSize = atoi(argv[2]);
    unsigned int iterations = atoi(argv[3]);
    unsigned int thread_count = atoi(argv[4]);
    HL_initMaster(pattern, worldSize, worldSize);
    HL_kernelLaunch(&g_data, &g_resultData, worldSize, worldSize, iterations, thread_count);
    cudaFree(g_data);
    cudaFree(g_resultData);
    // BUG FIX: `return true` exited with status 1, which shells treat as
    // failure; successful termination must return 0.
    return 0;
}
|
22,549 | #include "includes.h"
#define BLOCK_SIZE 1024
#ifndef THREADS
# define THREADS 1024
#endif
// Block-level sum reduction: each block sums up to 2*blockDim.x consecutive
// input elements (each thread loads two, blockDim.x apart) and writes its
// partial sum to output[blockIdx.x].  Launch with blockDim.x == BLOCK_SIZE.
__global__ void total(float * input, float * output, unsigned int len) {
    __shared__ float sum[2*BLOCK_SIZE];
    unsigned int i = threadIdx.x;
    unsigned int j = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    // BUG FIX: guard on the GLOBAL index j, not the thread index i.
    // Guarding on i let every block after the first read input[] out of
    // bounds whenever its j >= len.
    float localSum = (j < len) ? input[j] : 0;
    if (j + blockDim.x < len) localSum += input[j + blockDim.x];
    sum[i] = localSum;
    __syncthreads();
    // Shared-memory tree reduction; the barrier sits outside the if so
    // every thread in the block reaches it.
    for (unsigned int step = blockDim.x / 2; step >= 1; step >>= 1) {
        if (i < step) sum[i] = localSum = localSum + sum[i + step];
        __syncthreads();
    }
    if(i == 0) output[blockIdx.x] = sum[0];
}
22,550 | #include "includes.h"
// Naive out-of-place transpose of a width x height matrix: one thread per
// element, reading idata row-major and writing odata column-major
// (writes are uncoalesced — hence "naive").
__global__ void transpose_naive(float *odata, float* idata, int width, int height)
{
    unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    odata[col * height + row] = idata[row * width + col];
}
22,551 | #include "pinnedmem.cuh"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Allocate memSize bytes of host memory.  PINNED mode uses page-locked
// memory (write-combined where CUDART >= 2020 supports it); PAGEABLE mode
// falls back to plain malloc.  Returns a cudaError_t in either mode.
cudaError_t mallocHost(void** h_mem ,unsigned int memSize, memoryMode memMode, bool wc)
{
    if( PINNED == memMode ) {
#if CUDART_VERSION >= 2020
        return cudaHostAlloc( h_mem, memSize, (wc) ? cudaHostAllocWriteCombined : 0 );
#else
        if (wc) {printf("Write-Combined unavailable on CUDART_VERSION less than 2020, running is: %d", CUDART_VERSION);}
        return cudaMallocHost( h_mem, memSize );
#endif
    }
    else { // PAGEABLE memory mode
        *h_mem = malloc( memSize );
        // BUG FIX: report allocation failure instead of returning
        // cudaSuccess with a NULL pointer.
        if (*h_mem == NULL) return cudaErrorMemoryAllocation;
    }
    return cudaSuccess;
}
// Release memory obtained from mallocHost, matching its allocation mode.
cudaError_t freeHost(void* h_mem, memoryMode memMode)
{
    if( memMode == PINNED )
        return cudaFreeHost(h_mem);
    free(h_mem);
    return cudaSuccess;
}
22,552 | #include "includes.h"
// Dropout kernel: zero each element whose pre-drawn random number is at or
// below drop_rate; multiply the survivors by `scale`.  One thread per
// element with a tail guard.
__global__ void dropout_op(size_t sz, float_t* random_nums, float_t* data, float_t drop_rate, float_t scale)
{
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx >= sz)
        return;
    data[idx] = (random_nums[idx] <= drop_rate) ? (float_t)0 : data[idx] * scale;
}
22,553 | //new
/***************** EXAMPLE ***********************
ArrayVals: 9, 31, 4, 18
padded ArrayVals: 09, 31, 04, 18
create histogram of size 10 for buckets 0-9
which each element initialized to 0. Use a thread
on each element of ArrayVals and increment the value
in the bucket it belongs to. This will count how many
values that belong in each bucket. In the above
example the histogram values would look like this:
HISTOGRAM:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET
--------------------------------------
2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER
next use an array to count the OFFSET and a copy of that OFFSET array.
This is done by taking the element value at each index of the
histogram and adding it to the value at the previous index.
OFFSET Original:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
--------------------------------------
2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4
OFFSET CHANGED IS JUST A
COPY OF OFFSET ORIGINAL.
OFFSET Changed:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
--------------------------------------
2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4
^ ^ ^
| | |
| | taken from 4th index in histogram plus previous (1+3)
| |
| taken from second index plus the first index (1+2)
|
taken from the first index in histogram (2)
The reason we create a copy is because later, when we
want to determine how to rearrange the elements, we have
to decrement the values in OFFSET so they don't overwrite
each other but we must also remember the original OFFSET
values. This will become clearer later.
As you can see the numbers that repeat occur (like index 2
and 4-9) when its corresponding index in the histogram equals 0
so the value doesn't increase.
Now we need to iterate over ArrayVals again and look at
the OFFSET changed array index it corresponds with to determine
where it goes in the list. We'll create a second temporary
list so that we don't ruin the order of the elements in the
original ArrayVals. This can be done in parallel so we can
use a thread to look at each element of ArrayVals at once.
secondList[ArrayValsSize];
we will, for example, look at the first element in ArrayVals.
Its left most digit is 0 so we will look at index 0 in the
OFFSET changed array. We notice it has a value 2 so we can place this
number at the 2nd index of the secondList array we just created.
This would be index 1 because arrays start at 0. So whatever
number fills the OFFSET changed index we subtract 1 to determine the position
to insert into the secondList. After we input into the secondList
we want to decrement the value in OFFSET changed so that the next number
that checks can be placed in an empty spot and not overwrite
the numbers in the same bucket. This means index 0 of the OFFSET changed
array goes from 2 to 1. We do the same thing for the other three
elements in ArrayVals. 31's first digit is a 3 so look at index 3 in
OFFSET changed and we see that it gets placed at 4-1=3 index in the secondList.
Remember to decrement the value at OFFSET changed[3] which = 4 so it becomes 3.
continue this with the next value which is 04 which means we look at
OFFSET changed[0], because its left most digit is 0, which has a value of 1
because the value 2 was decremented when 09 was placed in secondList above
in line 75-78. Because the value is now 1 that means we insert 04 into
index 1-1=0 of secondList. We finish with value 18. OFFSET changed[1] (because its
left most bit is 1) has a value of 3 so we put 18 into secondList[2]
because 3-1 = 2. After every element has been properly inserted into secondList,
it should now look like this:
secondList:
04, 09, 18, 31
We can see that it's sorted but the computer doesn't know that.
In order to be sure its sorted we iterate through the histogram
and check to see if each value is at most 1. So if any value
in histogram is greater than 1 then we can't be sure its sorted
because we don't know which threads finished first.
So next if we find a value in histogram that is greater than 1 we
look to that index but in the original OFFSET. So histogram[0] has
a value of 2 which means we look in the original OFFSET[0] to get
the value 2. This means we are working from the ranges of
0-2 in the secondList. so we create histogram and OFFSET again.
To do this we just use a recursion and basically repeate the process
above but now only working with elements 0 and 1 based on the range
provided. We want to do the same process as above but
on the next digit to the right. so we sort 04 and 09
by counting them into the histogram and finding the OFFSET just
like above in lines 15-30.
They will each look like this:
HISTOGRAM:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
--------------------------------------
0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1
OFFSET:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
--------------------------------------
0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 2
We iterate over histogram and see if any values are
greater than 1. There are none so they must all be
sorted! so we iterate over histogram and when we
get to a value that is non 0 we can point to
secondList and overwrite those numbers with the
current numbers and they will be in the correct
order. histogram[4] is the first element with a
non 0 value. We were given ranges 0-2 from above
(see lines 103-106) so we start at 0 and point
to secondList[0] and insert 4. Then we continue
our iteration over histogram and get to 9 as the
next non 0 element. We can point to secondList[1]
to insert 9. We are done with this part so it will
return to the previous step which is line 102 where
it will continuing iterating over its histogram
looking for values greater than 1. Refer to the
histogram displayed on line 23 as displayed here:
HISTOGRAM:
0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET
--------------------------------------
2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER
We branched off initially from histogram[0] because it
had a value greater than 1 but now we are back and can
continue. The rest of the elements contain either a 0 or 1
so don't need to be sorted anymore. This means secondList
contains the sorted array.
All that is left is to use threads for each element
of secondList and copy their value into the original
array ArrayVals because ArrayVals is the one that
was sent from the CPU that needs to go back to the CPU.
The array is sorted and we are done!
**************************************************/
//new
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// #define MAX 2147483647;
#define MAX 99;
unsigned int * valuesList;
unsigned int totalNumbers;
/* Print a signed int array as "[ a b c ]" followed by a newline. */
void printArray(int * array, int size) {
    int idx = 0;
    printf("[ ");
    while (idx < size) {
        printf("%d ", array[idx]);
        idx++;
    }
    printf("]\n");
}
/* Print an unsigned int array as "[ a b c ]" followed by a newline.
   (Kept the original "%d" conversion to preserve output byte-for-byte.) */
void printArrayU(unsigned int * array, int size) {
    int idx = 0;
    printf("[ ");
    while (idx < size) {
        printf("%d ", array[idx]);
        idx++;
    }
    printf("]\n");
}
/*
 * One GPU pass of a bucket/radix step: histogram valuesList by the digit
 * selected via integer division by `digit`, build a prefix-sum offset table
 * in shared memory, then scatter the values back into valuesList grouped by
 * bucket.  The offset tables are exported through mainOffset /
 * mainOffsetAfter so the host can inspect them.
 *
 * NOTE(review): the bucket index is value/digit with no `% 10`, so this only
 * selects a digit correctly when `digit` isolates the most significant digit
 * of the input range (here MAX is 99 and digit == 10) -- confirm.
 */
__global__ void radixSort(unsigned int* valuesList, int digit, int arraySize, int* histogram, int* mainOffset, int* mainOffsetAfter) {
// each element is corresponds to a bucket from 0-9
// each element initialized to 0.
// __shared__ int histogram[10];
// int OFFSETOriginal[10];
__shared__ int OFFSETChanged[10];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// take element in values at this instanced thread and find the digit
// we're looking for thats passed in and increment the corresponding element
// in the histogram
if (tid < arraySize)
atomicAdd(&histogram[valuesList[tid]/digit], 1);
// NOTE(review): __syncthreads() only synchronizes this block, but `histogram`
// is filled by atomics from every block -- with more than one block the scan
// below can read a partially built histogram.
__syncthreads();
// find offset values
// NOTE(review): every thread performs this scan concurrently with plain
// (non-atomic) writes to shared/global memory -- a data race that is only
// benign while all threads happen to write identical values.
// OFFSETOriginal[0] = histogram[0];
OFFSETChanged[0] = histogram[0];
// mainHistogram[0] = histogram[0]; // for testing purposes.
mainOffset[0] = histogram[0];
for (int i = 1; i < 10; i++) {
// mainHistogram[i] = histogram[i]; // for testing purposes.
// OFFSETOriginal[i] = OFFSETOriginal[i-1] + histogram[i];
OFFSETChanged[i] = OFFSETChanged[i-1] + histogram[i];
mainOffset[i] = OFFSETChanged[i];
}
// group numbers together by bucket
if (tid < arraySize) {
// get the value at this instanced threads id that corresponds to the value at its index in valuesList
int value = valuesList[tid];
// find the max index this threads value found from valueList by looking in its offsetbucket
int index = OFFSETChanged[valuesList[tid]/digit] - 1;
// set every element in valuesList to 0.
// valuesList[tid] = 0;
// OFFSETChanged[valuesList[tid]/digit]--;
// NOTE(review): this __syncthreads() sits inside a divergent branch; when
// arraySize is not a multiple of blockDim.x some threads never reach it,
// which is undefined behavior per the CUDA programming guide.
__syncthreads();
// place the values at their index found above as long as its empty (contains a 0)
// if its filled from another thread already placing a value there,
// go to the index before it and keep searching down until you find an empty spot
// while (valuesList[index] != 0) {
// atomicAdd(&OFFSETChanged[valuesList[tid]/digit], -1);
// index = OFFSETChanged[valuesList[tid]/digit] - 1;
// }
int previousValue = value;
// NOTE(review): `index` is read before the atomic decrement below, so two
// threads in the same bucket can compute the same slot and overwrite each
// other -- values can be lost.
valuesList[index] = value;
atomicAdd(&OFFSETChanged[previousValue/digit], -1);
// the list should now be sorted by the 10's digit
}
__syncthreads();
// Export the post-scatter offsets for host-side debugging.
for (int i = 0; i < 10; i++) {
mainOffsetAfter[i] = OFFSETChanged[i];
}
return;
}
/* Unused stub: intended per-bucket sort helper; currently a no-op. */
__device__ void bucketSort(int* values, int digit) {
}
// Host-side copies of the device-side debug tables (10 buckets each):
// the per-digit histogram, the offsets before the scatter, and after it.
int * histogram;
int * offset;
int * offsetAfter;
/*
 * Driver: generate `totalNumbers` random two-digit values, run one radixSort
 * pass on the GPU (bucketed by the 10's digit), then print the debug
 * histogram/offset tables and the values before and after.
 */
int main(int argc, char **argv) {
    // The element count comes from the command line; fail loudly instead of
    // dereferencing a missing argv[1].
    if (argc < 2) {
        fprintf(stderr, "usage: %s <totalNumbers>\n", argv[0]);
        return 1;
    }
    totalNumbers = atoi(argv[1]);
    int histogramSize = 10;
    valuesList = (unsigned int *)malloc(sizeof(unsigned int)*totalNumbers);
    histogram = (int*)malloc(sizeof(int)*histogramSize);
    offset = (int*)malloc(sizeof(int)*histogramSize);
    offsetAfter = (int*)malloc(sizeof(int)*histogramSize);
    unsigned int* d_valuesList;
    int* d_histogram;
    int* d_offset;
    int* d_offsetAfter;
    srand(1); // fixed seed so runs are reproducible
    // generate totalNumbers random numbers for valuesList
    for (int i = 0; i < totalNumbers; i++) {
        valuesList[i] = (int) rand()%MAX;
    }
    printf("VALUES BEFORE:\n");
    printArrayU(valuesList, totalNumbers);
    // zero the host-side debug tables before copying them to the device
    for (int i = 0; i < histogramSize; i++) {
        histogram[i] = 0;
        offset[i] = 0;
        offsetAfter[i] = 0;
    }
    cudaMalloc((void **) &d_valuesList, sizeof(unsigned int)*totalNumbers);
    cudaMemcpy(d_valuesList, valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &d_histogram, sizeof(int)*histogramSize);
    cudaMemcpy(d_histogram, histogram, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &d_offset, sizeof(int)*histogramSize);
    cudaMemcpy(d_offset, offset, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &d_offsetAfter, sizeof(int)*histogramSize);
    cudaMemcpy(d_offsetAfter, offsetAfter, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    // `digit` is the divisor that isolates the digit being sorted on: 10 for
    // the 10's place, 100 for the 100's place, etc.  Values are capped at two
    // digits here (MAX is 99), so a single pass on the 10's digit is done.
    int digit = 10;
    // Ceil-divide the grid so a partial block covers the tail of the array.
    radixSort<<<(totalNumbers+255)/256, 256>>>(d_valuesList, digit, totalNumbers, d_histogram, d_offset, d_offsetAfter);
    cudaMemcpy(valuesList, d_valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyDeviceToHost);
    cudaFree(d_valuesList);
    cudaMemcpy(histogram, d_histogram, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    cudaFree(d_histogram);
    cudaMemcpy(offset, d_offset, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    cudaFree(d_offset);
    cudaMemcpy(offsetAfter, d_offsetAfter, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    cudaFree(d_offsetAfter);
    printf("HISTOGRAM:\n");
    printArray(histogram, histogramSize);
    printf("OFFSET BEFORE:\n");
    printArray(offset, histogramSize);
    printf("OFFSET AFTER:\n");
    printArray(offsetAfter, histogramSize);
    // print valuesList
    printf("VALUES AFTER:\n");
    printArrayU(valuesList, totalNumbers);
    // Release host buffers (the originals were leaked).
    free(valuesList);
    free(histogram);
    free(offset);
    free(offsetAfter);
    return 0;
}
|
22,554 | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <tiffio.h>
#include <cuda.h>
#include <cuComplex.h>
/* Euclidean distance from the pinhole to pixel (bidx, bidy), with the
   optical centre shifted 80 pixels in x, all converted to metres via
   pixelsize. */
__device__ float distcalc(unsigned int bidx, unsigned int bidy, unsigned int width, unsigned int height, float pinholedist, float pixelsize){
    // Horizontal and vertical offsets from the (shifted) image centre.
    float dx = ((float)bidx - (float)width/2 - 80) * pixelsize;
    float dy = ((float)bidy - (float)height/2) * pixelsize;
    // 3-D Pythagoras: in-plane offset plus the pinhole standoff distance.
    return sqrtf((dx * dx) + (dy * dy) + (pinholedist * pinholedist));
}
/*
 * Compute the complex reference wave exp(i*k*Rxy) per pixel, where Rxy is the
 * pinhole-to-pixel distance from distcalc().  Expects a 2-D launch with one
 * thread per pixel; output is width*height cuComplex values, row-major.
 */
__global__ void gpurefwavecalckernel(cuComplex *gpureferencewave, unsigned int width, unsigned int height, float pinholedist, float k, float pixelsize){
    int bidx = blockIdx.x * blockDim.x + threadIdx.x;
    int bidy = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard against grids that overshoot the image (width/height not
    // multiples of the block size); without this, excess threads wrote out of
    // bounds / into the wrong rows.
    if (bidx >= (int)width || bidy >= (int)height)
        return;
    int offset = (bidy * width) + bidx;
    float s, c;   // renamed from sin/cos to avoid shadowing the libm functions
    float Rxy = distcalc(bidx, bidy, width, height, pinholedist, pixelsize);
    // One fused evaluation of sin and cos of the phase.
    sincosf(k * Rxy, &s, &c);
    gpureferencewave[offset].x = c; // real part: cos(k*Rxy)
    gpureferencewave[offset].y = s; // imaginary part: sin(k*Rxy)
}
/*
 * Host wrapper: allocate the device buffer, launch the reference-wave kernel,
 * and copy the result into refwave (width*height cuComplex values).
 * Returns 0 (errors are only reported on stdout/stderr, as before).
 *
 * NOTE(review): the grid is width/16 x height/16 (truncating), so dimensions
 * not divisible by 16 leave edge pixels uncomputed -- kept as-is to preserve
 * existing behavior; switch to ceil-division now that the kernel guards its
 * bounds.
 */
extern "C" int gpurefwavecalc(cuComplex *refwave, unsigned int width, unsigned int height, float pinholedist, float k, float pixelsize){
    dim3 threadsPerBlock(16,16);
    dim3 numBlock(width/threadsPerBlock.x, height/threadsPerBlock.y);
    cuComplex *gpureferencewave;
    cudaMalloc(&gpureferencewave, sizeof(cuComplex) * width * height);
    cudaDeviceSynchronize();
    printf("Allocating Memory errors (?): %s\n", cudaGetErrorString(cudaGetLastError()));
    gpurefwavecalckernel<<<numBlock, threadsPerBlock>>>(gpureferencewave, width, height, pinholedist, k, pixelsize);
    // BUG FIX: check the launch itself; a bad configuration previously went
    // unreported until the next unrelated API call.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaMemcpy(refwave, gpureferencewave, sizeof(cuComplex) * width * height, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    printf("Copying results from GPU errors (?): %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(gpureferencewave);
    return(0);
}
|
22,555 | #include "includes.h"
/* 2x downsampling: each target pixel is the average of the corresponding
   2x2 block of source pixels, per RGB channel.  One thread per target
   pixel; images are interleaved 3-channel float, row-major. */
__global__ void Shrink_DownSampling( float *target, const float *source, const int wt, const int ht, const int ws, const int hs )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= wt or y >= ht)
        return;
    const int t = y * wt + x;            // target pixel index
    const int s = (2 * y) * ws + 2 * x;  // top-left pixel of the 2x2 source block
    // Average the 2x2 neighborhood channel by channel.
    for (int c = 0; c < 3; ++c) {
        target[t*3 + c] = (source[s*3 + c]
                         + source[(s + 1)*3 + c]
                         + source[(s + ws)*3 + c]
                         + source[(s + ws + 1)*3 + c]) / 4.0f;
    }
}
22,556 | //============================================================================
// Name : MF6.cpp
// Author : Sohrab
// Version : 1
// Copyright : Hi!
// Description : Matched Filter in C++, Ansi-style
//============================================================================
#include <iostream>
#include <string>
#include <cmath>
#include <math.h>
#include <ctime>
#include <complex>
#include <vector>
#include <string>
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
#include <thrust/complex.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// references
#define date_ref "1015"
#define obj_ref "1"
#define run_ref 2
// 0: down sampling, 1: averaging, 2: nothing
#define average 2
//internal distance
#define int_dst 2.615
// TX relative position to TX's starting point
#define Tx_pos_x 0.41
#define Tx_pos_y -0.028
#define Tx_pos_z -0.012
//starting point of using samples
#define N_spl_fr 1000
#define N_lfreq_spl 0
#define N_hfreq_spl 0
#define N_mfreq_spl (N_spl_fr/2)-N_lfreq_spl-N_hfreq_spl
#define N_mfreq_spl_slow 2*((N_spl_fr/2)-N_lfreq_spl-N_hfreq_spl)
// Number of frames for each axis
#define N_x_stg 20 //1667
#define N_z_stg 20
//constants
#define Ts 6e-3
#define Rs 5e5
#define lambda 4.983873e-3
//step size between each two frames considered
#define dlx 0.005 * 0.006
#define dlz lambda/2
#define linxmax -dlx/2-(N_x_stg-1)*dlx
#define linzmax -dlz/2-(N_z_stg-1)*dlz
// environment dimensions
#define xmin -5
#define xmax 5
#define ymin 0
#define ymax 10
#define zmin -1.5
#define zmax 1.5
//resolution
#define res 0.07
//scientific values for some constants
#define sci_fac 1e8
#define c_sci 2.9979
#define fc_sci 609
#define As1_sci 1.5001e4
#define As2_sci 7.5005e3
#define file_size 100020000
#define size1 3000
#define BLOCK_WIDTH 8
#define Beat_I(uu, vv, nn) Beat_I[uu*N_z_stg*size1 + vv*size1 + nn]
#define Beat_R(uu, vv, nn) Beat_R[uu*N_z_stg*size1 + vv*size1 + nn]
#define cell_MF(xx, yy, zz) cell_MF[xx*Ny*Nz + yy*Nz + zz]
#define deviceCellMF(xx, yy, zz) deviceCellMF[xx*Ny*Nz + yy*Nz + zz]
using namespace std;
/****************** FUNCTIONS ******************/
/* 3-D coordinates recovered from a flattened index. */
struct indices {
    int kx;
    int ky;
    int kz;
};
/* Decompose a flat index k (taken modulo n1*n2*n3) into 3-D coordinates,
   with kx varying fastest and kz slowest. */
indices idxfinder(int n1, int n2, int n3, int k) {
    indices out;
    int flat = k % (n1 * n2 * n3);
    out.kx = flat % n1;                 // fastest axis
    out.ky = (flat / n1) % n2;          // middle axis
    out.kz = (flat / (n1 * n2)) % n3;   // slowest axis
    return out;
}
/****************************************************/
/************* KERNEL CALL *************************/
/*
 * Matched-filter back-projection.  One thread per voxel (xx,yy,zz) of the
 * Nx x Ny x Nz grid; each thread correlates the recorded beat signals from
 * every receiver position (uu,vv) against the expected phase for its voxel
 * and accumulates the complex correlation into cell_MF.
 *
 * NOTE(review): the per-thread locals Beat[size1] plus the two template
 * arrays total tens of KB per thread and will spill to (slow) local memory;
 * consider staging Beat in shared memory.
 */
__global__ void matchedFilterKernel(float* Beat_R, float* Beat_I, thrust::complex<float>* cell_MF, int Nx, int Ny, int Nz) {
// Voxel-grid and receiver-array coordinate helpers (file-scope constants).
#define MF_x_axis(xx) (xx*res + xmin)
#define MF_y_axis(yy) (yy*res + ymin)
#define MF_z_axis(zz) (zz*res + zmin)
#define u_axis(uu) (-dlx/2 - uu*dlx)
#define v_axis(vv) (-dlz/2 - vv*dlz)
    const float pi = acosf(-1);
    const thrust::complex<float> i_float(0, 1); // imaginary unit (unused double version removed)
    int xx = blockIdx.x * blockDim.x + threadIdx.x;
    int yy = blockIdx.y * blockDim.y + threadIdx.y;
    int zz = blockIdx.z * blockDim.z + threadIdx.z;
    if(xx < Nx && yy < Ny && zz < Nz) {
        float cell_z = MF_z_axis(zz);
        float cell_y = MF_y_axis(yy);
        float cell_x = MF_x_axis(xx);
        thrust::complex<float> cell_sum = 0;
        // Transmitter-to-voxel distance: constant across all receivers.
        float cell_dist_t = sqrtf(
            (cell_x - Tx_pos_x) * (cell_x - Tx_pos_x)
            + (cell_y - Tx_pos_y) * (cell_y - Tx_pos_y)
            + (cell_z - Tx_pos_z) * (cell_z - Tx_pos_z));
        for (int uu = 0; uu < N_x_stg; uu++) {
            float x_diff = (cell_x - u_axis(uu)) * (cell_x - u_axis(uu));
            for (int vv = 0; vv < N_z_stg; vv++) { // 2d receiver array
                // Round-trip delay: Tx -> voxel -> receiver element (uu,vv).
                float temp_tau = (cell_dist_t + int_dst * 2 + sqrtf( x_diff +
                    (cell_z - v_axis(vv)) * (cell_z - v_axis(vv)) + cell_y * cell_y) ) / c_sci;
                thrust::complex<float> temp_sig = exp(-i_float * (float) fmod((float)2.0 * pi * fc_sci * temp_tau, 2*pi) );
                thrust::complex<float> Beat[size1];
                thrust::complex<float> cell_sig_fst_temp[N_mfreq_spl];
                thrust::complex<float> cell_sig_slow_temp[N_mfreq_spl_slow];
                for(int nn = 0; nn < size1; nn++) // assemble the complex beat signal
                    Beat[nn] = Beat_R(uu, vv, nn) + i_float * Beat_I(uu, vv, nn);
                // Fast-chirp up-sweep samples.
                for (int nn = 0; nn < N_mfreq_spl; nn++) {
                    cell_sig_fst_temp[nn] = temp_sig * exp(-i_float * (float) fmod((float)(2.0 * pi *
                        As1_sci * (N_lfreq_spl / Rs + nn / Rs) * temp_tau), 2*pi));
                    cell_sum += cell_sig_fst_temp[nn] * (Beat_R(uu, vv, nn) + i_float * Beat_I(uu, vv, nn));
                }
                // Down-sweep reuses the fast templates in mirrored order.
                for (int nn = N_mfreq_spl; nn < 2*N_mfreq_spl; nn++) {
                    cell_sum += cell_sig_fst_temp[2*N_mfreq_spl-1-nn] * Beat[nn];
                }
                // Slow-chirp up-sweep.
                for (int nn = 0; nn < N_mfreq_spl_slow; nn++) {
                    cell_sig_slow_temp[nn] = temp_sig * exp(-i_float * (float) fmod((float)(2.0 * pi
                        * As2_sci * (N_lfreq_spl * 2 / Rs + nn / Rs) * temp_tau), 2*pi) );
                    cell_sum += cell_sig_slow_temp[nn] * Beat[nn+2*N_mfreq_spl];
                }
                // Slow-chirp down-sweep, mirrored templates.
                for (int nn = N_mfreq_spl_slow; nn < 2* N_mfreq_spl_slow; nn++) {
                    cell_sum += cell_sig_slow_temp[2*N_mfreq_spl_slow-1-nn] * Beat[nn+2*N_mfreq_spl];
                }
            }
        }
        // BUG FIX: this store used to sit OUTSIDE the bounds check above, so
        // out-of-range threads wrote past the end of cell_MF.
        cell_MF(xx, yy, zz) = cell_sum;
    }
#undef MF_x_axis
#undef MF_y_axis
#undef MF_z_axis
#undef u_axis
#undef v_axis
}
/**************************************************/
/*
 * Host driver: read the recorded beat signals from two .bin files, run the
 * matched-filter kernel over the full voxel grid, and print a few result
 * cells plus timing information.
 */
int
main(void)
{
cudaError_t err = cudaSuccess;
/************* LARGE ARRAY DECLRATATIONS AND NX, NY, NZ************/
int Nx = 143; // (int) floor((xmax-xmin)/res)+1; //143
int Ny = 143; //(int) floor((ymax-ymin)/res)+1; //143
int Nz = 43; //(int) floor((zmax-zmin)/res)+1; //43
// complex<float> cell_sig_fst[N_x_stg][N_z_stg][N_mfreq_spl];
// complex<float> cell_sig_slow[N_x_stg][N_z_stg][N_mfreq_spl_slow];
// Allocate host memory
float* Beat_R = (float *)malloc(N_x_stg * N_z_stg * size1 * sizeof(float)); //[N_x_stg][N_z_stg][size1] = {};
float* Beat_I = (float *)malloc(N_x_stg * N_z_stg * size1 * sizeof(float)); //[N_x_stg][N_z_stg][size1] = {};
thrust::complex<float>* cell_MF = (thrust::complex<float>*)malloc(Nx * Ny * Nz * sizeof(thrust::complex<float>)); //[Nx][Ny][Nz] 143 * 143 *43
// Verify that allocations succeeded
if (Beat_R == NULL || Beat_I == NULL || cell_MF == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
/**************************************************/
clock_t begin = clock();
clock_t end;
// srand (time(NULL));
// for (int ii = 0; ii < N_z_stg; ii++)
// for (int jj = 0; jj < N_x_stg; jj++)
// for (int kk = 0; kk < size1; kk++) {
// Beat_R(jj, ii, kk) = (rand()%10 + 1)/10;
// Beat_I(jj, ii, kk) = (rand()%10 + 1)/10;
// }
/*********** READ THE .BIN FILES ************/
// NOTE(review): the data read into b[] below is discarded -- Beat_R is
// filled with the constant 1 (and Beat_I with 0 further down).  These look
// like debug stubs; restore `b[kk]` before using real data.
FILE *fp = fopen("/home/synrg-gpu1/Desktop/MF6/testReal.bin","rb");
for (int ii = 0; ii < N_z_stg; ii++){
for (int jj = 0; jj < N_x_stg; jj++) {
float b[size1];
fseek(fp, (ii*N_x_stg + jj)*size1*4, SEEK_SET);
fread(b, sizeof *b, size1, fp);
for(int kk = 0; kk < size1; kk++) {
Beat_R(jj, ii, kk) = 1;
//if (ii == 0 && jj == 1 && kk < 500) cout << b[kk] << endl;
}
}
}
fclose(fp);
cout << "Successfully read the file in " << (double) (clock() - begin) / CLOCKS_PER_SEC << " seconds!" << endl;
FILE *fp2 = fopen("/home/synrg-gpu1/Desktop/MF6/testImag.bin","rb");
for (int ii = 0; ii < N_z_stg; ii++){
for (int jj = 0; jj < N_x_stg; jj++) {
float b[size1];
fseek(fp2, (ii*N_x_stg + jj)*size1*4, SEEK_SET);
fread(b, sizeof *b, size1, fp2);
for(int kk = 0; kk < size1; kk++) {
Beat_I(jj, ii, kk) = 0;
}
}
}
fclose(fp2);
cout << "Successfully read the files in " << (double) (clock() - begin) / CLOCKS_PER_SEC << " seconds!" << endl;
// cout << Beat_I(149, 14, 149)<< endl << endl;
/******************** END OF READ FILE *********************/
//some constants
const float pi = acos(-1);
const thrust::complex<double> i(0, 1);
const thrust::complex<float> i_float(0, 1);
// NOTE(review): this loop prints cell_MF BEFORE the kernel has run -- it
// reads uninitialized malloc'd memory; remove it or move it after the copy
// back from the device.
for (int i = 0; i < 1; i++){
for (int j = 0; j < 1; j++){
for (int k = 0; k < 10; k++) {
cout << cell_MF(k, j, i) << " ";
}
std::endl( std::cout );
}
std::endl( std::cout );
}
float* deviceBeatI;
float* deviceBeatR;
thrust::complex<float>* deviceCellMF;
clock_t begin_mem = clock();
// Allocate GPU memory
err = cudaMalloc((void **) &deviceBeatR , N_z_stg * N_x_stg * size1 * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate deviceBeatR (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **) &deviceBeatI , N_z_stg * N_x_stg * size1 * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate deviceBeatI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **) &deviceCellMF , Nx * Ny * Nz * sizeof(thrust::complex<float>));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate deviceCellMF (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(deviceBeatR, Beat_R, N_z_stg * N_x_stg * size1 * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy deviceBeatR from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(deviceBeatI, Beat_I, N_z_stg * N_x_stg * size1 * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy deviceBeatI from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "hi!" << endl;
// One thread per voxel in BLOCK_WIDTH^3 blocks; ceil-divide each axis.
dim3 DimGrid(ceil(Nx * 1.0 / BLOCK_WIDTH), ceil(Ny * 1.0 / BLOCK_WIDTH), ceil(Nz * 1.0 /BLOCK_WIDTH));
dim3 DimBlock(BLOCK_WIDTH, BLOCK_WIDTH, BLOCK_WIDTH);
cout << "Allocating & copying memory DONE! Time taken:" << (double) (clock() - begin_mem) / CLOCKS_PER_SEC;
matchedFilterKernel<<<DimGrid, DimBlock>>>(deviceBeatR, deviceBeatI, deviceCellMF, Nx, Ny, Nz);
// Wait for the kernel so the subsequent error check reflects its execution.
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch matchedFilterKernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
begin_mem = clock();
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(cell_MF, deviceCellMF, Nx * Ny * Nz * sizeof(thrust::complex<float>), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy deviceCellMF from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cout << "Copying memory back DONE! Time taken:" << (double) (clock() - begin_mem) / CLOCKS_PER_SEC;
cout << "Hi! \n";
cout << Nx << endl;
cout << Ny << endl;
cout << Nz << endl;
cout << N_x_stg << endl;
cout << N_z_stg << endl;
cout << N_mfreq_spl_slow << endl;
end = clock();
cout << "DONE! Time taken:" << (double) (end - begin) / CLOCKS_PER_SEC;
// Print a small corner of the reconstructed volume.
for (int i = 0; i < 1; i++){
for (int j = 0; j < 5; j++){
for (int k = 0; k < 5; k++) {
cout << cell_MF(k, j, i) << " ";
}
std::endl( std::cout );
}
std::endl( std::cout );
}
cudaFree(deviceBeatR);
cudaFree(deviceBeatI);
cudaFree(deviceCellMF);
free(Beat_R);
free(Beat_I);
free(cell_MF);
return 0;
}
|
22,557 | #include "stdlib.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#define BMP_HEADER 14
#define DIB_HEADER 40
#define IMAGE_WIDTH 4608
#define IMAGE_HEIGHT 3456
#define IMAGE_BYTES_PER_PIXEL 3
#define IMAGE_SIZE (IMAGE_WIDTH*IMAGE_HEIGHT*IMAGE_BYTES_PER_PIXEL)
#define BUF_SIZE (IMAGE_SIZE+BMP_HEADER+DIB_HEADER)
/*
 * Read up to bufferLen bytes of the file at pPath into imageData and store
 * the number of bytes actually read in *imageLen.  On failure a diagnostic
 * is printed; *imageLen still receives the (possibly partial) byte count.
 */
void ReadBMP(const char * pPath, uint8_t * imageData, uint32_t bufferLen, uint32_t * imageLen)
{
    if (pPath==NULL || imageData==NULL || imageLen==NULL) {
        printf("[ReadBMP]: Invalid args: pPath=%p, %s, imageData=%p\n"
            , pPath, pPath==NULL?"NULL":pPath, imageData);
        return;
    }
    /* "rb": BMP is binary data; text mode would mangle it on some platforms. */
    FILE * pFile=fopen(pPath, "rb");
    if (NULL==pFile) {
        printf("[ReadBMP]Open file %s failed! %d %s\n", pPath, errno, strerror(errno));
        return;
    }
    /* BUG FIX: fread returns a size_t count and NEVER returns -1; errors must
     * be detected with ferror(), not a sign check. */
    size_t ret = fread(imageData, 1, bufferLen, pFile);
    if (ferror(pFile)) {
        printf("[ReadBMP]Read file %s failed %d, %s\n", pPath, errno, strerror(errno));
    }
    *imageLen=(uint32_t)ret;
    fclose(pFile);
    return;
}
/* Smoke test: read a BMP into a buffer and print the byte count read. */
int main()
{
    /* BUF_SIZE is ~47 MB: allocate on the heap -- the original stack array
     * overflowed the typical 8 MB stack limit.  calloc also replaces the
     * explicit memset. */
    uint8_t *imageBuf = (uint8_t *)calloc(1, BUF_SIZE);
    if (imageBuf == NULL) {
        fprintf(stderr, "Failed to allocate %u bytes\n", (unsigned)BUF_SIZE);
        return 1;
    }
    uint32_t imageLen = 0;
    ReadBMP("./material/cap_1.bmp", imageBuf, BUF_SIZE, &imageLen);
    printf("simple read test: len=%u", imageLen);
    free(imageBuf);
    return 0;
}
|
22,558 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*
 * Auto-generated floating-point fuzz kernel (see file header: do not modify
 * the generated arithmetic).  Mixes the scalar inputs through a fixed
 * sequence of arithmetic and device-libm calls, writes var_21[0..var_2), and
 * prints the final accumulator.
 */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float var_22,float var_23,float var_24) {
comp += (-1.8025E35f + (var_3 + -1.3294E36f * (var_4 - var_5)));
float tmp_1 = -1.7807E-43f;
comp += tmp_1 / cosf(-1.5429E-21f);
comp += (var_6 * (var_7 * -1.3947E-36f - -1.9180E-8f + asinf((+1.4801E-41f - logf((+1.3532E7f / var_8 * -1.1467E35f))))));
if (comp > +1.5884E-44f * cosf(var_9 - sinhf((var_10 * floorf(+1.9366E-41f))))) {
comp = var_11 + var_12 + var_13;
comp += var_14 / -1.9598E24f;
}
// var_1 iterations mutate only locals; the last iteration's value survives.
for (int i=0; i < var_1; ++i) {
float tmp_2 = (var_15 * (-0.0f + (var_16 / (var_17 - -1.1200E34f + var_18))));
float tmp_3 = +1.2603E35f;
comp = tmp_3 - tmp_2 - (var_19 * (-1.2492E-42f * +1.9760E35f * -1.0139E34f * (+0.0f / var_20)));
}
// Writes the first var_2 elements of var_21 (caller provides 10 elements).
for (int i=0; i < var_2; ++i) {
var_21[i] = +1.9935E-37f + +1.7535E-37f + var_22 / +1.9573E-41f;
comp = var_21[i] + (var_23 / sinhf((-1.7727E-7f / (var_24 - (+1.5863E-41f * -1.7726E-36f)))));
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array with every slot initialized to v. */
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float)*count);
    for (int idx = 0; idx < count; ++idx) {
        buf[idx] = v;
    }
    return buf;
}
/*
 * Parse the 25 numeric inputs of the generated kernel from the command line
 * and launch it on a single thread.
 */
int main(int argc, char** argv) {
    /* BUG FIX: the kernel needs argv[1..25]; bail out instead of
     * dereferencing missing arguments. */
    if (argc < 26) {
        fprintf(stderr, "usage: %s <25 numeric arguments>\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    /* NOTE(review): initPointer returns a host malloc'd buffer, yet it is
     * passed to the kernel, which dereferences it on the device -- this needs
     * cudaMalloc/cudaMemcpy (or managed memory) to be valid.  Left unchanged
     * because the file is marked auto-generated. */
    float* tmp_22 = initPointer( atof(argv[22]) );
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
    cudaDeviceSynchronize();
    free(tmp_22); /* release the host buffer (previously leaked) */
    return 0;
}
|
22,559 | #include <iostream>
#include <vector>
#include <cmath>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define BLOCK_SIZE 16
#define N 1024
// C = A * B for row-major N x N int matrices, one thread per output element.
__global__ void gpu_matrix_mul(int *a, int *b, int *c){
    const int r = blockIdx.y * blockDim.y + threadIdx.y; // output row
    const int q = blockIdx.x * blockDim.x + threadIdx.x; // output column
    if (r >= N || q >= N)
        return;
    int acc = 0;
    // Dot product of row r of A with column q of B.
    for (int k = 0; k < N; ++k)
        acc += a[r*N + k] * b[k*N + q];
    c[r*N + q] = acc;
}
/*
 * CPU reference: C = A * B for N x N int matrices, timed with std::chrono.
 * BUG FIX: the original did `C[i][j] += ...` without zeroing, so the result
 * depended on whatever garbage C held; each cell is now computed into a
 * fresh local accumulator and stored, so callers need not pre-zero C.
 */
void cpu_matrix_mul(int A[N][N], int B[N][N], int C[N][N]){
    auto start = high_resolution_clock::now();
    for(int i = 0;i < N;i ++){
        for(int j = 0;j < N;j ++){
            int sum = 0; // fresh accumulator per output cell
            for(int k = 0;k < N;k ++){
                sum += A[i][k]*B[k][j];
            }
            C[i][j] = sum;
        }
    }
    auto stop = high_resolution_clock::now();
    auto cpu_time = duration_cast<microseconds>(stop - start).count();
    cout << endl << " CPU exec time: " << cpu_time << endl;
}
/* Compare the CPU and GPU matrix multiplies and verify they agree. */
int main(){
    // BUG FIX: 4 MB per matrix -- three of these on the stack overflow the
    // typical 8 MB limit, so allocate them on the heap.  CPU_C is
    // value-initialized to 0 so the accumulating CPU multiply starts clean.
    auto CPU_A = new int[N][N];
    auto CPU_B = new int[N][N];
    auto CPU_C = new int[N][N]();
    for(int i = 0;i < N;i ++){
        for(int j = 0;j < N;j ++){
            CPU_A[i][j] = rand()%293;
            CPU_B[i][j] = rand()%66;
        }
    }
    cpu_matrix_mul(CPU_A, CPU_B, CPU_C);
    //GPU duration count
    int *host_a, *host_b, *host_c, *device_a, *device_b, *device_c;
    host_a = (int *)malloc((N*N) * sizeof(int));
    host_b = (int *)malloc((N*N) * sizeof(int));
    host_c = (int *)malloc((N*N) * sizeof(int));
    // Flatten to row-major 1-D arrays for the device.
    for(int i = 0;i < N;i ++){
        for(int j = 0;j < N;j ++){
            host_a[i * N + j] = CPU_A[i][j];
            host_b[i * N + j] = CPU_B[i][j];
        }
    }
    cudaMalloc(&device_a, (N*N)*sizeof(int));
    cudaMalloc(&device_b, (N*N)*sizeof(int));
    cudaMalloc(&device_c, (N*N)*sizeof(int));
    cudaMemcpy(device_a, host_a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    auto start = high_resolution_clock::now();
    gpu_matrix_mul<<<dimGrid, dimBlock>>>(device_a, device_b, device_c);
    // BUG FIX: the launch is asynchronous -- wait for completion before
    // stopping the timer, otherwise only launch overhead is measured.
    cudaDeviceSynchronize();
    auto stop = high_resolution_clock::now();
    auto gpu_time = duration_cast<microseconds>(stop - start).count();
    cout << endl << " GPU time: " << gpu_time << endl;
    cudaMemcpy(host_c, device_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    //Verify
    cout << host_c[0] << " " << CPU_C[0][0] << endl;
    cout << host_c[1] << " " << CPU_C[0][1] << endl;
    for(int i = 0;i < N;i ++){
        for(int j = 0;j < N;j ++){
            if(host_c[i * N + j] != CPU_C[i][j]){
                cout << endl << "FAILED" << endl;
                return -1;
            }
        }
    }
    cout << endl << "PASSED" << endl;
    // Release everything (the original leaked all allocations).
    cudaFree(device_a); cudaFree(device_b); cudaFree(device_c);
    free(host_a); free(host_b); free(host_c);
    delete[] CPU_A; delete[] CPU_B; delete[] CPU_C;
    return 0;
}
22,560 | //cuda version of test.c
#include <stdio.h>
#define N 256
#define TPB 256
// Each thread prints a greeting tagged with its global thread id.
__global__ void helloWorldKernel(){
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    printf("Hello World! My threadId is %2d\n", tid);
}
/* Launch one greeting per element across N/TPB blocks of TPB threads. */
int main(){
    helloWorldKernel <<<N/TPB, TPB>>>();
    // BUG FIX: kernel launches are asynchronous -- without this sync the
    // process can exit before the device-side printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
|
22,561 | #include "includes.h"
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[256];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
//utilizacion del teorema de pitagoras a lo largo del vector en el gpu
//creacion de un buffer de imagenes, regresando al host, pasando del dispositivo al host de puntero a puntero
/*
 * Convolve a grayscale image with a kWidth x kHeight kernel stored in
 * constant memory at kOffset.  Expects a 2-D launch covering the image;
 * pixels whose neighborhood (kernel radius plus padding) falls outside the
 * interior accumulate nothing and are written as 0.
 *
 * NOTE(review): the interior test uses blockDim*gridDim as the image extent,
 * i.e. it assumes the grid exactly matches the image -- confirm with callers.
 */
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, size_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
    // Pixel coordinates handled by this thread.
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    float sum = 0.0;
    int pWidth = kWidth / 2;   // horizontal kernel radius
    int pHeight = kHeight / 2; // vertical kernel radius
    // Only accumulate for pixels whose full neighborhood is inside the image.
    if (x >= pWidth + paddingX &&
        y >= pHeight + paddingY &&
        x < (blockDim.x * gridDim.x) - pWidth - paddingX &&
        y < (blockDim.y * gridDim.y) - pHeight - paddingY)
    {
        for (int j = -pHeight; j <= pHeight; j++)
        {
            for (int i = -pWidth; i <= pWidth; i++)
            {
                // Kernel weight for this tap.
                int ki = (i + pWidth);
                int kj = (j + pHeight);
                float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
                sum += w * float(source[((y + j) * width) + (x + i)]);
            }
        }
    }
    // BUG FIX: guard the store -- when the grid overshoots the image
    // (dimensions not multiples of the block size) the unguarded write ran
    // past the end of destination.
    if (x < width && y < height)
        destination[(y * width) + x] = (unsigned char)sum;
}
22,562 | #include "includes.h"
/*
 * Bilinear batch resize, grayscale to grayscale.  One thread per destination
 * pixel across all rectangles: thread gid serves pixel (gid % dst_w*dst_h)
 * of rectangle (gid / dst_w*dst_h).  Each rect is {x, y, w, h} within
 * src_image; output is normalized [0,1] floats written contiguously per rect.
 *
 * NOTE(review): the interpolation samples coor+1, so it can read one pixel
 * beyond the right/bottom edge of a rect -- assumes rects leave at least a
 * one-pixel margin inside the source image; confirm with the caller.
 */
__global__ void CudaKernel_BatchResize_GRAY2GRAY( int src_width, unsigned char* src_image, int num_rects, int* rects, int dst_width, int dst_height, float* dst_ptr )
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int dst_image_size = dst_width * dst_height;
// One thread per destination pixel; excess threads exit early.
if( num_rects*dst_image_size <= gid ){
return;
}
const int image_index = (int)(gid / dst_image_size);
const int pixel_index = gid % dst_image_size;
// Horizontal source coordinate and interpolation weight (fx ends up as the
// weight of the LEFT sample after the 1-fraction inversion).
float scale_x = (float)(rects[image_index*4 + 2])/dst_width;
float fx = (float)(((pixel_index % dst_width)+0.5f)*scale_x - 0.5);
int coor_x_in_rect = floor(fx);
fx = 1.0f - (fx - (float)coor_x_in_rect);
// Vertical source coordinate; fy becomes the weight of the TOP sample.
float scale_y = (float)(rects[image_index*4 + 3])/dst_height;
float fy = (float)(((pixel_index / dst_width)+0.5f)*scale_y - 0.5);
int coor_y_in_rect = floor(fy);
fy = 1.0f - (fy - (float)coor_y_in_rect);
// Rect origin within the source image.
int src_x = rects[image_index*4 + 0];
int src_y = rects[image_index*4 + 1];
// 2x2 bilinear blend of the source neighborhood.
float value = 0.;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 0)] * fx * fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 0)] * fx*(1.0f - fy);
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*(1.0f - fy);
// Normalize 8-bit gray to [0,1].
dst_ptr[blockIdx.x * blockDim.x + threadIdx.x] = value / 255.f;
}
/*
 * Tiled pairwise-distance histogram (PDH).  Each block stages one tile of
 * atom coordinates in dynamic shared memory, accumulates inter-tile and
 * intra-tile distance counts into a per-block shared histogram, then merges
 * that histogram into d_histogram with global atomics.
 *
 * Shared memory layout (dynamic, supplied at launch):
 *   R[0..3*blockDim.x)  -- x, y, z coordinates of the staged tile
 *   sh_hist[0..histSize) -- per-block int histogram, after R
 *
 * NOTE(review): the __syncthreads() calls below sit inside `if(id < acnt)`,
 * a divergent branch -- when acnt is not a multiple of blockDim.x some
 * threads of the last block never reach the barrier, which is undefined
 * behavior.  Also the shared histogram is int while d_histogram is unsigned
 * long long; per-block counts above INT_MAX would overflow -- confirm the
 * expected bin counts.
 */
__global__ void PDH_kernel4(unsigned long long* d_histogram,
double* d_atom_x_list, double* d_atom_y_list, double* d_atom_z_list,
long long acnt, double res, int histSize)
{
extern __shared__ double shmem[];
//for now assume a block count of 157 and 80 (based on 10000 pts, 500.0 resolution, and 64 blocks)
// __shared__ int* shmem[(157*3)*sizeof(double) + sizeof(/*unsigned long long*/ int)*80];
// double* R = (double*)shmem;
double* R = shmem;
//2 copies of histogram, but we use one pointer
int * sh_hist = (int *)(R + 3*blockDim.x);
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i, j, h_pos;
int i_id, j_id;
int t = threadIdx.x;
double Lx, Ly, Lz, Rx, Ry, Rz;
double dist;
//initialize the shared histogram to 0
for(i = t; i < histSize; i += blockDim.x)
{
sh_hist[i] = 0;
}
//do tiled algorithm with sh_hist
if(id < acnt)
{
// This thread's "left" point, kept in registers for the whole kernel.
Lx = d_atom_x_list[id];
Ly = d_atom_y_list[id];
Lz = d_atom_z_list[id];
// Inter-tile phase: pair this block's points against every LATER block's
// tile (earlier blocks already counted those pairs).
for(i = blockIdx.x +1; i < gridDim.x; i++)
{
i_id = i * blockDim.x + t; //only valid threads may load into shared memory
if(i_id < acnt)
{
R[t] = d_atom_x_list[i_id];
R[t + blockDim.x] = d_atom_y_list[i_id];
R[t + blockDim.x*2] = d_atom_z_list[i_id];
}
// NOTE(review): divergent barrier -- see header comment.
__syncthreads();
for(j = 0; j < blockDim.x; j++)
{
j_id = i * blockDim.x + j; //now this prevents us from writing junk data
if(j_id < acnt)
{
/* DISTANCE FUNCTION */
Rx = R[j];
Ry = R[j + blockDim.x];
Rz = R[j + blockDim.x*2];
dist = sqrt((Lx - Rx)*(Lx-Rx) + (Ly - Ry)*(Ly - Ry) + (Lz - Rz)*(Lz - Rz));
h_pos = (int)(dist/res);
/* END DISTANCE FUNCTION */
atomicAdd((int*)&sh_hist[h_pos], 1);
// atomicAdd(&d_histogram[h_pos], 1);
}
}
// Keep the tile stable until every thread is done reading it.
__syncthreads();
}
//now load the L values into R
// Intra-tile phase: stage this block's own points and pair each with the
// points that follow it (upper-triangle only, so each pair counts once).
R[t] = Lx;
R[t + blockDim.x] = Ly;
R[t + blockDim.x*2] = Lz;
__syncthreads();
for(i = t+ 1; i < blockDim.x; i++)
{
i_id = blockIdx.x * blockDim.x + i;
if(i_id < acnt)
{
/* DISTANCE FUNCTION */
Rx = R[i];
Ry = R[i + blockDim.x];
Rz = R[i + blockDim.x*2];
dist = sqrt((Lx - Rx)*(Lx-Rx) + (Ly - Ry)*(Ly - Ry) + (Lz - Rz)*(Lz - Rz));
/* END DISTANCE FUNCTION */
h_pos = (int)(dist/res);
atomicAdd((int*)&sh_hist[h_pos], 1);
// atomicAdd(&d_histogram[h_pos], 1);
}
}
}
//now write back to output
__syncthreads();
// Merge the per-block histogram into the global one.
for(i = t; i < histSize; i += blockDim.x)
{
atomicAdd(&d_histogram[i], sh_hist[i]);
}
}
|
22,564 | #include "includes.h"
/*
#define N 512
#define N 2048
#define THREADS_PER_BLOCK 512
*/
// One block computes one partial dot product of THREADS_PER_BLOCK elements.
const int THREADS_PER_BLOCK = 32;
// Total vector length; presumably a multiple of THREADS_PER_BLOCK -- confirm,
// since the kernel below does not bounds-check its loads.
const int N = 2048;
// Per-block partial dot product: stage the elementwise products in shared
// memory, then thread 0 reduces them and writes one partial sum per block
// into c[blockIdx.x].
__global__ void dotProd( int *a, int *b, int *c ) {
    __shared__ int partial[THREADS_PER_BLOCK];
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    partial[threadIdx.x] = a[gid] * b[gid];
    // Every product must be in shared memory before the serial reduction.
    __syncthreads();
    if (threadIdx.x != 0)
        return;
    int total = 0;
    for (int k = 0; k < THREADS_PER_BLOCK; ++k)
        total += partial[k];
    c[blockIdx.x] = total;
}
22,565 | /* Vector reduction example using shared memory.
* Works for small vectors that can be operated upon by a single thread block.
* Build as follows: make clean && make
* Execute as follows: ./vector_reduction
* Author: Naga Kandasamy
* Date modified: May 15, 2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#define NUM_ELEMENTS 1024
/* Include kernel */
#include "vector_reduction_kernel.cu"
void run_test(int);
extern "C" double compute_gold(float *, int);
double compute_on_device(float *, int);
void check_CUDA_error(const char *);
int main(int argc, char **argv)
{
/* The single-block reduction kernels handle at most 1024 elements. */
const int num_elements = NUM_ELEMENTS;
if (num_elements > 1024) {
fprintf(stderr, "Input exceeds bounds\n");
exit(EXIT_FAILURE);
}
run_test(num_elements);
exit(EXIT_SUCCESS);
}
/* Generate a random vector, reduce it on CPU and GPU, and compare. */
void run_test(int num_elements)
{
int array_mem_size = sizeof(float) * num_elements;
/* Allocate memory on host to store input data */
float* h_data = (float *)malloc(array_mem_size);
if (h_data == NULL) {
fprintf(stderr, "malloc failed\n");
exit(EXIT_FAILURE);
}
/* Initialize input data to be floating-point values between [-.5, +.5] */
srand(time(NULL));
int i;
for (i = 0; i < num_elements; i++)
h_data[i] = rand()/(float)RAND_MAX - 0.5;
/* Calculate reference solution */
printf("Reducing vector on CPU\n");
double reference = compute_gold(h_data, num_elements);
printf("Answer = %f\n", reference);
/* Calculate solution on device */
printf("Reducing vector on GPU\n");
float gpu_result = compute_on_device(h_data, num_elements);
printf("Answer = %f\n", gpu_result);
/* FIX: use relative error, but fall back to absolute error when the
 * reference is (near) zero -- the original divided by `reference`
 * unconditionally, and used fabsf() on a double expression. */
double eps = 1e-6;
double err = (fabs(reference) > eps)
? fabs((reference - gpu_result) / reference)
: fabs(reference - gpu_result);
if (err <= eps)
printf("TEST PASSED\n");
else
printf("TEST FAILED\n");
free(h_data);
exit(EXIT_SUCCESS);
}
/* Reduce vector on device */
/* Reduce vector on device: copies h_data to the GPU, runs both reduction
 * kernel variants (single block of num_elements threads each), and returns
 * the double-precision result copied back from the device.
 * Precondition: num_elements <= 1024 (enforced by main). */
double compute_on_device(float* h_data, int num_elements)
{
float *d_data; /* Pointer to device address holding array */
double *d_result; /* Pointer to device address holding result */
int data_size = sizeof(float) * num_elements;
/* Allocate memory on device for the array */
cudaMalloc((void**)&d_data, data_size);
check_CUDA_error("Error allocating memory");
/* Copy data from host memory to device memory */
cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
check_CUDA_error("Error copying host to device memory");
/* Allocate memory on device to store the reduction result.
 * NOTE(review): *d_result is never zero-initialized -- confirm the
 * kernels fully overwrite it rather than accumulate into it. */
cudaMalloc((void **)&d_result, sizeof(double));
check_CUDA_error("Error allocating memory");
/* Set up execution grid and invoke kernel: one block, one thread per
 * element (hence the <=1024 precondition). */
dim3 threads(num_elements, 1, 1);
dim3 grid(1, 1);
printf("Using reduction kernel, version 1\n");
vector_reduction_kernel_v1<<<grid, threads>>>(d_data, d_result, num_elements);
check_CUDA_error("Error in kernel");
/* NOTE(review): v2 writes the same d_result, so the value copied back
 * below reflects version 2 only -- confirm that is intended. */
printf("Using reduction kernel, version 2\n");
vector_reduction_kernel_v2<<<grid, threads>>>(d_data, d_result, num_elements);
check_CUDA_error("Error in kernel");
/* Copy result from device to host (blocking, so it also synchronizes) */
double h_result;
cudaMemcpy(&h_result, d_result, sizeof(double), cudaMemcpyDeviceToHost);
check_CUDA_error("Error copying host to device memory");
/* Clean up device memory */
cudaFree(d_data);
cudaFree(d_result);
check_CUDA_error("Error freeing memory");
return h_result;
}
/* Abort with `msg` if any previously issued CUDA call has failed. */
void check_CUDA_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess)
return;
fprintf(stderr, "CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
|
22,566 | /*
============================================================================
Name : md5.cu
Author : xdegtyarev
Version :
Copyright : alexander degtyarev
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <cuda.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <limits.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// CUDA kernel that performs op on chunk of data
__global__ void baseChunkOpKernel(char* data, int chunkSize) {
// Placeholder kernel: computes each y-thread's contiguous sub-chunk
// bounds but performs no operation on the data yet.
const long subSize = chunkSize / blockDim.y;
const long startOffset = subSize * threadIdx.y;
(void)data;
(void)startOffset; // intentionally unused for now
}
/* Each y-thread prints its contiguous sub-chunk of the buffer. */
__global__ void printFileChunkOpKernel(char* data, int chunkSize) {
long threadSubchunkSize = chunkSize/blockDim.y;
long threadStartOffset = threadSubchunkSize*threadIdx.y;
/* FIX: +1 for the NUL terminator -- the original handed a
 * non-terminated buffer to printf("%s"), reading past the allocation.
 * Also check the device-heap malloc, which can fail. */
char* subbuf = (char*)malloc(threadSubchunkSize + 1);
if (subbuf == NULL) {
printf("printFileChunkOpKernel: device malloc failed\n");
return;
}
for(long i = 0; i<threadSubchunkSize; i++){
subbuf[i] = *(data+i+threadStartOffset);
}
subbuf[threadSubchunkSize] = '\0';
printf("#################\n\n%d:[%s]\n\n##############",threadIdx.y,subbuf);
free(subbuf);
}
/* Histogram the byte values of the chunk into res[0..CHAR_MAX-1].
 * FIX 1: `char` may be signed, so bytes >= 0x80 indexed res negatively;
 *        skip values outside [0, CHAR_MAX) (res has CHAR_MAX slots).
 * FIX 2: all y-threads share res, so the original res[curr]++ was a data
 *        race that lost counts -- use atomicAdd. unsigned long is 64-bit
 *        on the LP64 hosts this file targets (mmap/unistd.h), matching
 *        the unsigned long long atomic. */
__global__ void fileChunkCharCounterKernel(char* data, int chunkSize, unsigned long* res) {
long threadSubchunkSize = chunkSize/blockDim.y;
long threadStartOffset = threadSubchunkSize*threadIdx.y;
for(long i = 0; i<threadSubchunkSize; i++){
int c = (int)*(data+i+threadStartOffset);
if (c < 0 || c >= CHAR_MAX) continue;
atomicAdd((unsigned long long*)&res[c], 1ULL);
}
}
/* Naive per-thread run-length encoder: writes <count><byte> pairs into
 * res starting at the thread's own sub-chunk offset.
 * FIX: the original never updated `prev` (so runs were never detected),
 * never advanced resC after writing the byte (each byte overwrote the
 * last), and dropped the final run. Rewritten as an explicit run scan;
 * runs are capped at CHAR_MAX so the count fits in one char. */
__global__ void fileChunkCompressorOpKernel(char* data, int chunkSize, char* res) {
long threadSubchunkSize = chunkSize/blockDim.y;
long threadStartOffset = threadSubchunkSize*threadIdx.y;
long resC = threadStartOffset;
long i = 0;
while (i < threadSubchunkSize) {
char curr = *(data + threadStartOffset + i);
char runLen = 1;
while (i + runLen < threadSubchunkSize
&& *(data + threadStartOffset + i + runLen) == curr
&& runLen < CHAR_MAX) {
runLen++;
}
res[resC++] = runLen; /* run length */
res[resC++] = curr;   /* the repeated byte */
i += runLen;
}
}
/* Print capability and memory information for every CUDA device. */
void printDeviceInfo(){
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf(" Device %d: \"%s\" with Compute %d.%d capability\n",
i, prop.name, prop.major, prop.minor);
printf("Total global memory: %lu\n", prop.totalGlobalMem);
printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
/* NOTE(review): the 192 factor assumes 192 cores per SM (Kepler-era);
 * other architectures have different core counts -- confirm. */
printf("Number of compute cores: %d\n", prop.multiProcessorCount*192);
printf("Total amount of shared memory per block: %lu\n",prop.sharedMemPerBlock);
printf("Total registers per block: %d\n", prop.regsPerBlock);
printf("Warp size: %d\n", prop.warpSize);
printf("Maximum memory pitch: %lu\n", prop.memPitch);
printf("Total amount of constant memory: %lu\n", prop.totalConstMem);
printf("Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
/* Bandwidth = 2 (DDR) * clock * bus-bytes, scaled to GB/s. */
printf("Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
/* Stream `len` bytes through a device buffer of 512 sub-chunks of
 * chunkSz bytes, launching the (no-op) base kernel on each batch. */
void gpuBaseFileChunkRunner(char* data, size_t len, size_t chunkSz)
{
const size_t chunksCount = 512;
const size_t devBufferSize = chunkSz * chunksCount;
printf("TotalBytes: %lu | Device BufferSize: %lu \n",len,devBufferSize);
char* devBuf;
CUDA_CHECK_RETURN(cudaMalloc(&devBuf, sizeof(char) * devBufferSize));
const int numBlocks = 1;
dim3 dimBlock(numBlocks, chunksCount); // one y-thread per sub-chunk
size_t remaining = len;
do
{
const size_t batch = (remaining < devBufferSize) ? remaining : devBufferSize;
printf("Bytes left: %lu| total: %lu| reading: %lu\n",remaining,len,batch);
CUDA_CHECK_RETURN(cudaMemcpy(devBuf, data, batch * sizeof( char ) , cudaMemcpyHostToDevice));
baseChunkOpKernel<<<numBlocks,dimBlock>>>(devBuf, batch);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
data += batch;
remaining -= batch;
} while( remaining > 0 );
CUDA_CHECK_RETURN(cudaFree(devBuf));
}
/* Stream `len` bytes through a 512-sub-chunk device buffer, running the
 * byte-histogram kernel on each batch and accumulating totals on host,
 * then print the printable-character counts. */
void gpuFileChunkRunner(char* data, size_t len, size_t chunkSz)
{
size_t chunksCount = 512;
size_t devBufferSize = chunkSz*chunksCount;
printf("TotalBytes: %lu | Device BufferSize: %lu \n",len,devBufferSize);
char* devBuf;
unsigned long* res;       /* device histogram */
unsigned long* result = (unsigned long*)malloc(sizeof(unsigned long) * CHAR_MAX);
unsigned long* accresult = (unsigned long*)malloc(sizeof(unsigned long) * CHAR_MAX);
for(int c = 0; c<CHAR_MAX; ++c){
result[c] = 0;
accresult[c] = 0;
}
CUDA_CHECK_RETURN(cudaMalloc(&devBuf, sizeof(char) * devBufferSize));
CUDA_CHECK_RETURN(cudaMalloc(&res, sizeof(unsigned long) * CHAR_MAX));
size_t hostBytesLeft = len;
size_t chunkSize;
int numBlocks = 1;
dim3 dimBlock(numBlocks,chunksCount);
do
{
chunkSize = ( hostBytesLeft < devBufferSize) ? hostBytesLeft : devBufferSize;
printf("Bytes left: %lu| total: %lu| reading: %lu\n",hostBytesLeft,len,chunkSize);
/* Reset the device histogram for this batch. */
CUDA_CHECK_RETURN(cudaMemcpy(res,result,sizeof(unsigned long) * CHAR_MAX,cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(devBuf, data, chunkSize * sizeof( char ) , cudaMemcpyHostToDevice));
fileChunkCharCounterKernel<<<numBlocks,dimBlock>>>(devBuf, chunkSize,res);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaMemcpy( result, res, sizeof(unsigned long) * CHAR_MAX, cudaMemcpyDeviceToHost));
/* Fold batch counts into the running totals. */
for(int c = 0; c<CHAR_MAX; ++c){
accresult[c]=accresult[c]+result[c];
result[c] = 0;
}
data = data + chunkSize;
hostBytesLeft = hostBytesLeft - chunkSize;
} while( hostBytesLeft > 0 );
printf("______\n");
for(int c = 32; c<CHAR_MAX; ++c){
/* FIX: accresult[c] is unsigned long; "%u" is undefined behavior on
 * LP64 platforms -- use "%lu". */
printf("[%x] %c:%lu\n",c,c,accresult[c]);
}
printf("______\n");
CUDA_CHECK_RETURN(cudaFree(devBuf));
CUDA_CHECK_RETURN(cudaFree(res));
free(result);
free(accresult);
}
int main(int argc, char *argv[])
{
/* mmap the file named by argv[1] and run the GPU char-counter on it.
 * Any second argument enables device-info printing. */
if (argc < 2) {
fprintf(stderr, "%s\n", "File not specified");
exit(EXIT_FAILURE);
}else{
bool debug_print = argc > 2;
if(debug_print){
printDeviceInfo();
}
//check file
if((ftok(argv[1],'R')) < 0){
perror(argv[1]);
exit(EXIT_FAILURE);
}
struct stat64 st;
/* FIX: check stat64 -- the original used st.st_size uninitialized on
 * failure. */
if (stat64(argv[1], &st) < 0) {
perror("stat64");
exit(EXIT_FAILURE);
}
long flen = st.st_size;
/* FIX: mmap of a zero-length file fails with EINVAL; bail out early. */
if (flen == 0) {
fprintf(stderr, "%s\n", "File is empty");
exit(EXIT_FAILURE);
}
int fd;
if((fd = open(argv[1],O_RDONLY))<0){
perror("open");
exit(EXIT_FAILURE);
}
void* filep;
if((filep = mmap (0, flen, PROT_READ, MAP_SHARED, fd, 0)) == MAP_FAILED){
perror("MMAP fail");
exit(EXIT_FAILURE);
}
gpuFileChunkRunner((char*)filep,flen,512);
if ((close (fd)) < 0) {
perror ("close");
return 1;
}
if ((munmap (filep, flen)) < 0) {
perror ("munmap");
return 1;
}
return 0;
}
}
/* Helper behind CUDA_CHECK_RETURN: no-op on success, otherwise report
 * the failing statement with its location and abort the process. */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err != cudaSuccess) {
printf("%s returned %s (err) %d at %s line: %d \n",statement,cudaGetErrorString(err),err,file,line);
exit (1);
}
}
|
22,567 | #include <math.h>
#include <cuda.h>
__global__ void apply_f1(double h, int lower_bound, double* destination) {
// destination[i] = sin(h * (i + lower_bound)) for the flat thread index i.
// No bounds check: the launcher must size the buffer to the full grid.
const int i = blockDim.x * blockIdx.x + threadIdx.x;
destination[i] = sin(h * (i + lower_bound));
}
__global__ void apply_f2(double h, int lower_bound, double* destination) {
// destination[i] = cos(h * (i + lower_bound)) for the flat thread index i.
// No bounds check: the launcher must size the buffer to the full grid.
const int i = blockDim.x * blockIdx.x + threadIdx.x;
destination[i] = cos(h * (i + lower_bound));
}
/* Fill f1/f2 with sin/cos samples computed on the GPU.
 * FIX 1: the original truncated blocks = local_length/1024, silently
 *        dropping the tail (and launching 0 blocks for small inputs);
 *        use ceil-div, and pad the *device* buffers to a whole number of
 *        blocks since the kernels have no bounds check.
 * FIX 2: the device buffers were leaked -- free them. */
extern "C" void populate_vectors(double* f1, double* f2, unsigned int local_length, double h, unsigned int lower_bound) {
if (local_length == 0) return; // nothing to do; avoid a 0-block launch
double* f1_d;
double* f2_d;
size_t bytes = sizeof(double) * local_length;
size_t threads = 1024;
size_t blocks = (local_length + threads - 1) / threads; // ceil-div
size_t padded_bytes = sizeof(double) * blocks * threads;
cudaMalloc(&f1_d, padded_bytes);
cudaMalloc(&f2_d, padded_bytes);
apply_f1<<<blocks,threads>>>(h,lower_bound,f1_d);
apply_f2<<<blocks,threads>>>(h,lower_bound,f2_d);
cudaDeviceSynchronize();
/* Copy back only the caller's local_length elements. */
cudaMemcpy(f1, f1_d, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(f2, f2_d, bytes, cudaMemcpyDeviceToHost);
cudaFree(f1_d);
cudaFree(f2_d);
}
|
22,568 | #include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
#define MATRIX_SIZE 1024
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code: C = A * B, timing the kernel.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Time the kernel with CUDA events
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// Invoke kernel: one thread per output element
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time elapsed: %f\n", milliseconds);
/* FIX: destroy the events -- the original leaked them. */
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// One thread per output element: C[row][col] = dot(A row, B col).
// No bounds checks: dimensions must be exact multiples of the launch
// geometry, matching the host-side contract in MatMul().
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
float acc = 0;
for (int k = 0; k < A.width; ++k) {
acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
}
C.elements[row * C.width + col] = acc;
}
// Allocate an uninitialized MATRIX_SIZE x MATRIX_SIZE matrix.
void initMatrix(Matrix *m) {
m->width = MATRIX_SIZE;
m->height = MATRIX_SIZE;
m->elements = (float *)malloc((size_t)m->height * m->width * sizeof(float));
}
// Fill the matrix (row-major) with pseudo-random values from rand().
void randomMatrix(Matrix m) {
const int total = m.height * m.width;
for (int idx = 0; idx < total; idx++) {
m.elements[idx] = rand();
}
}
int main() {
/* Multiply two random MATRIX_SIZE^2 matrices, timing the kernel. */
Matrix A, B, C;
initMatrix(&A);
initMatrix(&B);
initMatrix(&C);
randomMatrix(A);
randomMatrix(B);
MatMul(A, B, C);
/* FIX: release the host buffers -- the original leaked all three. */
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
22,569 | #include <cuda_runtime_api.h>
#include <iostream>
#include <stdio.h>
#define BLOCK_SIZE 16
using namespace std;
typedef struct {
int width;
int height;
int stride;
int* elements;
} Matrix;
typedef struct {
int width;
int* elements;
} Vector;
// Fetch A[row][col] using the row stride.
// NOTE(review): Matrix.elements is int*, so the value is implicitly
// converted to float on return -- confirm this is intentional.
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Store `value` at A[row][col] using the row stride.
// NOTE(review): Matrix.elements is int*, so the float is truncated to
// int on store -- confirm this is intentional.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
// Dump all elements in row-major order, 10 values per printed line.
void print_matrix(const Matrix A) {
const int size = A.width * A.height;
cout<<"size A "<<size<<endl;
cout<<" MATRIX \n";
for (int idx = 0; idx < size; idx++) {
cout << A.elements[idx] << " ";
if ((idx + 1) % 10 == 0) {
cout << "\n";
}
}
}
__global__ void macierz_wektor_10_kernel(const Matrix, const Vector, Vector);
/* 10x10 matrix * 10-vector demo: builds inputs on the host, multiplies
 * on the GPU with one thread per output element, prints the result.
 * FIX: the original leaked every host and device allocation. */
void macierz_wektor_10()
{
//create Matrix and Vector on Host (CPU)
Matrix A;
Vector B;
Vector C;
A.width = A.height = A.stride = 10;
size_t size_A = A.width * A.height * sizeof(int);
A.elements = (int*) malloc(size_A);
B.width = 10;
size_t size_B = B.width * sizeof(int);
B.elements = (int*) malloc(size_B);
C.width = 10;
size_t size_C = C.width * sizeof(int);
C.elements = (int*) malloc(size_C);
int i;
/* Every row of A is 1..10; B is 1..10. */
for(i = 0; i < A.width*A.height; i++) {
A.elements[i] = (i % 10) + 1;
}
print_matrix(A);
for(i = 0; i < B.width; i++) {
B.elements[i] = (i % 10) + 1;
}
//Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
cudaMalloc(&d_A.elements, size_A);
cudaMemcpy(d_A.elements, A.elements, size_A, cudaMemcpyHostToDevice);
Vector d_B;
d_B.width = B.width;
cudaMalloc(&d_B.elements, size_B);
cudaMemcpy(d_B.elements, B.elements, size_B, cudaMemcpyHostToDevice);
Vector d_C;
d_C.width = C.width;
cudaMalloc(&d_C.elements, size_C);
dim3 dimBlock(10, 1);
dim3 dimGrid(1);
macierz_wektor_10_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.elements, d_C.elements, size_C, cudaMemcpyDeviceToHost);
for(i = 0; i < C.width; i++) {
cout<<C.elements[i]<<" ";
}
cout<<endl;
/* FIX: release device and host memory. */
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
free(A.elements);
free(B.elements);
free(C.elements);
}
/* One thread per output element: C[row] = sum_k A[row][k] * B[k].
 * FIX: the original computed B[col] * (sum of column col of A), which is
 * not a matrix-vector product; it also left a debug printf in the hot
 * path and ended with a pointless __syncthreads() after the final write. */
__global__ void macierz_wektor_10_kernel(const Matrix A, const Vector B, Vector C) {
int row = threadIdx.x;
int acc = 0;
for (int k = 0; k < A.width; k++) {
acc += A.elements[row * A.width + k] * B.elements[k];
}
C.elements[row] = acc;
}
int main()
{
// Run the 10x10 matrix-times-vector demo.
macierz_wektor_10();
return 0;
}
|
22,570 | /*
Swap the elements of a vector: the first with the last and so on...
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
void checkCUDAError(const char* msg);
/* Reverse the vector in place: thread id swaps dati[id] with its mirror
 * dati[n-1-id]; only the first n/2 threads act, so each pair is swapped
 * exactly once. */
__global__ void rebalta (float *dati, long n)
{
long id;
/* FIX: the temporary was declared `long`, truncating the float values
 * to integers during the swap. */
float t;
id=blockIdx.x*blockDim.x+threadIdx.x;
if (id<n/2)
{
t=dati[n-id-1];
dati[n-id-1]=dati[id];
dati[id]=t;
}
}
int main(int argc, char **argv)
{
/* Reverse an n-element random vector on the GPU, timing the round trip.
 * Usage: ./a.out n */
long n;
long i;
timeval time;
double t1, t2;
float *dati_h;
float *dati_d;
long blocksize, nblocks;
srand(1);
//start the stopwatch
gettimeofday(&time, NULL);
t1=time.tv_sec+(time.tv_usec/1000000.0);
//parse parameters
if (argc<2)
{
printf("./a.out n\n");
exit(0);
}
sscanf(argv[1],"%ld",&n);
blocksize=512;
/* One thread per swap pair (n/2), rounded up to whole blocks. */
nblocks=(n/2)/blocksize + ((n/2)%blocksize == 0?0:1);
printf ("numero blocchi %ld\n", nblocks);
printf ("numero threads %ld\n", blocksize);
//allocation
dati_h=(float *) malloc (n*sizeof(float));
cudaMalloc((void**) &dati_d, n*sizeof(float));
checkCUDAError("Allocazione");
//initialization
for (i=0; i<n; i++)
dati_h[i]=(rand()%100000)/1000.0;
//copy to device
cudaMemcpy(dati_d,dati_h, sizeof(float)*n, cudaMemcpyHostToDevice);
checkCUDAError("Trasferimento su device");
//launch kernel
rebalta <<< nblocks, blocksize >>> (dati_d, n);
checkCUDAError("Kernel");
//copy back from device (blocking, so the kernel has finished here)
cudaMemcpy(dati_h,dati_d, sizeof(float)*n, cudaMemcpyDeviceToHost);
checkCUDAError("Trasferimento da device");
//stop the stopwatch
/* FIX: cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize. */
cudaDeviceSynchronize();
gettimeofday(&time, NULL);
t2=time.tv_sec+(time.tv_usec/1000000.0);
printf("Tempo impiegato: %f\n",t2-t1);
/* FIX: release host and device memory (leaked in the original). */
free(dati_h);
cudaFree(dati_d);
return 0;
}
/* Abort with `msg` if any previously issued CUDA call has failed. */
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess)
return;
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
|
22,571 | #include "includes.h"
__global__ void cuda_deactivateBend(double* pE, const double* pA, int n)
{
// Scale each pE[i] by 0.5 * (x / sqrt(x^2 + 1)) + 1, where x is the
// current value -- a smooth multiplier in (0.5, 1.5). pA is unused here.
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) {
return; // out-of-range threads do nothing
}
const double x = pE[idx];
pE[idx] *= 0.5 * (x / sqrt(x * x + 1)) + 1;
} |
22,572 | #include "includes.h"
/* Tiled matrix multiply C = A * B using 32x32 shared-memory tiles.
 * Launch with 32x32 thread blocks covering C (rowsc x colsc); one thread
 * computes one C element. Out-of-range tile slots are zero-filled, so
 * dimensions need not be multiples of 32. */
__global__ void cuda_mat_multiply(const double* A, const double* B, double * C, int rowsa, int colsa, int rowsb, int colsb, int rowsc, int colsc){
__shared__ double sA[32][32]; // Tile size of 32x32
__shared__ double sB[32][32];
int Row = blockDim.y*blockIdx.y + threadIdx.y;
int Col = blockDim.x*blockIdx.x + threadIdx.x;
double Cvalue = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
/* Walk the tiles along A's columns / B's rows (ceil(colsa/32) steps). */
for (int k = 0; k < (((colsa - 1)/ 32) + 1); k++){
/* Stage this thread's element of the A tile (0 when out of range). */
if ( (Row < rowsa) && (threadIdx.x + (k*32)) < colsa){
sA[threadIdx.y][threadIdx.x] = A[(Row*colsa) + threadIdx.x + (k*32)];
}
else{
sA[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads();
/* Stage this thread's element of the B tile (0 when out of range). */
if ( Col < colsb && (threadIdx.y + k*32) < rowsb){
sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*32)*colsb + Col];
}
else{
sB[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads(); // both tiles fully staged before use
/* Accumulate the partial dot product over this tile. */
for (int j = 0; j < 32; ++j){
Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
}
__syncthreads(); // finish reading tiles before the next iteration overwrites them
}
/* Only in-range threads write their result. */
if (Row < rowsc && Col < colsc){
C[Row*colsc + Col] = Cvalue;
}
} |
22,573 | #include "cuda.h"
#include "stdio.h"
int N = 10;
// Print a single integer followed by a newline.
void printi(int i){
printf("%d\n", i);
}
// Fill array[0..n-1] with its own indices: array[i] = i.
void init_CPU_array(int* array, int n){
for (int idx = 0; idx < n; ++idx) {
array[idx] = idx;
}
}
// Print the first n elements of the array, one per line.
void print_CPU_array(int array[], int n){
for (int idx = 0; idx < n; ++idx) {
printi(array[idx]);
}
}
// Performs one pairwise-reduction step over the determinant array:
// thread t adds arreglo[t*acceso + offset] into arreglo[t*acceso] and
// zeroes the source slot. N is passed as float, so the guard uses a
// floating-point division.
__global__ void sumador_3(int* arreglo, int acceso, int offset, int i, float N){
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < (N / acceso))
{
const int dst = tid * acceso;
arreglo[dst] = arreglo[dst] + arreglo[dst + offset];
arreglo[dst + offset] = 0;
printf("%s\n", "TRABAJO");
}
}
int* arreglo_determinantes;     /* host copy of the work array */
int* d_arreglo_determinantes;   /* device copy of the work array */
int main(int argc, char** argv){
/* Pairwise-reduce the N-element array on the GPU with strides
 * 2, 4, 8, 16 and print the intermediate state. */
int* suma_det = (int *) malloc(sizeof(int));
arreglo_determinantes = (int*) malloc(N * sizeof(int));
cudaMalloc(&d_arreglo_determinantes, N * sizeof(int));
init_CPU_array(arreglo_determinantes, N);
cudaMemcpy(d_arreglo_determinantes, arreglo_determinantes, N * sizeof(int), cudaMemcpyHostToDevice);
dim3 miGrid1D_2(1,1);
dim3 miBloque1D_2(N,1);
for(int i=1; i < N; i++)
{
/* FIX: integer powers of two via shifts instead of pow(), which
 * routes through double and can round. */
int acceso = 1 << i;
int offset = 1 << (i - 1);
sumador_3<<<miGrid1D_2, miBloque1D_2>>>(d_arreglo_determinantes, acceso, offset, i, N);
/* FIX: cudaThreadSynchronize is deprecated. */
cudaDeviceSynchronize();
printf("%s\n", "Acceso:");
printf("%d\n", acceso);
printf("%s\n", "Offset:");
printf("%d\n", offset);
printf("%s\n", " ");
if(i==4) break;
}
cudaMemcpy(arreglo_determinantes, d_arreglo_determinantes, 10 * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", "TEST SUMA:");
print_CPU_array(arreglo_determinantes, 10);
/* FIX: suma_det was allocated but never freed in the original. */
free(suma_det);
free(arreglo_determinantes);
cudaFree (d_arreglo_determinantes);
return 0;
} |
22,574 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cmath>
using namespace std;
__device__ __inline__ float trim(float value)
{
/* Clamp to [0, 255] in float space.
 * FIX: the original took the argument as unsigned char, so out-of-range
 * values wrapped during the implicit conversion *before* the clamp ever
 * ran (and the fminf/fmaxf bounds were unsigned char casts). Callers
 * pass float expressions, so widening the parameter is compatible. */
return fminf(255.0f, fmaxf(value, 0.0f));
}
// Cubic through the origin: a*x^3 + b*x^2 + c*x (no constant term).
// Kept in expanded form deliberately -- Horner's form would change the
// float rounding order.
__device__ __inline__ float poly(float x, float a, float b, float c)
{
return a*x*x*x+b*x*x+c*x;
}
/* One thread per pixel of an RGBA image: evaluates a procedural scalar
 * field z at normalized coordinates and maps it through per-channel
 * cubic polynomials (coefficients a[0..8]) into the byte image. */
__global__ void kernel(unsigned char* img, const float* a)
{
int ix = blockIdx.x;
int iy = threadIdx.x;
/* NOTE(review): tid is built with blockDim.x while x divides by
 * blockDim.x and y by gridDim.x -- this mapping is only consistent when
 * gridDim.x == blockDim.x. Confirm the intended launch geometry. */
int tid = iy*blockDim.x + ix;
float x = (float)ix / blockDim.x;
float y = (float)iy / gridDim.x;
/* Machine-generated expression: every division/log/pow is wrapped in a
 * ternary guard to avoid division by zero and negative-log NaNs. */
double z = powf(x,powf(sinhf(((((0.546098f < x ? 0.546098f : x) < sinf(x) ? (0.546098f < x ? 0.546098f : x) : sinf(x))-(sinf(y)+(0.205267f > 0.0f ? log(0.205267f) : -log(-0.205267f)))) < (sinhf((y*x)) == 0.0f ? x : 1.0f/sinhf((y*x))) ? (((0.546098f < x ? 0.546098f : x) < sinf(x) ? (0.546098f < x ? 0.546098f : x) : sinf(x))-(sinf(y)+(0.205267f > 0.0f ? log(0.205267f) : -log(-0.205267f)))) : (sinhf((y*x)) == 0.0f ? x : 1.0f/sinhf((y*x))))),sinhf((powf((powf(x,0.285562f) > 0.0f ? sqrt(powf(x,0.285562f)) : -sqrt(-powf(x,0.285562f))),((y-x)+(y == 0.f ? x : x/y)))+(((y > 0.0f ? log(y) : -log(-y)) == 0.f ? coshf(x) : coshf(x)/(y > 0.0f ? log(y) : -log(-y))) < ((y == 0.f ? x : x/y) == 0.f ? (x*y) : (x*y)/(y == 0.f ? x : x/y)) ? ((y > 0.0f ? log(y) : -log(-y)) == 0.f ? coshf(x) : coshf(x)/(y > 0.0f ? log(y) : -log(-y))) : ((y == 0.f ? x : x/y) == 0.f ? (x*y) : (x*y)/(y == 0.f ? x : x/y)))))));
img[tid*4+0] = trim(poly(z,a[0],a[1],a[2]) * 255.0f); // red
img[tid*4+1] = trim(poly(z,a[3],a[4],a[5]) * 255.0f); // green
img[tid*4+2] = trim(poly(z,a[6],a[7],a[8]) * 255.0f); // blue
img[tid*4+3] = 255; // fully opaque alpha
} |
22,575 | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* In-place 3-point sum: v0[i] += v0[i-1] + v0[i+1] for interior elements,
 * staged through dynamic shared memory (size = blockDim.x * sizeof(int)).
 * NOTE(review): shared memory is indexed with the *global* thread id, so
 * this is only correct for a single-block launch (as done in main) --
 * confirm before launching with gridDim.x > 1.
 * NOTE(review): after the barrier, each thread updates v0tmp[tid] while
 * neighbors may concurrently update theirs -- across warps the neighbor
 * reads can observe updated values, a potential race; verify intent. */
__global__ void addShared(int * v0, std::size_t size){
extern __shared__ int v0tmp[];
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
v0tmp[tid] = v0[tid];
__syncthreads(); // all elements staged before any neighbor is read
if(tid>0 && tid<size - 1){
v0tmp[tid] += v0tmp[tid-1] + v0tmp[tid+1];
v0[tid] = v0tmp[tid];
}
}
int main(int argc, char const *argv[]) {
/* Run the shared-memory 3-point sum on an all-ones vector and print it. */
std::size_t size = 1024;
std::vector<int> v0(size, 1); // same as the original element-wise fill
int * v0_d = nullptr;
cudaMalloc(&v0_d, v0.size() * sizeof(int));
cudaMemcpy(v0_d,v0.data(),v0.size() * sizeof(int),cudaMemcpyHostToDevice);
/* Single block; dynamic shared memory sized to the whole vector. */
addShared<<<1,1024, 1024 * sizeof(int) >>>(v0_d,v0.size());
cudaDeviceSynchronize();
cudaMemcpy(v0.data(),v0_d,v0.size() * sizeof(int),cudaMemcpyDeviceToHost);
for(std::size_t i = 0; i < v0.size(); i++){
printf("%d\n",v0[i] );
}
cudaFree(v0_d);
/* FIX: return 0 on success -- the original returned 1, which shells
 * and build systems interpret as failure. */
return 0;
}
|
22,576 | #include <stdio.h>
__global__ void helloWorld(float f)
{
// Flat index over the x-dimension of the launch; printed alongside the
// block's x-index and the float argument.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
printf("Hello block %i running thread %i, f=%f\n", blockIdx.x, idx, f);
}
int main()
{
// 2x2 grid of 2x2 blocks (16 threads total).
const dim3 grid(2, 2, 1);
const dim3 block(2, 2, 1);
helloWorld<<<grid, block>>>(1.2345f);
cudaDeviceReset(); // flush device-side printf before exiting
return 0;
} |
22,577 | #include <iostream>
#include <sstream>
#include <list>
int main()
{
// Print the compute capabilities (e.g. "7.5") of all CUDA devices,
// space-separated with consecutive duplicates removed; -1 when none.
int count = 0;
if (cudaSuccess != cudaGetDeviceCount(&count)){ return -1; }
if (count == 0) { return -1; }
std::list<std::string> archs;
for (int device = 0; device < count; ++device)
{
cudaDeviceProp prop;
if (cudaSuccess != cudaGetDeviceProperties(&prop, device)){ continue; }
std::ostringstream arch; // fresh stream instead of str("") reset
arch << prop.major << "." << prop.minor;
archs.push_back(arch.str());
}
archs.unique(); // Some devices might have the same arch
for (std::list<std::string>::iterator it = archs.begin(); it != archs.end(); ++it)
std::cout << *it << " ";
return 0;
}
|
22,578 | #include "observer.cuh"
#include <stdio.h>
// True iff (x, y) lies inside the inclusive rectangle
// [minX, maxX] x [minY, maxY].
__device__ bool checkBorder(int x, int y, int minX, int maxX, int minY, int maxY){
const bool xInside = (minX <= x) && (x <= maxX);
const bool yInside = (minY <= y) && (y <= maxY);
return xInside && yInside;
}
/* Classify (x, y) against the inclusive rectangle [minX,maxX]x[minY,maxY].
 * Returns a 3x3 region code:
 *   1 2 3   (x below / inside / above min-max, crossed with y)
 *   4 0 5   0 = inside the rectangle
 *   6 7 8
 * For floating-point T, a NaN coordinate fails every comparison and
 * falls through to the final else (prints and returns 100). */
template <typename T>
__device__ int checkBorder_where_wrong(T x, T y, T minX, T maxX, T minY, T maxY){
if ((x < minX) && (y < minY)){//both x and y below the minimum border
return 1;
}
else if ((x < minX) && ((minY <= y) && (y <= maxY))){//x below the minimum border
return 2;
}
else if ((x < minX) && (maxY < y)){//x below the minimum border, y above the maximum border
return 3;
}
else if (((minX <= x) && (x <= maxX)) && (y < minY)){//y below the minimum border
return 4;
}
else if (((minX <= x) && (x <= maxX)) && ((minY <= y) && (y <= maxY))){//in bounds
return 0;
}
else if (((minX <= x) && (x <= maxX)) && (maxY < y)){//y above the maximum border
return 5;
}
else if ((maxX < x) && (y < minY)){//x above the maximum border, y below the minimum border
return 6;
}
else if ((maxX < x) && ((minY <= y) && (y <= maxY))){//x above the maximum border
return 7;
}
else if ((maxX < x) && (maxY < y)){//both x and y above the maximum border
return 8;
}
else{//should be unreachable for finite inputs; kept as a safety net (NaN lands here)
printf("Some problems caused!\n");
return 100;
}
}
/* Explicit instantiations for the supported coordinate types. */
template __device__ int checkBorder_where_wrong<int>(int x, int y, int minX, int maxX, int minY, int maxY);
template __device__ int checkBorder_where_wrong<float>(float x, float y, float minX, float maxX, float minY, float maxY);
template __device__ int checkBorder_where_wrong<double>(double x, double y, double minX, double maxX, double minY, double maxY);
// Returns false when flat index `pos` falls in one of the `distance`
// bottom rows of the (maxX-minX+1)-wide grid; true otherwise.
// ("shita" = bottom)
__device__ bool check_x_shita(int distance, int pos, int minX, int maxX, int minY, int maxY){
const int w = maxX - minX + 1; // row width
for (int i = 1; i <= distance; ++i){
const int lo = w * (i - 1); // first cell of row i-1
if (lo <= pos && pos <= lo + w - 1){
return false;
}
}
return true;
}
// Returns false when flat index `pos` falls in one of the `distance`
// top rows of the grid; true otherwise. ("ue" = top)
__device__ bool check_x_ue(int distance, int pos, int minX, int maxX, int minY, int maxY){
const int w = maxX - minX + 1;            // row width
const int topRowStart = w * (maxY - minY); // first cell of the top row
for (int i = 1; i <= distance; ++i){
const int lo = topRowStart - w * (i - 1);
if (lo <= pos && pos <= lo + w - 1){
return false;
}
}
return true;
}
// Returns false when flat index `pos` lies in one of the `distance`
// leftmost columns of the grid; true otherwise. ("hidari" = left)
__device__ bool check_y_hidari(int distance, int pos, int minX, int maxX, int minY, int maxY){
const int w = maxX - minX + 1;           // row width
const int total = w * (maxY - minY + 1); // total cell count
for (int i = 1; i <= distance; ++i){
for (int tid = i - 1; tid < total; tid += w){ // walk column i-1
if (tid == pos){
return false;
}
}
}
return true;
}
// Returns false when flat index `pos` lies in one of the `distance`
// rightmost columns of the grid; true otherwise. ("migi" = right)
__device__ bool check_y_migi(int distance, int pos, int minX, int maxX, int minY, int maxY){
const int w = maxX - minX + 1;           // row width
const int total = w * (maxY - minY + 1); // total cell count
for (int i = 1; i <= distance; ++i){
for (int tid = (maxX - minX) - (i - 1); tid < total; tid += w){ // walk column w-i
if (tid == pos){
return false;
}
}
}
return true;
}
/*4方向チェック*/
__device__ int check_with_distance(int *world, int distance, int tid, int minX, int maxX, int minY, int maxY){
int count = 0;
for (int i=1; i<=distance; ++i){
if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//all OK
count += world[tid-((maxX-minX+1)*i)] + world[tid+((maxX-minX+1)*i)] + world[tid-i] + world[tid+i];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == false)){//migi
count += world[tid-i] + world[tid+i-(maxX-minX+1)] + world[tid+((maxX-minX+1)*i)] + world[tid-((maxX-minX+1)*i)];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//hidari
count += world[tid-i+(maxX-minX+1)] + world[tid+i] + world[tid+(maxX-minX+1)*i] + world[tid-(maxX-minX+1)*i];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//ue
count += world[tid-i] + world[tid+i] + world[tid+((maxX-minX+1)*i)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-(maxX-minX+1)*i];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == false)){//migiue
count += world[tid-i] + world[tid+i-(maxX-minX+1)] + world[tid+((maxX-minX+1)*i)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-(maxX-minX+1)*i];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//hidariue
count += world[tid-i+(maxX-minX+1)] + world[tid+i] + world[tid+((maxX-minX+1)*i)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-((maxX-minX+1)*i)];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//shita
count += world[tid-i] + world[tid+i] + world[tid+(maxX-minX+1)*i] + world[tid-((maxX-minX+1)*i)+((maxX-minX+1)*(maxY-minY+1))];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == false)){//migishita
count += world[tid-i] + world[tid+i-(maxX-minX+1)] + world[tid+(maxX-minX+1)*i] + world[tid-((maxX-minX+1)*i)+((maxX-minX+1)*(maxY-minY+1))];
}
else if ((check_x_shita(distance, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(distance, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(distance, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(distance, tid, minX, maxX, minY, maxY) == true)){//hidarishita
count += world[tid-i+(maxX-minX+1)] + world[tid+i] + world[tid+(maxX-minX+1)*i] + world[tid-((maxX-minX+1)*i)+((maxX-minX+1)*(maxY-minY+1))];
}
else {
printf("some problems in check at tid%d\n", tid);
}
}
return count;
}
/* 8-direction neighbour check (distance = 1).
 *
 * Sums the values of the 8 surrounding cells of cell `tid` in `world`,
 * which (from the index arithmetic below) is laid out row-major with a
 * row stride of (maxX-minX+1) and a total of
 * (maxX-minX+1)*(maxY-minY+1) cells.  Cells on a boundary wrap around
 * torus-style: a +/-1 step that crosses a row edge is corrected by one
 * row stride, and a +/-rowStride step that crosses the top/bottom edge
 * is corrected by the whole-grid size.
 *
 * The check_x_shita/check_x_ue/check_y_hidari/check_y_migi helpers are
 * defined elsewhere in this file; presumably each returns true when the
 * corresponding neighbour offset stays in range (shita=down, ue=up,
 * hidari=left, migi=right) — TODO confirm against their definitions.
 * Note: not every true/false combination is handled; unhandled corner
 * combinations fall through to the diagnostic printf in the final else.
 */
__device__ int check_with_distance8(int *world, int tid, int minX, int maxX, int minY, int maxY){
int count = 0;
if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// interior cell: all 8 neighbours in range, no wrap needed
count += world[tid-((maxX-minX+1)*1)] + world[tid+((maxX-minX+1)*1)] + world[tid-1] + world[tid+1]+world[tid-1+(maxX-minX+1)]+world[tid-1-(maxX-minX+1)]+world[tid+1+(maxX-minX+1)]+world[tid+1-(maxX-minX+1)];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == false)){// "migi" (right) edge: the +1 neighbours wrap back by one row stride
count += world[tid-1] + world[tid+1-(maxX-minX+1)] + world[tid+((maxX-minX+1)*1)] + world[tid-((maxX-minX+1)*1)]+world[tid-1+(maxX-minX+1)]+world[tid-1-(maxX-minX+1)]+world[tid+1-(maxX-minX+1)+(maxX-minX+1)]+world[tid+1-(maxX-minX+1)-(maxX-minX+1)];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// "hidari" (left) edge: the -1 neighbours wrap forward by one row stride
count += world[tid-1+(maxX-minX+1)] + world[tid+1] + world[tid+(maxX-minX+1)*1] + world[tid-(maxX-minX+1)*1]+world[tid-1+(maxX-minX+1)+(maxX-minX+1)]+world[tid-1+(maxX-minX+1)-(maxX-minX+1)]+world[tid+1+(maxX-minX+1)]+world[tid+1-(maxX-minX+1)];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// "ue" (up/top) edge: the +rowStride neighbours wrap by the whole grid
count += world[tid-1] + world[tid+1] + world[tid+((maxX-minX+1)*1)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-(maxX-minX+1)*1]+world[tid-1+(maxX-minX+1)*1-((maxX-minX+1)*(maxY-minY+1))]+world[tid-1-(maxX-minX+1)*1]+world[tid+1-(maxX-minX+1)*1]+world[tid+1+(maxX-minX+1)*1-((maxX-minX+1)*(maxY-minY+1))];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == false)){// "migiue" (top-right) corner: combine both wrap corrections
count += world[tid-1] + world[tid+1-(maxX-minX+1)] + world[tid+((maxX-minX+1)*1)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-(maxX-minX+1)*1] + world[tid-1+((maxX-minX+1)*1)-((maxX-minX+1)*(maxY-minY+1))]+world[tid-1-(maxX-minX+1)*1]+world[tid+1-((maxX-minX+1)*1)-(maxX-minX+1)]+world[tid+1+(maxX-minX+1)-(maxX-minX+1)-((maxX-minX+1)*(maxY-minY+1))];
/* debug trace, intentionally disabled:
if (tid == 0){
printf("migiuecount=%d\n",count);
}
*/
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== true) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==false) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// "hidariue" (top-left) corner
count += world[tid-1+(maxX-minX+1)] + world[tid+1] + world[tid+((maxX-minX+1)*1)-((maxX-minX+1)*(maxY-minY+1))] + world[tid-((maxX-minX+1)*1)]+world[tid-1+(maxX-minX+1)+(maxX-minX+1)-((maxX-minX+1)*(maxY-minY+1))]+world[tid-1+(maxX-minX+1)-(maxX-minX+1)]+world[tid+1-(maxX-minX+1)]+world[tid+1+(maxX-minX+1)-((maxX-minX+1)*(maxY-minY+1))];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// "shita" (down/bottom) edge: the -rowStride neighbours wrap by the whole grid
count += world[tid-1] + world[tid+1] + world[tid+(maxX-minX+1)*1] + world[tid-((maxX-minX+1)*1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid-1+(maxX-minX+1)]+world[tid-1-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid+1-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid+1+(maxX-minX+1)];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == true) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == false)){// "migishita" (bottom-right) corner
count += world[tid-1] + world[tid+1-(maxX-minX+1)] + world[tid+(maxX-minX+1)*1] + world[tid-((maxX-minX+1)*1)+((maxX-minX+1)*(maxY-minY+1))] + world[tid-1+(maxX-minX+1)] + world[tid-1-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))] + world[tid+1-(maxX-minX+1)-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid+1-(maxX-minX+1)+(maxX-minX+1)];
}
else if ((check_x_shita(1, tid, minX, maxX, minY, maxY)== false) && (check_x_ue(1, tid, minX, maxX, minY, maxY)==true) && (check_y_hidari(1, tid, minX, maxX, minY, maxY) == false) && (check_y_migi(1, tid, minX, maxX, minY, maxY) == true)){// "hidarishita" (bottom-left) corner
count += world[tid-1+(maxX-minX+1)] + world[tid+1] + world[tid+(maxX-minX+1)*1] + world[tid-((maxX-minX+1)*1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid-1+(maxX-minX+1)+(maxX-minX+1)]+world[tid-1+(maxX-minX+1)-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid+1-(maxX-minX+1)+((maxX-minX+1)*(maxY-minY+1))]+world[tid+1+(maxX-minX+1)];
}
else {
// Unhandled edge-flag combination — indicates a logic gap in the checks above.
printf("some problems in check at tid%d\n", tid);
}
return count;
}
|
22,579 | // 20181201
// Yuqiong Li
// a basic CUDA function to test working with device constant memory
#include <stdio.h>
#include <cuda.h>
const unsigned int N = 10; // number of elements in each vector
// Vector `a` lives in device constant memory; it is filled from the host
// with cudaMemcpyToSymbol (N * sizeof(float) bytes).
// BUG FIX: the original declared const_d_a[N * sizeof(float)] — an array of
// N*4 floats — confusing an element count with a byte count. Only N floats
// are ever written or read.
__constant__ float const_d_a[N];
// function declarations
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int n);
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int n);
// main function
// Adds two N-element vectors three ways:
//   1. sequentially on the host (reference result),
//   2. on the GPU with operand `a` read from the __constant__ symbol
//      const_d_a directly inside the kernel,
//   3. on the GPU with the constant buffer's *device address* passed to a
//      kernel as an ordinary pointer parameter.
// Returns 0 on success; exits with EXIT_FAILURE on any CUDA API error.
int main()
{
    float * a, * b, * c; // a and b are input vectors; c receives each device result
    a = (float *)calloc(N, sizeof(float));
    b = (float *)calloc(N, sizeof(float));
    /**************************** Exp 1: sequential ***************************/
    int i;
    int size = N * sizeof(float);
    float sum = 0;
    for (i = 0; i < N; i++){
        a[i] = (float)i / 0.23 + 1;
        b[i] = (float)i / 5.89 + 9;
        sum += a[i] + b[i];
    }
    c = (float*) malloc(size);
    printf("Results from host :%.2f\n", sum);
    /********************** Exp 2: CUDA w/o const mem *************************/
    // 1. allocate device memory for b and the result
    float * d_b, * d_c; // device memory
    cudaError_t err2 = cudaMalloc((void **) & d_b, size);
    cudaError_t err3 = cudaMalloc((void **) & d_c, size);
    if (err2 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err2), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (err3 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err3), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy memory: `a` goes into the constant-memory symbol, `b` into global memory
    cudaError_t err4 = cudaMemcpyToSymbol(const_d_a, a, size);
    if (err4 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err4), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // 2. operate on kernels
    vecAddConstantKernel<<<ceil(N/256.0), 256>>>(d_b, d_c, N); // no need to pass const_d_a as a parameter as it's global
    // 3. copy the results back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    float cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from device :%.2f\n", cuda_res);
    // 2. do it again but passing constant variable as a parameter
    float * d_c1; // device memory
    cudaError_t err5 = cudaMalloc((void **) & d_c1, size);
    if (err5 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err5), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    // BUG FIX: the name of a __constant__ variable in host code is only a
    // host-side shadow; it is NOT a valid device pointer.  The device
    // address must be obtained with cudaGetSymbolAddress before it can be
    // passed to a kernel as a plain pointer argument.
    float * d_a_ptr = NULL;
    cudaError_t err6 = cudaGetSymbolAddress((void **) & d_a_ptr, const_d_a);
    if (err6 != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err6), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    vecAddConstantKernel2<<<ceil(N/256.0), 256>>>(d_a_ptr, d_b, d_c1, N);
    // 3. copy the results back to host
    cudaMemcpy(c, d_c1, size, cudaMemcpyDeviceToHost);
    cuda_res = 0;
    for(i = 0; i < N; i++){
        cuda_res += c[i];
    }
    printf("Results from host but pass const var as parameter:%.2f\n", cuda_res);
    // release device memory (d_c1 was leaked in the original) and host memory
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_c1);
    free(a);
    free(b);
    free(c);
    return 0;
}
// Element-wise vector add with one operand in constant memory:
// c[i] = const_d_a[i] + b[i] for i in [0, N).  Launch with at least N
// threads total; extra threads fall through the bounds guard.
__global__ void vecAddConstantKernel(float * b, float * c, unsigned int N){
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return; // guard the grid tail
    c[idx] = const_d_a[idx] + b[idx];
}
// Element-wise vector add with all operands passed as pointers:
// c[i] = a[i] + b[i] for i in [0, N).  Launch with at least N threads.
__global__ void vecAddConstantKernel2(float * a, float * b, float * c, unsigned int N){
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return; // guard the grid tail
    c[idx] = a[idx] + b[idx];
}
|
22,580 | #include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Defining two constants
__constant__ int constant_f;
__constant__ int constant_g;
#define N 5
// Kernel function for using constant memory
// Kernel demonstrating reads from __constant__ memory:
// d_out[i] = constant_f * d_in[i] + constant_g.
// Expects a single block with one thread per element (no bounds guard).
__global__ void gpu_constant_memory(float *d_in, float *d_out) {
  // Thread index doubles as the element index within the single block.
  const int idx = threadIdx.x;
  d_out[idx] = constant_f * d_in[idx] + constant_g;
}
// Demonstrates constant memory: uploads two integer coefficients to the
// __constant__ symbols constant_f and constant_g, then computes
// out[i] = constant_f * in[i] + constant_g on the GPU and prints the result.
// Returns 0 on success, 1 if the kernel launch fails.
int main(void) {
  // Host-side input/output arrays
  float h_in[N], h_out[N];
  // Device-side buffers
  float *d_in, *d_out;
  int h_f = 2;  // multiplier, copied into constant_f
  int h_g = 20; // offset, copied into constant_g
  // Allocate the buffers on the GPU (the original comment said "cpu")
  cudaMalloc((void **)&d_in, N * sizeof(float));
  cudaMalloc((void **)&d_out, N * sizeof(float));
  // Initialize the input array
  for (int i = 0; i < N; i++) {
    h_in[i] = i;
  }
  // Copy input from host to device
  cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice);
  // Copy constants to constant memory
  cudaMemcpyToSymbol(constant_f, &h_f, sizeof(int));
  cudaMemcpyToSymbol(constant_g, &h_g, sizeof(int));
  // Launch kernel with one block and N threads per block
  gpu_constant_memory<<<1, N>>>(d_in, d_out);
  // Kernel launches return no status directly; surface any
  // launch-configuration error before relying on the result.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaFree(d_in);
    cudaFree(d_out);
    return 1;
  }
  // Copy result back (this blocking cudaMemcpy also synchronizes with the kernel)
  cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost);
  // Printing result on console
  printf("Use of Constant memory on GPU \n");
  for (int i = 0; i < N; i++) {
    printf("The expression for index %f is %f\n", h_in[i], h_out[i]);
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
22,581 | #include <iostream>
#include <fstream>
#include <vector>
#include <sstream>
extern int solveMatrix(double *A_in, int n, double *b_in, double *x_out);
using namespace std;
int main(int argc, char *argv[]){
ifstream mtx(argv[1]);
ifstream vec(argv[2]);
vector<double> A;
vector<double> b;
string line;
int n=0;
double token;
while(getline(mtx,line)){
stringstream input;
input.str(line);
int count = 0;
while(input>>token){
A.push_back(token);
count++;
}
if(n==0) n=count;
}
while(vec>>token){
b.push_back(token);
}
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
cout<<A[4*i+j]<<"\t";
}
cout<<endl;
}
cout<<endl;
for(int i=0;i<n;i++){
cout<<b[i]<<endl;
}
cout<<endl;
solveMatrix(&A[0], n, &b[0], &b[0]);
for(int i=0;i<n;i++){
cout<<b[i]<<endl;
}
return 0;
}
|
22,582 | #include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
// Read-only cached load helper: routes the load through __ldg (the
// read-only data cache) on devices with compute capability >= 3.5 and
// falls back to a plain dereference elsewhere.  Only safe when no thread
// writes *ptr during the kernel's lifetime.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
extern "C"
__global__
// Bidirectional first-order recursive smoothing along the z axis of a
// 3-D volume stored as nz consecutive planes of ny*nx floats (x fastest).
// Forward sweep: plane[i+1] += plane[i] * alpha for i = 0 .. nz-2;
// backward sweep: plane[i] += plane[i+1] * alpha walking back down.
// Launch with a 2-D grid covering (nx, ny); each thread owns one (x, y)
// column and walks it in z, so no synchronization is needed.
void zSmooth(
int nz
, int ny
, int nx
, float alpha
, float * data // data (in/out)
)
{
int kx = blockIdx.x*blockDim.x + threadIdx.x;
int ky = blockIdx.y*blockDim.y + threadIdx.y;
if (kx < nx && ky < ny)
{
// k_0 / k_1 index two vertically adjacent cells of this (x, y) column.
int k_0 = nx*ky + kx;
int k_1 = nx*ny + nx*ky + kx;
// Forward accumulation toward increasing z.
for (int i = 0; i + 1 < nz; i++, k_0 += nx*ny, k_1 += nx*ny)
{
data[k_1] += data[k_0] * alpha;
}
// Step back one plane so k_1 points at the last plane, k_0 just below it.
k_0 -= nx*ny; k_1 -= nx*ny;
// Backward accumulation toward decreasing z (the >= 0 guards are
// redundant with the trip count but kept as a safety net).
for (int i = 0; i + 1 < nz && k_0 >= 0 && k_1 >= 0; i++, k_0 -= nx*ny, k_1 -= nx*ny)
{
data[k_0] += data[k_1] * alpha;
}
}
}
|
22,583 | #include <iostream>
#include <fstream>
#include <cuda.h>
#include <complex>
#include <thrust/complex.h>
#include <cuComplex.h>
using namespace std;
__global__ void fft(thrust::complex<float> *, thrust::complex<float> *);
// Copies an array of complex samples to the device, runs the (pass-through)
// fft kernel over them, copies the result back, and writes it to output.txt.
int main()
{
    const int N = 10;
    // BUG FIX: the original kept the host data as complex<double> while the
    // device buffers are thrust::complex<float> and the copy size was
    // N * sizeof(cuComplex) (8-byte elements).  The byte-wise cudaMemcpy
    // therefore reinterpreted 16-byte doubles as 8-byte floats and
    // transferred garbage.  Host arrays now use complex<float> so the
    // element layout matches the device side exactly.
    // Only 8 samples are specified; the remaining 2 are value-initialized
    // to 0 + 0i, matching the original declaration.
    complex<float> data[N] = {
        complex<float>(3.6f, 2.6f), complex<float>(2.9f, 6.3f),
        complex<float>(5.6f, 4.0f), complex<float>(4.8f, 9.1f),
        complex<float>(3.3f, 0.4f), complex<float>(5.9f, 4.8f),
        complex<float>(5.0f, 2.6f), complex<float>(4.3f, 4.1f)};
    // The results array, zero-initialized
    complex<float> results[N] = {};
    // An output file for FFT calculations
    ofstream outfile;
    outfile.open("output.txt");
    // Device pointers
    thrust::complex<float> *datad;
    thrust::complex<float> *resultsd;
    // Bytes in one N-element single-precision complex array; cuComplex,
    // thrust::complex<float> and std::complex<float> all have the same
    // 8-byte layout.
    const int COMPLEX_ARRAY_SIZE = N * sizeof(cuComplex);
    cudaMalloc( (void**)&datad, COMPLEX_ARRAY_SIZE );
    cudaMalloc( (void**)&resultsd, COMPLEX_ARRAY_SIZE );
    cudaMemcpy( datad, data, COMPLEX_ARRAY_SIZE, cudaMemcpyHostToDevice );
    cudaMemcpy( resultsd, results, COMPLEX_ARRAY_SIZE, cudaMemcpyHostToDevice );
    dim3 dimGrid( 1, 1 );
    dim3 dimBlock( N, 1 );
    // Invoke the kernel: one thread per sample, single block
    fft<<<dimGrid, dimBlock>>>(datad, resultsd);
    // Blocking copy back; also synchronizes with the kernel
    cudaMemcpy(results, resultsd, COMPLEX_ARRAY_SIZE, cudaMemcpyDeviceToHost);
    cudaFree( datad );
    cudaFree( resultsd );
    // BUG FIX: the original streamed the literal text "%i" — printf-style
    // format specifiers do not apply to iostreams; stream N directly.
    outfile << "TOTAL PROCESSED SAMPLES: " << N << "\n";
    outfile << "================================\n";
    // Print X, the results array
    for (int i = 0; i < N; i++)
    {
        outfile << results[i] << '\n';
        outfile << "================================\n";
    }
    // Close output file
    outfile.close();
    return 0;
}
// Pass-through "FFT" stub: each thread copies one input sample straight to
// the output array.  Launch with one thread per sample (no bounds guard).
__global__ void fft(thrust::complex<float> *datad, thrust::complex<float> *resultsd)
{
    const int idx = threadIdx.x; // single-block launch: thread id == sample id
    resultsd[idx] = datad[idx];
}
22,584 | #include "stdio.h"
// Empty kernel: does no work; exists only to demonstrate a kernel launch.
__global__ void kernel(void){
}
// Launches an (empty) kernel, waits for it to complete, then prints a
// greeting from the host.  Returns 0.
int main ( void ){
kernel<<<1,1>>>();
// Kernel launches are asynchronous; synchronize so the launch completes
// (and any launch error is surfaced) before the program exits.
cudaDeviceSynchronize();
printf("Hello, World! \n");
return 0;
}
|
22,585 | //
// 【pw_multiplies】
//
// 概要: thrust のサンプルコード
// vector の同じ要素同士の掛け算を計算する
// pointwise multiplication 計算
//
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <iostream>
// Thrust sample: element-wise (pointwise) multiplication of two vectors
// on the GPU without writing an explicit kernel.  Prints "0, 4, 10".
int main(){
// Host-side input and output vectors.
thrust::host_vector<int> h_A(3);
thrust::host_vector<int> h_B(3);
thrust::host_vector<int> h_C(3);
// Fill the inputs with simple test values: h_A = {0,1,2}, h_B = {3,4,5}.
for (int idx = 0; idx < 3; ++idx) {
h_A[idx] = idx;
h_B[idx] = idx + 3;
}
// Device-side buffers.
thrust::device_vector<int> d_A(3);
thrust::device_vector<int> d_B(3);
thrust::device_vector<int> d_C(3);
// Upload the inputs to the GPU.
thrust::copy(h_A.begin(), h_A.end(), d_A.begin());
thrust::copy(h_B.begin(), h_B.end(), d_B.begin());
// Pointwise multiplication: d_C[i] = d_A[i] * d_B[i], expressed with a
// standard functor instead of a hand-written kernel.
thrust::transform(d_A.begin(), d_A.end(), d_B.begin(), d_C.begin(),
thrust::multiplies<int>());
// Download the result back to the host.
thrust::copy(d_C.begin(), d_C.end(), h_C.begin());
// Show the result.
std::cout << h_C[0] << ", " << h_C[1] << ", " << h_C[2] << std::endl;
return(0);
}
|
22,586 | float h_A[]= {
0.9783785703143878, 0.5590614973341264, 0.6797962714660215, 0.8903910511968696, 0.9341342807933763, 0.6495864827547604, 0.5170069800106131, 0.9390146434783977, 0.7431405249408038, 0.5571549932954716, 0.8789095350337303, 0.8766834337695264, 0.7585463937940116, 0.6509300690459523, 0.6655874028349105, 0.7231178759027335, 0.8891385933970858, 0.9281176728649347, 0.6078280381043576, 0.86194353349913, 0.9325839637317146, 0.8164728866074829, 0.6925907895603693, 0.8575931638752015, 0.9951104231817254, 0.9338652033106569, 0.8330357106938336, 0.8949000748459666, 0.8191417037220341, 0.6487588666790309, 0.6234538136800802, 0.7246140425067783, 0.5321291890765816, 0.5334366796507448, 0.9490933099357788, 0.9513074401932311, 0.5693168315682409, 0.8398130739983629, 0.9885960948815545, 0.6171838363548894, 0.9582048045923279, 0.5482132991871642, 0.6291773434676776, 0.8328049027936526, 0.8594676827166452, 0.9363986403501119, 0.561850512737472, 0.8837978711299065, 0.7806788771864628, 0.937278489243172, 0.7520202541586976, 0.9376967772047597, 0.7757573262614637, 0.7963159793456096, 0.9549402202275905, 0.9631072570222146, 0.8176023108037402, 0.6846732080931976, 0.7800878557542462, 0.7855605440959464, 0.7767664947116194, 0.8079273614672018, 0.9529894894269184, 0.6802703790137101, 0.6359416341417881, 0.7738511916855015, 0.8850684709218097, 0.615110583920947, 0.5163316853505082, 0.7836872173712, 0.9471956895642889, 0.7057112167960689, 0.8468088254954294, 0.7598436282035455, 0.5084165624665546, 0.9154903624384825, 0.8028966057742053, 0.7102037781294865, 0.8600758865396794, 0.8301884442951473, 0.5889414884004585, 0.5959320403298415, 0.5121831201267537, 0.5533381451349438, 0.7091303899663259, 0.9569532654032918, 0.9488915503840827, 0.9327769696591495, 0.924177288323695, 0.5931816378208625, 0.6814383321562295, 0.8600283742614665, 0.8558674700175114, 0.7811499825785608, 0.8046120824843013, 0.6102346898141412, 0.7491635050266202, 0.9443325326424566, 0.6606553379965914, 0.8018652851731504, 
0.7665981012372467, 0.9165146622101157, 0.5191147849184568, 0.9304250466267756, 0.6392050864142824, 0.9828979322891396, 0.7649730768931022, 0.544839029573087, 0.9804745442265883, 0.5823246055544049, 0.5004048471922875, 0.9858965516806997, 0.8912476536602232, 0.7055689469878161, 0.5145568306214825, 0.9475083788567564, 0.9004553968380146, 0.9414407412179703, 0.5972894181182216, 0.9489500976768604, 0.699324479871724, 0.8718566648129027, 0.8182532052839036, 0.6804428992712597, 0.8019021329372867, 0.9567035606977952, 0.5519009365497846, 0.6753134502899285, 0.5159321150349185, 0.6788232447831674, 0.7323007756737572, 0.7181985957333499, 0.7053806075097283, 0.6521495310532388, 0.802514889912654, 0.5183705047158187, 0.6089568318578493, 0.9543191049757235, 0.9753807019707331, 0.9773094802305067, 0.7624214285780337, 0.5907654715238218, 0.6206144627946906, 0.718286334817536, 0.5468454831109391, 0.9451270497456301, 0.6911920337722264, 0.7347497972655712, 0.6724222983976834, 0.7675710505530784, 0.5022873489046176, 0.5769561868549995, 0.9005734620294354, 0.7562559322098814, 0.7381086468709535, 0.5537492967263491, 0.5999111570697114, 0.5082253289360759, 0.5262791742798483, 0.7151655100454462, 0.8221822016582025, 0.57061858764006, 0.689593115505639, 0.5274821364884155, 0.9321300685977391, 0.5158381502284961, 0.6320761917184889, 0.7615391019757874, 0.548896822944374, 0.6765329827536333, 0.9913316703661691, 0.5300516594875936, 0.7690361283004031, 0.8324498439501666, 0.5200611241549375, 0.6709234000209919, 0.6548399627113423, 0.705171317524605, 0.6251510582181816, 0.6803523696702065, 0.816149039227233, 0.9330149112221404, 0.8990228687003816, 0.8122101799995505, 0.754984111285997, 0.6368456985770742, 0.5670167111577831, 0.9569067669558877, 0.8350659347148907, 0.5868049742177694, 0.7052233969875423, 0.6036895173182629, 0.6607348079429667, 0.9621200198753778, 0.7253092009068489, 0.9059893970915458, 0.8172851579524049, 0.6358703221590611, 0.5443334812487954, 0.5812922358275043, 
0.692084553023777, 0.6783020425768782, 0.5563375962582939, 0.5443270618870383, 0.5924762303451667, 0.7461017549942843, 0.7418525410235692, 0.5874646926219159, 0.7372068757046508, 0.662881304787432, 0.7093896930735792, 0.9015209077756687, 0.870837097293927, 0.8640055953795227, 0.8221998755871359, 0.8621341782307137, 0.5979823131680717, 0.719093615556994, 0.5141396681012995, 0.5353599531413602, 0.5216378597584672, 0.8557228972919948, 0.8484485619711166, 0.691332745241422, 0.5246588712722648, 0.5429214015405581, 0.8247275991720989, 0.8054281170595601, 0.5109475265094545, 0.5464686082069568, 0.7280613262845983, 0.9845360262095113, 0.8856106886203625, 0.6461183036818402, 0.7644060349533395, 0.5782849262062809, 0.5792238222410155, 0.9558398524195215, 0.6143695622752913, 0.6951132787118737, 0.8253564632074801, 0.9609964179561692, 0.5544261878316648, 0.882892617521956, 0.758648973155671, 0.7973230391969124, 0.8360188996834459, 0.6899909519864871, 0.5835286991542895, 0.9013048015673759, 0.680291286026814, 0.6454999885032452, 0.7426850870470703, 0.7241486047137525, 0.9480751761306105, 0.5062081129771272, 0.5109790290897565, 0.8619525634620742, 0.8211689185135822, 0.877662178234865, 0.6262218654194213, 0.9848050069017076, 0.7382658932366818, 0.5551720723798292, 0.6157213480298056, 0.7676353526311219, 0.8426130614387433, 0.6824093702151435, 0.5771358600836096, 0.8010120987478972, 0.8204886742963351, 0.6384387793518544, 0.5663678387673694, 0.5915059119460772, 0.5795123657503055, 0.7363978459331033, 0.7098722712665102, 0.5868234030555213, 0.8777224340972629, 0.6645900157420706, 0.9517830304721615, 0.8052266894378451, 0.8022969374911959, 0.8879997814620479, 0.936833474301023, 0.6438798896795335, 0.7533598773412251, 0.7862741698047667, 0.6358693723864313, 0.6980042068541383, 0.7683646427255152, 0.6916812015092122, 0.760806722179688, 0.9292849607924878, 0.7578382604513227, 0.7721438304645466, 0.8145451941375179, 0.8263398817234336, 0.6588086136421472, 0.738705111301567, 
0.7729184949930901, 0.6530891733944959, 0.8472893011488035, 0.6775632884682459, 0.98492509017488, 0.53545751191507, 0.9587276095819836, 0.9145409643396698, 0.8638319312019493, 0.6694281613050608, 0.9927172840916045, 0.9022091253296425, 0.633223111213955, 0.5037274538137237, 0.6226752449067599, 0.9255084770688174, 0.7554268166281453, 0.709431872079896, 0.6227185616632462, 0.6725302164273866, 0.9553881396379134, 0.8877067674165442, 0.8339311822945976, 0.7699325995504904, 0.9511150961906832, 0.6958447549122625, 0.7148083164023018, 0.5738790741354236, 0.9953372287241202, 0.8315370419873876, 0.5394717520007328, 0.7590029791314457, 0.6086251788981213, 0.760790886678722, 0.6050759136403339, 0.7146434676688116, 0.7289141696328498, 0.6413512032253278, 0.7636270633439874, 0.9163230821184756, 0.7074588703207565, 0.5253439168118881, 0.5151184718769641, 0.6789697835239902, 0.6669065221656373, 0.803480311788435, 0.9828804097447391, 0.8305688384041476, 0.7896782411870229, 0.6695416097197492, 0.8246784391897518, 0.8369494433996574, 0.7127286458283038, 0.5655014053790464, 0.652046035073869, 0.7287279822875123, 0.8137434648965631, 0.7389801061738275, 0.9208719929275146, 0.6424853146090557, 0.5108593976944599, 0.6889958607156297, 0.5089014161955697, 0.637779376605558, 0.6994083950627819, 0.7760874429039406, 0.8043077387654358, 0.7148581570533641, 0.6635259066993333, 0.7981228783901462, 0.8999146457234695, 0.6615284215894905, 0.97339454077902, 0.8001253046882637, 0.629673175627532, 0.6084396511389923, 0.8440679362392821, 0.5272827830252158, 0.92114581524405, 0.5969232171916758, 0.8584685508304835, 0.9562140100714513, 0.6873580420734446, 0.9524961310477298, 0.9356062320249756, 0.9690725980834713, 0.7508667762274253, 0.6523559780167645, 0.5313286518909421, 0.6216243495195591, 0.8132374009744796, 0.6439805996346826, 0.8404124887327622, 0.9381055588074496, 0.5803474263902668, 0.6219527057182014, 0.5452973791671352, 0.7748418171666979, 0.824962609216408, 0.8609591126616438, 
0.56977643677907, 0.8189706782933759, 0.696980081935483, 0.9535381770649896, 0.6894572586310532, 0.88040358811265, 0.9346855891273245, 0.7395340285053414, 0.8615878707731914, 0.9091484099949144, 0.7570844443318252, 0.6653062317483817, 0.9621290019446509, 0.6773349560196424, 0.8111353289584595, 0.7072829858225418, 0.9212407169537588, 0.6992871134463754, 0.6783497014605806, 0.9413154588818013, 0.5064746295365016, 0.6233895841517924, 0.9628011306914519, 0.6372021758961824, 0.8403908373854747, 0.5911742701844471, 0.6834847398720028, 0.8853411663696756, 0.7170652620809508, 0.5099358483208115, 0.760243137208023, 0.600495424694937, 0.6135937754652324, 0.6211594139439804, 0.9934177697148969, 0.9991032156663382, 0.5262592574934988, 0.9667393970506152, 0.9660936566325065, 0.8847052515149023, 0.630433763636245, 0.7096638241045918, 0.643538773406236, 0.918628279370069, 0.5375026027800012, 0.6931117183486104, 0.8880625132830334, 0.8441251930876599, 0.9804085422394678, 0.9153128160354994, 0.6439824482443067, 0.6102375143141738, 0.7989027694377355, 0.5617680578022504, 0.9965970782582392, 0.5967563274691934, 0.9960202585330304, 0.5926931867274352, 0.5433914581069876, 0.8643350329870281, 0.6876826626142496, 0.6621484658906327, 0.6544343609234824, 0.855687908466733, 0.6817243325979627, 0.5955751506485055, 0.9338712120791915, 0.8104876631733681, 0.7013600381486322, 0.8462629080378115, 0.5793464044462067, 0.560963126335007, 0.9536158560773917, 0.9542138297960101, 0.5168280590901394, 0.9852696714652519, 0.8114701961456556, 0.7420920773888623, 0.5978959310003866, 0.8239508654044222, 0.7165802325258049, 0.5061071070375189, 0.9649653607305991, 0.6651133149430204, 0.5640143844512912, 0.752055869973263, 0.5783134798693792, 0.7549908390289346, 0.6786237001078965, 0.5972889404460645, 0.8476473444613197, 0.5935030853039454, 0.5291763791582986, 0.5710072440435637, 0.9844724855110024, 0.6113828673710653, 0.9456950512213738, 0.6722544552054452, 0.9692391221495829, 0.7748622142132127, 
0.5386413580485199, 0.5742970042655795, 0.7475606790296507, 0.7399569274532517, 0.7713443931953173, 0.974380165737973, 0.6757039140029939, 0.7558868980335316, 0.9011443954271421, 0.508345172989719, 0.7656717515715585, 0.9806535661159584, 0.8876553289000486, 0.7506382159738181, 0.6921600232016714, 0.9700176593472667, 0.7622328151153519, 0.6575859272591261, 0.550201648594571, 0.7812872745096187, 0.7323430846421479, 0.5948241989937357, 0.6855663047440963, 0.6982512213073025, 0.7121173990013139, 0.9389824360124359, 0.7402046805322375, 0.6293349871990228, 0.586543792201943, 0.5259632238252046, 0.8099294413741065, 0.6421230017136703, 0.5354027070781772, 0.7133695332999552, 0.8926812445310199, 0.9608986135482844, 0.7749303177764802, 0.8904509716875431, 0.6469821437153052, 0.9388177187042697, 0.9000527687721209, 0.6927599051642608, 0.7475590522672961, 0.6149114648184149, 0.9394988380338309, 0.752353619245384, 0.628123599808815, 0.6566391629705368, 0.8446206782215182, 0.7809989751118621, 0.6706879262575296, 0.8009513306831915, 0.7979137191612005, 0.7844330378430713, 0.5744861852065197, 0.5968209042654362, 0.6100453644084888, 0.5655772264590282, 0.809811203380112, 0.7976408312486666, 0.9044873178428005, 0.9125844349424233, 0.6298616696446325, 0.8276258696609049, 0.6495070685241076, 0.95015303457885, 0.6659789150990783, 0.8534905713180101, 0.8330520353510447, 0.6375386112774867, 0.6951746278580435, 0.905173276978652, 0.5573223799459559, 0.6295687772021725, 0.6891147272671565, 0.6470713870702973, 0.8014753431700301, 0.6621251995034206, 0.9992728822778643, 0.8111188410237915, 0.7731142774380628, 0.8975475180527577, 0.9180630854312188, 0.9405211717222568, 0.9447362940583828, 0.6996228764021664, 0.8615274651868532, 0.8709944981634673, 0.8368454775003149, 0.8028112596008463, 0.9726501939885215, 0.5052750828345427, 0.6816752335227665, 0.57485625287229, 0.8920972345141611, 0.5141517393578707, 0.6608675242314601, 0.6375913819636445, 0.5119788300762842, 0.8470354904208568, 
0.5739575989645778, 0.7914161511884856, 0.8730670881862866, 0.5258583496497482, 0.841368669048425, 0.5663989001467497, 0.5359712170631767, 0.8312547159602202, 0.6186063821911575, 0.5337921982671363, 0.7598991217110211, 0.9563885838807331, 0.8084690372941677, 0.8694379957173471, 0.9877679538026316, 0.8390043689988246, 0.8181357911820528, 0.7486329503756006, 0.6886074173630861, 0.6251646401288273, 0.781071791332475, 0.778186764648019, 0.6546580291469903, 0.8390007926872771, 0.8187150004628982, 0.9931525832866379, 0.527704984450805, 0.9474023989038151, 0.6088551128820513, 0.5448232383040374, 0.7946270421258748, 0.9654376984520738, 0.857716126459161, 0.9871372522344213, 0.6509599395112108, 0.5961317937113475, 0.617728248172515, 0.6336652251981139, 0.5232901233049783, 0.6119731195303022, 0.9118279675881309, 0.5523707970638059, 0.9499732829386749, 0.8021407976145549, 0.9726153194543461, 0.7078433786091196, 0.8447692073689163, 0.5253675745839262, 0.7599027148217963, 0.9682731502841115, 0.8611695289519449, 0.6866503936302573, 0.5151365376568691, 0.9377410103732388, 0.9541723295099042, 0.515657195671601, 0.9377290776085998, 0.6634407189567987, 0.5173018784829599, 0.8796718608957523, 0.7542591760385686, 0.9722846667669767, 0.7568936912453315, 0.9405678403242788, 0.6788337131522959, 0.6930167607039126, 0.5526996846556349, 0.5222907388111326, 0.9160406291052313, 0.5113125085160732, 0.9373106090081318, 0.6233452130141284, 0.8534397105745714, 0.9013774195490465, 0.7746337505811733, 0.5663373992819432, 0.6454764589049066, 0.9561424688329055, 0.7563322040000771, 0.5826424390228394, 0.9624478567020072, 0.5291130713178934, 0.586371804179669, 0.9177669086334227, 0.50857377617142, 0.8210717598278428, 0.6460376051014625, 0.5431472034783541, 0.523680523116268, 0.7430571846386138, 0.6737073013842126, 0.5019234960815522, 0.5561424323864905, 0.6427765152316469, 0.9608060080481668, 0.8877689487408899, 0.5415263774398213, 0.7376406352184666, 0.5058918922241313, 0.5775276235850986, 
0.6830402370490674, 0.6248288534645687, 0.7099949168501545, 0.5426373594330878, 0.6964662255678327, 0.7594886659504245, 0.8495365731542257, 0.5275484685091387, 0.6106665042955136, 0.6683108870483017, 0.6965486083201801, 0.502943903863987, 0.944526742705205, 0.9640432410049666, 0.7720149433678347, 0.933926789773978, 0.8595566953330448, 0.6600733669255882, 0.9365651270940066, 0.6206320617068022, 0.7327684391586771, 0.5525356679430361, 0.862371845919491, 0.9969033248063515, 0.9845760139684012, 0.7677123300631389, 0.7297778254476066, 0.7499706891399944, 0.5700131302546503, 0.7127398556763226, 0.6117113528236812, 0.5723793113871385, 0.5631532601126426, 0.684013432630627, 0.8299035908000922, 0.828379787908688, 0.8200622016452641, 0.667367149143171, 0.9492819562318467, 0.9796964480731312, 0.6023303733016876, 0.5983076579347609, 0.9774824387826564, 0.7706053644711688, 0.6674243125199595, 0.558924060926007, 0.5860604818936603, 0.7855415313897617, 0.7981372582977322, 0.7240367984229485, 0.845512257536273, 0.929673011379559, 0.7499962203498187, 0.5813045819903147, 0.9481098084415365, 0.9380893552958538, 0.7311090293231853, 0.861604903923678, 0.7100249561403591, 0.9636283414372142, 0.8586373696449991, 0.9409391665289137, 0.6261364538901004, 0.9313983384075174, 0.8379770691884565, 0.6649143962600655, 0.7366546922958744, 0.7935195917956536, 0.7822601386662733, 0.8518947491511779, 0.7647712164079964, 0.780462867670985, 0.5855413750391882, 0.6325575185661769, 0.8925654652978836, 0.5261974660571965, 0.5920012615441654, 0.529839885922213, 0.9097844215620494, 0.8283372155434695, 0.5062106858679032, 0.5891039202563801, 0.6055667266960751, 0.7965917835763382, 0.7927702752545467, 0.7826156961915892, 0.7909252690939295, 0.7718709887331834, 0.5687283595460895, 0.6662151565361263, 0.782643290248473, 0.9295067516860986, 0.7550972812501822, 0.9610743860527996, 0.7863065763241887, 0.9384335214579665, 0.9704032502935718, 0.7438959091574321, 0.7302471355103726, 0.9633552479868913, 
0.7234098198766352, 0.6481979701249456, 0.8563805561166457, 0.5702572299687491, 0.8991088016286586, 0.7441685520402384, 0.9709409436470255, 0.8896575120908066, 0.5373522595967717, 0.6362092921225919, 0.995382654637107, 0.9700898455425409, 0.6139927853903537, 0.9824056660114253, 0.5646837977734771, 0.9316038751276454, 0.5002501537738777, 0.5514804596040621, 0.7533352034976686, 0.9431570675733221, 0.9781749178396295, 0.5722597582652789, 0.9618988998667573, 0.6816204839903462, 0.5893622690600071, 0.8441142698854746, 0.5221637874816631, 0.7739081202171203, 0.8838263257373986, 0.7505981567102461, 0.6674558729091292, 0.9443474535057664, 0.7873374855655689, 0.6718833637041296, 0.6225529840615126, 0.6736068203333615, 0.750402423768439, 0.6978093154715913, 0.8393324960035935, 0.8739707387975966, 0.5572743000537014, 0.9785585510132497, 0.9957928140645462, 0.8523151594510859, 0.6427119652790494, 0.5025082192658821, 0.5592561538739425, 0.72527362852681, 0.8210133377367926, 0.8260408405858086, 0.5713441495258904, 0.9704796024285338, 0.709947348870053, 0.6280430611398452, 0.5250653457593075, 0.8919280854957452, 0.865569332230169, 0.7375942265767832, 0.8395395593978967, 0.8547232041446722, 0.7108980168590502, 0.891066378792402, 0.8075514625517182, 0.8326781818180675, 0.5891698403415961, 0.6608587770808244, 0.9126306522865261, 0.9681316137584808, 0.5371162622020801, 0.5129238167352612, 0.7926761846187097, 0.9766590209083807, 0.5655733521674298, 0.8247758357164692, 0.6749808219708291, 0.8705482075564914, 0.6726053982695791, 0.9230910847566877, 0.8137416808602376, 0.7206089053957776, 0.8700084098089501, 0.7326925451282716, 0.8118718089502979, 0.6598635980628635, 0.5074946039291974, 0.9068986546482536, 0.758838603885862, 0.6618194769818986, 0.8806681004341071, 0.8151528511912542, 0.5387213705261815, 0.9318998421545979, 0.6071721272127761, 0.7279795423973546, 0.8478872392561009, 0.7358972140909625, 0.5951173741206689, 0.899105665013229, 0.9143575771018122, 0.5112906883100063, 
0.9466267067254532, 0.7629843992097489, 0.6715328588199612, 0.7938600368545354, 0.8814865283584018, 0.5707772258349385, 0.9323395266182839, 0.9125966392249621, 0.5650871631376517, 0.7254252513452664, 0.8402366459899091, 0.6163914318416303, 0.9332136523027681, 0.7907145641000155, 0.7581145308787747, 0.7972805654713047, 0.9657670045393287, 0.7935143917428046, 0.5040329234756786, 0.5534911897281523, 0.9793999925197762, 0.8241467437669369, 0.6099467541780337, 0.8491965098826311, 0.9043452472470034, 0.9639684850773825, 0.946706537168601, 0.661819309713784, 0.6965069437108309, 0.7446116632580441, 0.8547351129818563, 0.5369006688681518, 0.9239017617333007, 0.512570278911389, 0.9239538524271306, 0.7761485066227752, 0.7256562259120094, 0.5931119803745086, 0.8880129183394645, 0.6589009805986223, 0.908891050395805, 0.9674756300967526, 0.7653586779823476, 0.7757164326460889, 0.7354471070329431, 0.7052354130045575, 0.7921209631599875, 0.5973323396302508, 0.9545570004938049, 0.9436761573543266, 0.619311836225293, 0.9262916371747564, 0.8844269026908016, 0.7253559412255721, 0.9911312450254658, 0.5822076920437064, 0.7233764210690381, 0.9346708142188367, 0.823660885924661, 0.7622081568159618, 0.714525672468215, 0.9076869092807807, 0.9389644814990882, 0.562002276928692, 0.8660156822911216, 0.7406236451769518, 0.7383671728619702, 0.7657714247745679, 0.9151869055273963, 0.9161079476555771, 0.5291853787901205, 0.8908780855628177, 0.9031444554831043, 0.6411386552041025, 0.7362212429155013, 0.7956659622176394, 0.7176889289254409, 0.6812849360408975, 0.6797488655861639, 0.6475763658703256, 0.8632604466552095, 0.945399174341073, 0.8221005991328598, 0.5104802284492842, 0.5832192471507954, 0.8165388268874036, 0.6925545743393935, 0.9686773181790416, 0.7241588724248161, 0.7346244292081747, 0.9918539755167615, 0.9993206954781793, 0.7891742823445649, 0.5086952691411039, 0.5236255825285918, 0.5949435755378039, 0.6725400432393471, 0.7601463305071587, 0.9514031157455285, 0.795664622837653, 
0.9076556265558875, 0.5490354561909838, 0.6260794672898471, 0.6130780740115862, 0.7849288502010843, 0.5328150887956213, 0.9336598272291878, 0.6907783565812118, 0.8691596784552578, 0.5782920956482186, 0.8942933055053158, 0.9344745505354017, 0.6372573278747379, 0.8983904013915365, 0.6582469352317672, 0.6466494113040415, 0.6456503401503461, 0.8741457250444308, 0.7894072006228755, 0.8224983471290946, 0.6404762897098617, 0.8579426857210475, 0.8080096535195641, 0.7494479949019682, 0.9073793534559067, 0.6158187528420229, 0.5372489278220175, 0.7031872889880476, 0.5927743324386786, 0.5864633416229041, 0.8046658416336907, 0.8719132225133726, 0.9467386192679441, 0.9784337353524492, 0.9488178242875251, 0.8981734877229461, 0.7269427298840422, 0.9476891609028084, 0.6288347179182032, 0.9409893739965451, 0.761559088667062, 0.7879767477487758, 0.9333815753080681, 0.6777111988398367, 0.7122257629173736, 0.6353648746955188, 0.9862991819862473, 0.8445431947454448, 0.6449430416047854, 0.6328450234261921, 0.7935118159244308, 0.8569877039238217, 0.747006325512091, 0.8107493522878175, 0.5922896534701884, 0.6165063828091365, 0.8931695454132261, 0.8644846384214491, 0.5148601172661387, 0.9976618951713625, 0.6646085523306258, 0.820989744647443, 0.5129259687327024, 0.5181815588499965, 0.9161563746559851, 0.6171430099575249, 0.737495124297483, 0.6554422480958255, 0.8980563683698133, 0.652339490158816, 0.7805417379418398, 0.9420905215205506, 0.5575001263731527, 0.5204014360989879, 0.7995299866714743, 0.6150131591978272, 0.8924660214471296, 0.8294421447627393, 0.5820696402064329, 0.5038484790048552, 0.5881450540972165, 0.5292741790664977, 0.5849493435866153, 0.7499006171851545, 0.6102133928995066, 0.7910778718773208, 0.5217232434314742, 0.8329359348014249, 0.935087434851384, 0.9017878585974457, 0.9357302307706321, 0.987767369397876, 0.8695887416152168, 0.8404358999553, 0.7119292037319, 0.6344312114151172, 0.7118840901859247, 0.7136760776619959, 0.8219391352898774, 0.8569024993046296, 
0.7783591364348352, 0.5589169397484284, 0.6279124490884044, 0.8137201594717944, 0.98579346908104, 0.8037537057965582, 0.752084065117756, 0.7943260626811108, 0.7894154034687028, 0.6801538886243342, 0.5537622010059584, 0.7082554102684449, 0.5509552902977359, 0.8282250910241438, 0.9421903154826172, 0.5900574023339273, 0.7207607273675041, 0.7035267963846201, 0.8743987389992808, 0.6380314515005865, 0.5842823943373119, 0.6387210441247312, 0.8824768638192786, 0.7013705005479731, 0.8118484209455168, 0.7227998149834476, 0.6369232258204595, 0.7276041731364162, 0.9100936994732393, 0.749770146228979, 0.5446702719403422, 0.8796024853030908, 0.8965564419988798, 0.5797839646453289, 0.5621614604374776, 0.9641343157000137, 0.5638055727567683, 0.7629344968129395, 0.9781764483107231, 0.6383329743603375, 0.8520305536630521, 0.6085049003635397, 0.8753533066171213, 0.8013278678285398, 0.5716823739000017, 0.6236126204623402, 0.9618613238341169, 0.9671675497291756, 0.7608211769022093, 0.7947795298583398, 0.8397290442887881, 0.9394538480588982, 0.5503900014147824, 0.6450451864395301, 0.7087513148892547, 0.9028776793670773, 0.7199559633676245, 0.8483400828213814, 0.8766378056698706, 0.6528337160446285, 0.6159520967857253, 0.6549095185432166, 0.5002581157092232, 0.9781428235514178, 0.5565916559305973, 0.8926670195404613, 0.7792580413466663, 0.8782503397643473, 0.8305580857717824, 0.7049000589858917, 0.689177450232501, 0.5393163983094729, 0.9312972528201564, 0.6578795374383306, 0.5203805285850674, 0.6207698578171987, 0.9500092164412384, 0.6477716585445809, 0.5425143024460333, 0.8479747647730882, 0.9995413591607845, 0.7288040439179316, 0.5911990999211716, 0.532130076897398, 0.6879636703119706, 0.6266004339914968, 0.7751983330140606, 0.9751292293524846, 0.8429585307889182, 0.9914676449428429, 0.8197938563219231, 0.5645215369776948, 0.5552983288335156, 0.6241966720576407, 0.5980008283933739, 0.7826453682712466, 0.5612400744909649, 0.5190605606174283, 0.9959607024568287, 0.5053362251321385, 
0.9309477238832484, 0.8184864819007318, 0.9836978070003262, 0.726870475477235, 0.6382724552535604, 0.7594672767004131, 0.5907443169752906, 0.7656067352263651, 0.6715760589382286, 0.8037404222646587, 0.8376020206931962, 0.733308416842192, 0.58284647881649, 0.7672632875035962, 0.771648145306217, 0.9078101680755795, 0.7000374057943606, 0.7426434327059181, 0.6064408271262707, 0.6185632699133697, 0.7831059119099213, 0.7975854005785581, 0.5218604724207225, 0.8035121319944553, 0.9863729592893529, 0.8240299215526862, 0.9865235424131622, 0.7937914000212132, 0.9491254962493574, 0.9015078556493564, 0.8896840779377746, 0.5394738992857566, 0.9524594708969707, 0.5493304314638661, 0.5483419023379199, 0.8578016212762778, 0.8167758918384005, 0.9521594548948156, 0.5884816511230933, 0.7044503635161663, 0.9381500212686815, 0.8923480905627397, 0.954236582287895, 0.9247947537237361, 0.8233417357589654, 0.939954242376555, 0.8928161831489861, 0.8151520257584495, 0.6230595777442318, 0.6084599454548594, 0.5424142355466853, 0.8300620266066169, 0.6196558114851951, 0.8030469888292354, 0.9588770872829875, 0.9390838043465087, 0.5117851461080596, 0.9608325987667048, 0.7876528848949859, 0.8870507828684435, 0.9360501518559647, 0.9094886892733518, 0.9391963665827143, 0.5970784081580736, 0.8955843967718372, 0.5538535355300991, 0.5214000774096663, 0.6846771824147933, 0.6284372606418673, 0.680338997345381, 0.6582147606880331, 0.7766617155463847, 0.7626453119726726, 0.7191621572606743, 0.9991692787240192, 0.9554457246774117, 0.8876402495048925, 0.5848146006008261, 0.7903883562870295, 0.8365477949307114, 0.9846093764844704, 0.7798427620946815, 0.831631270421157, 0.8637797774197453, 0.6185213081668877, 0.7962758424896812, 0.9214906369523097, 0.6772240419059259, 0.6638314923179687, 0.8955940350860747, 0.8965914999429059, 0.554739842908345, 0.7822340747729093, 0.6269887409452304, 0.6479869857965536, 0.6255369411304441, 0.8464355605203198, 0.679936676612338, 0.504169277210452, 0.6355248536451698, 
0.9370899896188748, 0.5030325219223397, 0.7950130771639957, 0.9974056168499656, 0.5805734079067261, 0.9177273322563079, 0.5135676719688207, 0.8098569767464141, 0.8078774730068767, 0.6899578583785413, 0.6321173087891503, 0.8473868073367148, 0.8254361266959803, 0.5645512957568317, 0.7322955434102539, 0.510174067103629, 0.7175027070220297, 0.755357230996935, 0.8243123130481911, 0.6053464817617812, 0.8285434625504153, 0.7891849039369199, 0.8261813143944385, 0.9640232310704682, 0.82425495714661, 0.6180981705673574, 0.9616993813101447, 0.6190867536684439, 0.8999924405207216, 0.9674575494817264, 0.9851938977172434, 0.8636165261205757, 0.8691801508139445, 0.6580792144826246, 0.5123421422316654, 0.6717795504711408, 0.9154265662558279, 0.9991123548753135, 0.5572115309685661, 0.8426236861304301, 0.5954122753083119, 0.6032961292102683, 0.733820890049605, 0.994426052334207, 0.9572367352132125, 0.5113772930464211, 0.8139449739431914, 0.9972570524550965, 0.7372788970089802, 0.5081767509081823, 0.7416828231329966, 0.8867791296548595, 0.7928342952003812, 0.6375034399402828, 0.9716306702808499, 0.6857401050376377, 0.8921164094424394, 0.619375969972678, 0.86787645942643, 0.5635973589241733, 0.5572978756283985, 0.6419123620399511, 0.814231028003831, 0.7625888346548094, 0.9686457586377143, 0.9252528645559253, 0.5565080893640537, 0.5196521691330528, 0.8814131560106246, 0.7819413046265233, 0.7132057352577024, 0.7255329686389413, 0.5853735220818741, 0.6450998668891681, 0.8380413662446647, 0.7974108022670465, 0.5389867535338841, 0.8800821113150115, 0.6211087777261446, 0.9239858955872566, 0.7158154240525714, 0.6423026904091509, 0.8720000604801836, 0.5122380827831683, 0.7147667179715741, 0.7449661220562941, 0.722352630333889, 0.6013057300834096, 0.8715421614155998, 0.9622084974416377, 0.5427248455726706, 0.7467883131857156, 0.759115373708869, 0.8193472442278907, 0.862955043764934, 0.580864064462907, 0.635466249506744, 0.5680103392551745, 0.5957749710694324, 0.5758818991456277, 
0.5675717108702805, 0.8222612077200404, 0.9830878810847467, 0.7170305272002726, 0.6680233039223132, 0.7871071299446418, 0.5362065210941274, 0.8079479336841299, 0.5437872373518629, 0.9383112799285372, 0.5031139373859953, 0.6609155902353567, 0.8909789803018955, 0.8129728842028069, 0.9886985393108485, 0.8530869959949061, 0.9842228851476673, 0.8776847434484127, 0.7163192705120642, 0.7311490127134816, 0.6519745334951055, 0.840926813034144, 0.7258503135235452, 0.8473743077341159, 0.7253106585789122, 0.8007007728605698, 0.5076411744498353, 0.9164278261218499, 0.7128903745872357, 0.8423300481589413, 0.6076908691315075, 0.8150056039392203, 0.578080122129122, 0.638604272331718, 0.8816984455241064, 0.9766596800056455, 0.7176628524408495, 0.8596260742670637, 0.6950241047083158, 0.610179003897557, 0.9895008469473647, 0.5304966813628457, 0.6855545670615404, 0.9760677569771152, 0.6666319953615094, 0.694587322529936, 0.9367422278073485, 0.8622219252707332, 0.839379735130775, 0.9468306328803433, 0.5347931368302153, 0.6728648503021021, 0.9369670325851996, 0.6501393879524516, 0.620916413752145, 0.6344130474210324, 0.5817491772926604, 0.8854422035856946, 0.9683488515105756, 0.8021713415213947, 0.7881788081311427, 0.9650307786426426, 0.7880879360154548, 0.6411267608423878, 0.5721562982141493, 0.5256143183252291, 0.9860699945921871, 0.6436092122586254, 0.8578069292320725, 0.8122374627838326, 0.8451548913322247, 0.889809958625295, 0.8392277066590909, 0.5003971451566758, 0.6488196974445051, 0.5754413352704839, 0.5049203170345433, 0.598559220372197, 0.935676651351059, 0.6767213243422714, 0.7911243867569515, 0.676808254947961, 0.7144461813851384, 0.5840457079460937, 0.6552604398150512, 0.7346096871502684, 0.5395127932053286, 0.6530222668858912, 0.9608483242813294, 0.8207173455963297, 0.7758639399713414, 0.5133293464181923, 0.9083231982457571, 0.8090448891355515, 0.6549528742138284, 0.550612993123959, 0.913779034947215, 0.9059559292814283, 0.8501515346169797, 0.5208025944134894, 
0.7596508419261396, 0.5324541883721134, 0.7257709052302868, 0.7637855292759161, 0.8324559072269804, 0.6977642188954531, 0.6068948595714565, 0.9899439101688638, 0.8789027913426223, 0.911266791549935, 0.967844251243942, 0.9602453401859216, 0.6008335530165987, 0.629199939767915, 0.8443381211527227, 0.551568420476792, 0.9260153720728608, 0.6987837426570138, 0.8717807473084178, 0.8758997968383877, 0.5622165817968734, 0.6046494438868505, 0.9099940316481394, 0.7432397656493503, 0.7417889842766603, 0.7148146318482151, 0.6585518425403059, 0.85423856488051, 0.6490270091904604, 0.866615982670945, 0.7257434518617865, 0.8247992502251207, 0.7422586046806103, 0.6737965216538854, 0.7462185226607202, 0.8694249573568815, 0.945323671582758, 0.5071382082241762, 0.816816300632493, 0.9972654188023198, 0.7234243795775057, 0.5429707177175085, 0.8815056284140808, 0.632058449800369, 0.785433227373136, 0.5550464315941546, 0.8411863855641012, 0.9015642611748442, 0.8894977742062216, 0.7663635903065797, 0.7697680600711239, 0.6767898579818672, 0.7272693393667238, 0.6107654761988246, 0.7297845135755703, 0.944806185915022, 0.5550980426276668, 0.9413110017479684, 0.6846664901710613, 0.9225558832129716, 0.6477473158224683, 0.62290586284026, 0.8912163608386838, 0.7543037344294048, 0.7675554094453976, 0.7003767230688734, 0.8495116557022184, 0.7864167411177254, 0.5089137297668481, 0.7235773301017616, 0.9765805219288032, 0.7294139122335229, 0.8523475611982966, 0.8455440244111296, 0.6862653175324276, 0.5810198733353091, 0.8035442476741899, 0.5208066233319792, 0.9427617046031707, 0.7643767322849211, 0.5042534093300255, 0.9987421309863016, 0.545520888005881, 0.5162469346201293, 0.5256271891811657, 0.6954976556253295, 0.8283242483346362, 0.8394550229801829, 0.6864712811495752, 0.8594188512793957, 0.5894769310064538, 0.9649760223290229, 0.8012061375038952, 0.7713113941340759, 0.6923279980614228, 0.945657399035254, 0.5515820803745624, 0.6350490594483621, 0.6811922117154123, 0.8497585610663241, 
0.7003274569828021, 0.5531364522740557, 0.8598953137425268, 0.820870943089099, 0.6042744945899636, 0.5270613564487518, 0.964594760217717, 0.6177972808352338, 0.6295999502858594, 0.7643999026368327, 0.6026726495631163, 0.7858060770637197, 0.9864916128795989, 0.9675217759524111, 0.6559534690628872, 0.8506537843784183, 0.7272477824031989, 0.6871544107019265, 0.5755638031364496, 0.5265556561581606, 0.871566810211222, 0.7805286821384438, 0.5469610866282248, 0.5375240815861895, 0.9164037784664207, 0.8903827906853028, 0.9237369426994326, 0.8825129645816473, 0.6836493557879184, 0.9307319575848673, 0.8706603895675535, 0.6390627413672911, 0.619029330928477, 0.8549861259901865, 0.8352857120660578, 0.9410311651668408, 0.7243826198602085, 0.8846607071405235, 0.5747618150553997, 0.9598106416154184, 0.6648375579499217, 0.5226123247012118, 0.8478002702438698, 0.9817384032884153, 0.560748050042081, 0.824343545738565, 0.6053398275106057, 0.5569293139913989, 0.8348855601663907, 0.7155703816906436, 0.715306601395598, 0.5340303224806705, 0.6955499770570749, 0.6203485205959971, 0.5056963906613365, 0.789494861581374, 0.8176930615644111, 0.8943022376980441, 0.5851426357984255, 0.6205742457065906, 0.6781217731257404, 0.7505288824833605, 0.606140886278957, 0.7081970557577916, 0.6644895674621487, 0.7928759053285793, 0.5183338897208473, 0.985654412850151, 0.6368005028766469, 0.8361354996260297, 0.9732116257995389, 0.893187195336772, 0.9647489296303737, 0.5284870552133364, 0.953147080569728, 0.8630316500932589, 0.9200369731024294, 0.8935041684614026, 0.7192406172267881, 0.7024644218826213, 0.6114421512889697, 0.9723399341868437, 0.6549148319843539, 0.9586983406834044, 0.7799519343487602, 0.6797318529831105, 0.6684763869982793, 0.6734950210391222, 0.7356005572080448, 0.8482681081877488, 0.5227534140605625, 0.8921552407265512, 0.9272112886786081, 0.6270564504479572, 0.6183000900591076, 0.7483957489539182, 0.5875944538261448, 0.5107299652984829, 0.5991600653740436, 0.6247725118667051, 
0.907028906932263, 0.8970869233122039, 0.8088247437879073, 0.7961317959877617, 0.6409019189541537, 0.688138191832431, 0.631996408072016, 0.5595940303311839, 0.9331756721111202, 0.772783450049044, 0.578774987429401, 0.9148659770151899, 0.736256485798102, 0.6755676279660624, 0.5121670368886055, 0.5052492785261423, 0.8138176006872789, 0.6103804156843566, 0.5338158763993983, 0.8549701155377674, 0.699874227035296, 0.8163949333409775, 0.568428480870182, 0.8408720178804967, 0.840491997900222, 0.8836661133279116, 0.5573450478899094, 0.7587910254439547, 0.7282271125298299, 0.8581435793974987, 0.6328879828855021, 0.8457373227319835, 0.8329825412174832, 0.5133446047411971, 0.8701266406198114, 0.6157918998160958, 0.6390319089133683, 0.6674808513128856, 0.9832732945214673, 0.9130721224586906, 0.8531657350304231, 0.9988960900063393, 0.782155012623387, 0.6268907189261004, 0.7293185143588321, 0.9369931298488482, 0.9708131987505713, 0.8927746493621092, 0.7360435156445684, 0.6954167796673094, 0.7544165107279961, 0.58581525260838, 0.9636261682672589, 0.6599780116928555, 0.7611740863538838, 0.671106623288225, 0.5463140918943883, 0.9420523973533903, 0.5375955291676211, 0.5071324273753837, 0.83562188479374, 0.7047114861051904, 0.6263720716968152, 0.5224707421644357, 0.7737087765881494, 0.6376724283945072, 0.6321054551047292, 0.6179246527137997, 0.6104259982598921, 0.6192767093387588, 0.5306990197236926, 0.775364037594945, 0.6364028676188902, 0.667303058097081, 0.6470196325071449, 0.5841968782802363, 0.5380163624159677, 0.9434949320483483, 0.6458707461545801, 0.7212386692379804, 0.8088458272500135, 0.8302292118460515, 0.9835298778059269, 0.8173251830608848, 0.9741009596164928, 0.7025671746117673, 0.5727197592376634, 0.689406720678023, 0.5955904493296003, 0.7095741484160336, 0.7865281613393591, 0.611580673963181, 0.9372068604836739, 0.8068754837979034, 0.7332651092561486, 0.6071109662402252, 0.5460266196287892, 0.9419464113058502, 0.5742666101244439, 0.5306085811674934, 
0.9594441174097397, 0.8072514379350257, 0.6143112322202482, 0.6319270700907234, 0.6481193965346459, 0.7812297484911378, 0.8360568739935083, 0.7055775525170515, 0.8199371733973946, 0.6594785193287194, 0.746662689309314, 0.6366449073934783, 0.533557182646705, 0.6470884412018425, 0.9135904774841103, 0.8710544752547114, 0.9269838148061, 0.8479524750173135, 0.8852898879646307, 0.5144491678712646, 0.8442027229354712, 0.6823535945073871, 0.8845401746436856, 0.6000287183925976, 0.6354716684069517, 0.8801388946209163, 0.5567884453370084, 0.8640428733820127, 0.5967372622306297, 0.9468326062536592, 0.7983216079092146, 0.8870554674378681, 0.6510457762808893, 0.8556241701901213, 0.8799713528177315, 0.6537234260607212, 0.524228322119924, 0.6441634738644441, 0.5800866866166734, 0.91226948967332, 0.9391928451867484, 0.7591688923351256, 0.9063827943873628, 0.5350502731398941, 0.888218683854739, 0.7936662431625794, 0.5422881990766188, 0.664965322745668, 0.6690568522232013, 0.7477926226484058, 0.5925727885986272, 0.8012472838457727, 0.7057129507017124, 0.9216701241242302, 0.8643707400998979, 0.910761557259534, 0.9713370777112987, 0.70441510960375, 0.9480832279532554, 0.8461481204642379, 0.7448980235601688, 0.5730721889109405, 0.5703014693014417, 0.721567346884078, 0.5206143500162693, 0.5988276053084847, 0.6545793065017986, 0.8905618072008274, 0.994630908733648, 0.8764090815080869, 0.9713696736161563, 0.5478894404363448, 0.9343213790834463, 0.8322100683644522, 0.7479625609652489, 0.6160206444546917, 0.8927095346044057, 0.9014835151274647, 0.8689991934083992, 0.789445225465352, 0.8975238129940492, 0.9748886537410796, 0.8195523926730047, 0.871996482768503, 0.8822582236719338, 0.9287431665793637, 0.5210243509930857, 0.5875343720894708, 0.5105790356349509, 0.8266037647121194, 0.8312120585557543, 0.5766588889506172, 0.7836535721206596, 0.7134395494867607, 0.9844708784859808, 0.7822920228643244, 0.5279008438717019, 0.9405542458636649, 0.8182107323992251, 0.6249066075697489, 
0.9210726317847195, 0.7031071528992905, 0.8864342261862671, 0.5663396936945384, 0.8053308378088724, 0.8643213451664349, 0.596828272285904, 0.6706875766229502, 0.8807605104412299, 0.547279997565991, 0.8884874347116776, 0.6707542536374106, 0.7357049807242654, 0.8649666356645029, 0.8159547996541939, 0.9299689869139516, 0.7976399285231466, 0.7329055963451188, 0.5120902448560916, 0.6913796569912217, 0.811026139217162, 0.7308219926049662, 0.6698345040431148, 0.6781251256506162, 0.7653434121757529, 0.9387709464026304, 0.6447073372244401, 0.7194083453556659, 0.6944795034968076, 0.5107587674825687, 0.9341555985614227, 0.9014621182850857, 0.9598810478079736, 0.5305919618377835, 0.6510728857999792, 0.7295873541519552, 0.5819229157685468, 0.8134332373175354, 0.9946266942831878, 0.6312894982448319, 0.7158493236434551, 0.7002018501558229, 0.6394206820790282, 0.9627000498607521, 0.5741179367521203, 0.9139130999280842, 0.602139229807097, 0.7724129474416326, 0.5010469180187409, 0.7831936788022473, 0.5735251430266519, 0.895970318883502, 0.7904092532541864, 0.7132215437843841, 0.5419873533002446, 0.5234368285414834, 0.6026981593503116, 0.5659627043515656, 0.9829593868285492, 0.8907550773260982, 0.6114669851962193, 0.786768788300347, 0.9687077070111869, 0.6497748586806027, 0.9305067762142414, 0.5946290680252628, 0.8589490538491142, 0.5664245876952657, 0.8802910696596783, 0.9385205853755592, 0.9003217725333801, 0.6192195279333754, 0.8090433727532438, 0.9520915881401311, 0.5912857285601123, 0.7195929146882418, 0.8582353984568816, 0.5279234974221908, 0.877191781780598, 0.8578030381852612, 0.834049207703782, 0.9729865797295075, 0.5427511399501477, 0.7236055089057254, 0.6311015323068856, 0.8532997606580377, 0.8162097198448435, 0.9757151571622639, 0.5583824154307198, 0.5817942899152402, 0.8776077226182857, 0.8398645574127985, 0.8883648672222317, 0.7636564624769056, 0.7822295873812195, 0.6634276963256515, 0.6732291100095646, 0.6393564543376923, 0.7091782090361747, 0.8928635833072359, 
0.5275266723508899, 0.5372244363335965, 0.5877869768539282, 0.8693756339450238, 0.5480981099250122, 0.8420167592026693, 0.7455572236806133, 0.7094385658815313, 0.928818497651343, 0.7283025602473667, 0.5561013547808862, 0.5450143652619147, 0.9075392894199863, 0.8326201740059358, 0.835907489435314, 0.8423209365446394, 0.9686868304625373, 0.6127217117340349, 0.7009433666148512, 0.8351683749974589, 0.763860844522176, 0.900408907945873, 0.8586594859154424, 0.5400764241224199, 0.7540555665372024, 0.9871687474432276, 0.7588573965344982, 0.6028894487779894, 0.5890737523964236, 0.8194968558547386, 0.7522515425152085, 0.9240605139872171, 0.9280059275792576, 0.6758825854878674, 0.8192953166620562, 0.7857234784957, 0.738594714352665, 0.7990553036158679, 0.981031809480075, 0.729204134348826, 0.8967644543978737, 0.5377483782439989, 0.9587696206672083, 0.6869950796057764, 0.6106473430937576, 0.7006180090787122, 0.8299110319190738, 0.7721748117547483, 0.6289877425953112, 0.852169450106559, 0.9408878682442383, 0.7571854233638968, 0.5467682776034297, 0.5210493592507995, 0.6237428783562918, 0.5643312567704138, 0.5396468806496129, 0.6592130617411488, 0.6603480882236951, 0.6857999085624573, 0.9276235459088638, 0.7375575374215384, 0.5343590357264696, 0.5425320608988642, 0.9196314956895627, 0.9493916278566598, 0.8034602688118215, 0.5053002309257743, 0.8026533364117638, 0.518161294890215, 0.8622278254289085, 0.6772532635623933, 0.6435442212304223, 0.7143296671380696, 0.5090217050181349, 0.514225423586196, 0.9374512892489226, 0.8777647203626431, 0.8850284897281375, 0.7953099503686467, 0.537414487518673, 0.6933668616245569, 0.6604741564824763, 0.9818859836170144, 0.9991578941576336, 0.9754210469980653, 0.6858216647187143, 0.6062430259555114, 0.7341916973892157, 0.9653196716025567, 0.5948623601107901, 0.7632908252775077, 0.6647678138496949, 0.7785177774848232, 0.8136973446661698, 0.7026976409041459, 0.6899344163994254, 0.7801253385324103, 0.7691881635271745, 0.8934552389875992, 
0.9750670591635158, 0.7392310059374825, 0.837985507201453, 0.6711810064925778, 0.6905725737963898, 0.6818388917881091, 0.977876643679697, 0.8987285994779173, 0.9118308578274756, 0.6138092840882029, 0.7515711064027497, 0.6685198026630366, 0.8125565326705748, 0.820198449147971, 0.6387218179113622, 0.9630624543337021, 0.6826492139019879, 0.8644223880106944, 0.7862762123499731, 0.8171043023113949, 0.7267359984611201, 0.9825195447943542, 0.5847096678058181, 0.9922165609770884, 0.9410508476693298, 0.5613912362070624, 0.7164707130771257, 0.9008121219184044, 0.9238243808971593, 0.8393523743458189, 0.7401192785464212, 0.9899729908370646, 0.8594447484787378, 0.6200690273858009, 0.8621824093386834, 0.6904595813219201, 0.981960086089269, 0.5755881209667278, 0.8782784500971987, 0.9007282999343389, 0.6729385723136714, 0.8730967951481167, 0.8364686175394033, 0.578924983488093, 0.5095319541675607, 0.8493423303602026, 0.7288357266080747, 0.5661454678351965, 0.7269542064982555, 0.567532564326888, 0.7398283636533589, 0.5521118732848938, 0.5021677597252113, 0.8592799317276221, 0.6011092905887915, 0.5415059811064351, 0.8850569198263147, 0.6414968674869317, 0.8846045868400931, 0.9154629125237623, 0.6482908811420011, 0.9063985659588216, 0.7348902381033615, 0.9363883031596143, 0.735841507834406, 0.543300069328493, 0.608798243424165, 0.7419438769441531, 0.6714981107437403, 0.5707457080413016, 0.7485813262750394, 0.8376955902194863, 0.7578191392838173, 0.5147542534102348, 0.852817089249662, 0.781621816596062, 0.7501150849428646, 0.8354790543315719, 0.7909674361380616, 0.5372849417644693, 0.5693808692362854, 0.9157684675372172, 0.99631753748127, 0.7400354754045921, 0.5539565786371166, 0.9151043270864261, 0.7040424229356224, 0.7374454271522238, 0.8779371301800494, 0.8653064605622791, 0.5090283605704149, 0.5401922091477273, 0.5632515041019082, 0.6061633643781517, 0.6706056517598067, 0.8174107689368721, 0.6359307135500345, 0.5647951446753916, 0.5108452364990295, 0.7219416827882972, 
0.8166034699256619, 0.82141325203135, 0.7312233969338783, 0.826711784911099, 0.5406239691835866, 0.6383558483052887, 0.5138547447929407, 0.6824632934787923, 0.6646781230885637, 0.8236828491064487, 0.791622994050102, 0.5094465776844563, 0.8204467352641114, 0.6467913418862355, 0.6412624770924706, 0.713679691097133, 0.9256523874484206, 0.7651743605730206, 0.6028085169447441, 0.9405702947199939, 0.7352132793815257, 0.9015402190949755, 0.9951577336031332, 0.9699674797512271, 0.7810088932692738, 0.8040223216043696, 0.6302307570489118, 0.6480097450784921, 0.9712735942929609, 0.9632111469563543, 0.600275662340475, 0.7321065837765139, 0.5341810253319104, 0.6118586269970137, 0.5020051463317708, 0.7508436939979071, 0.5111949500717409, 0.6601798683745319, 0.820770944232923, 0.6369376751406883, 0.8561307783409284, 0.8515153256508232, 0.5143903870944433, 0.9666496072722225, 0.8225923905732995, 0.6626626574993032, 0.784537554149354, 0.5858101229366102, 0.5832828729349435, 0.7468226174791874, 0.919223269716865, 0.6475179922415268, 0.999582342090196, 0.5944008832140786, 0.5347555777074264, 0.9149221255698745, 0.8341878438773689, 0.5106642112881108, 0.9103699765555759, 0.8260220404314191, 0.8079794412774371, 0.9283648095364148, 0.6879606536545199, 0.8458598159572013, 0.7735627914627744, 0.8294387007643846, 0.551011258534003, 0.8382316989586647, 0.8353880749493826, 0.6599959358333103, 0.6527229781904125, 0.6200831809792784, 0.7036028746085925, 0.9153037309629515, 0.7014925463272296, 0.6495027746560902, 0.8064139567050124, 0.6044220988438397, 0.5504918150764534, 0.8385207818103739, 0.5372315041353397, 0.7766565806543302, 0.9327411747015649, 0.5947334075824483, 0.7435401135773769, 0.5585606722059524, 0.7679958166715217, 0.7883836288800776, 0.9043042354247925, 0.7304083778245767, 0.7673821977437634, 0.8136049280805757, 0.6626847530168045, 0.6721686887202004, 0.7969699426088924, 0.5586960516271118, 0.682139107791814, 0.7526729352994475, 0.9426717435779046, 0.6335767011075615, 
0.5073720615690276, 0.5344883229873297, 0.803733296259825, 0.5014026008878998, 0.9551054081433905, 0.8221232651296684, 0.5312274791559024, 0.6709082375492781, 0.9140922788506118, 0.5454808975312415, 0.857603108858795, 0.5271781735738521, 0.708078780166818, 0.7373314448585412, 0.8939738701673754, 0.6284649948249725, 0.9467909807722906, 0.5258022581034176, 0.960227504962821, 0.5938165733191627, 0.7942271660390599, 0.8833881980983851, 0.5710250333657854, 0.9278328222273721, 0.7754954343884156, 0.8004379716884525, 0.7117989003443874, 0.5579569427825494, 0.5567305323691247, 0.9256036648882559, 0.8661509500152882, 0.6812601181387394, 0.8767952204329545, 0.8907272351285201, 0.7480840551328929, 0.9847813188137964, 0.9571440760604275, 0.5214003580766342, 0.6862389049421787, 0.9869373528599193, 0.7040886116426915, 0.7401818952971124, 0.6359852148505803, 0.7000549279603683, 0.5070312318488206, 0.8100359480129256, 0.8899026254489397, 0.5003341867399425, 0.5153970505014807, 0.6232613003135392, 0.8153457718198887, 0.9438952333042505, 0.5902271782431979, 0.599613814104435, 0.978012036577528, 0.927340768710702, 0.8198422915975188, 0.5658795927669609, 0.9993244971060498, 0.8689152892684759, 0.5895016408848268, 0.6081840009219683, 0.6013786340936665, 0.8367724080821988, 0.7399747837074713, 0.8946731592725897, 0.8980460952123344, 0.7352332203175644, 0.8509627557559283, 0.511302758109915, 0.6686300409249372, 0.9671421090474621, 0.5593157432944618, 0.992390808160688, 0.6565131641995039, 0.8014009601911823, 0.9583542018510058, 0.6074045716943665, 0.7422789635455558, 0.9377387346813489, 0.5802865006290883, 0.6955614122751486, 0.590224598696768, 0.5401965159777078, 0.8059941654836643, 0.816967400034558, 0.7376070245991662, 0.5009227055774148, 0.7661227035585674, 0.7691307065600643, 0.7079834046716899, 0.5202503285383692, 0.9481884184755531, 0.833022711719897, 0.9260600147528257, 0.7295909351045318, 0.7836386609269808, 0.8485831744479839, 0.5550550913633142, 0.9450816002722415, 
0.9616744915596243, 0.7785036370783076, 0.8172337509668899, 0.8653817530031824, 0.7893987204388452, 0.5472794317669257, 0.6950684837321448, 0.6856417896309672, 0.7745696722087876, 0.905852790733976, 0.8712668138003052, 0.7015203264441201, 0.9216202430886341, 0.8217841885882302, 0.8992294476012777, 0.7372047152554518, 0.660369237135302, 0.6301917299366756, 0.7564371821766097, 0.851898965048046, 0.9812402279077563, 0.8160335846402733, 0.815698911876499, 0.6641573204053868, 0.6077560652286957, 0.7742438203022146, 0.657562192029206, 0.6121101093497476, 0.5019489488377671, 0.6347035284583684, 0.9090824952488417, 0.7371314439575087, 0.8386683949447502, 0.9862243138635854, 0.7560627623052119, 0.7835884483369622, 0.6373591080336539, 0.7253469091627274, 0.5317503737038227, 0.6511233759367234, 0.6742824291011913, 0.8097156696126959, 0.9149447119031016, 0.8727443649354659, 0.9342210452391198, 0.6999672625310618, 0.5604798282764081, 0.8539507354454877, 0.7975774577294998, 0.5244289960003214, 0.9197787598313738, 0.9283509305699187, 0.5209955025756171, 0.5128173060973981, 0.9523094866606081, 0.5065964495508811, 0.6554807741017876, 0.7444715875360004, 0.6252405086635471, 0.9517718177259991, 0.847837685881193, 0.5019005014537121, 0.5130197473106279, 0.9214764800863426, 0.7763563752688782, 0.9607988293999528, 0.5471582547620161, 0.6290380992611092, 0.699360956880811, 0.8635571517787445, 0.5675935218438048, 0.8847938867187268, 0.8560834439145829, 0.9658305890252399, 0.8151316052502132, 0.6023603755338243, 0.6695252846412346, 0.9474258820140344, 0.8482706373259792, 0.6462264190564488, 0.890758359424505, 0.7280082628553532, 0.9318039360489685, 0.5169376478169017, 0.7362780484110778, 0.8774902184639135, 0.7269908970115916, 0.6893400622030047, 0.6732062065675251, 0.7634050624103794, 0.7776374527505597, 0.8747875433893104, 0.6693913389931989, 0.8411434922591704, 0.9866414079490067, 0.8828518957819806, 0.6598790592326367, 0.5207795118060861, 0.6976399533268982, 0.5805164099063889, 
0.9216514591456437, 0.6150744658426819, 0.9883409801934846, 0.5908393249980967, 0.8649562695246193, 0.5099458278561517, 0.8604000651215993, 0.6143674388792549, 0.858583324406349, 0.6977851251241951, 0.5843460403358336, 0.9821842301503652, 0.7451653506124497, 0.8783213854288091, 0.6842028159251291, 0.9188341286556096, 0.8516812509919173, 0.6439992759066158, 0.8038062570796793, 0.5785745860095488, 0.7448397475300232, 0.6018328854798127, 0.6945992861409105, 0.8011266896493177, 0.8162189529618133, 0.8039087164523444, 0.5116080936219993, 0.9002971548011698, 0.5623067017515799, 0.557802534360447, 0.6517052253273669, 0.7646283711798625, 0.9148659361482887, 0.9894242718038546, 0.721480019119247, 0.669832874589156, 0.747592617412406, 0.8472627108009405, 0.7838940951360935, 0.6502286258484015, 0.6636569107397527, 0.856020786352257, 0.9782291696884626, 0.6370111470941765, 0.5659098845298767, 0.5166604147154261, 0.9529269902392679, 0.6961556207940307, 0.697705679651556, 0.729003500752845, 0.9015576324091512, 0.5228764673569914, 0.6803110377481739, 0.8312892845823082, 0.696470821771128, 0.9344490113731811, 0.8748020101765873, 0.884538362954992, 0.7782142975646569, 0.642825046346927, 0.8434267640316463, 0.6229833465990651, 0.7157628933003539, 0.6916585194535585, 0.6879795008044476, 0.5127940142289702, 0.9342316403685271, 0.6564399329415089, 0.9006172172720219, 0.8798833036407985, 0.9662446570866782, 0.5091884310462454, 0.9281156043448209, 0.5857025991157901, 0.5192832841304442, 0.5270554956427502, 0.7235473671462462, 0.7456070841032629, 0.9620726946288427, 0.5429152112253531, 0.8599342331980209, 0.6883376488137326, 0.6191411036530929, 0.9368519215656085, 0.6042306109681435, 0.832673385802742, 0.8423568436742745, 0.9444106911825849, 0.5126298172565726, 0.6402929368702965, 0.9507957653718864, 0.7738070921815403, 0.6123453575219885, 0.9413785026851476, 0.6245165476028397, 0.8485605759864968, 0.8221467089164584, 0.8225213993332323, 0.9244403839559707, 0.6104516651977803, 
0.9690098235514952, 0.9269455068676917, 0.5111726463800398, 0.5750259459559357, 0.9451506431080456, 0.8974896566585957, 0.7375392481397542, 0.9014990915098771, 0.694748256811417, 0.8795532163342419, 0.8852707656237753, 0.9902694097908344, 0.9021390507544579, 0.8740477015181101, 0.8061293410724306, 0.8084607697740189, 0.7133941531267305, 0.6449286122414389, 0.9052094280569053, 0.7601714571312809, 0.8449334120618934, 0.71259661171449, 0.6176272176002331, 0.8857185652642916, 0.7858100961740527, 0.8871810104198301, 0.732409523661365, 0.5988558053672433, 0.8418324165732058, 0.5731386946345551, 0.7386228715985073, 0.9140875000455776, 0.6158067268928735, 0.9618065089934741, 0.8570712804829919, 0.5979229255752776, 0.7714726564133485, 0.7943454431632031, 0.6778169443235933, 0.7421270370169372, 0.680253603923662, 0.6112191391542903, 0.8379730611251321, 0.9772844532005036, 0.9037503385211367, 0.9748381262451288, 0.7997261131846974, 0.5435075557227955, 0.7393214584581694, 0.9852803829790264, 0.6920246621489976, 0.7226490181221406, 0.5759359312202823, 0.929136460254127, 0.6558563474482834, 0.6682059763173585, 0.6868617476579105, 0.8274173223268029, 0.8911989603069967, 0.9969939472823038, 0.5534378024552735, 0.8981808821813524, 0.996800018469865, 0.8522428061266314, 0.6590524747014231, 0.9777031116897346, 0.7001658993309152, 0.9336365030007865, 0.9871812729813669, 0.7040354509621636, 0.7920115724647689, 0.5250852043812553, 0.5598351696626802, 0.5977915714244972, 0.5303296891129174, 0.704261574002727, 0.6891671751340619, 0.9752786481571107, 0.8858798986872, 0.8422449480938934, 0.8225768198177279, 0.7141432135789426, 0.7750657011781169, 0.6354992447348702, 0.581937301346097, 0.8853915245842885, 0.5349329091686561, 0.8431350501165749, 0.7505381129009263, 0.5954611183775045, 0.8887226516231077, 0.8528998172207612, 0.9515554335574226, 0.7593934233780517, 0.5118459494227272, 0.8818206068830889, 0.9756966339935671, 0.5089839179431888, 0.5652765510739384, 0.8539292626326891, 
0.5515168033251852, 0.8785647666051515, 0.5494894278354823, 0.7047357568216672, 0.6033425325818507, 0.7185193373432119, 0.5385135580889417, 0.9081215599377886, 0.8075564408703813, 0.9623583296776788, 0.9679175631176048, 0.7485817335636901, 0.8468984740777575, 0.5248841615817756, 0.938924995661952, 0.706341857691577, 0.6177201042881741, 0.8024089731221, 0.7142382938959884, 0.9332279055457522, 0.9360483077478234, 0.79738626597739, 0.6813671123790255, 0.5776693781960143, 0.7392280147643515, 0.9229994605213405, 0.9347631086169959, 0.9561048799561769, 0.7981476307829264, 0.7241605599818173, 0.967561844689317, 0.5322815572924631, 0.6004182431366267, 0.5385347819974352, 0.6352528946658584, 0.6512451228568097, 0.9038462574391137, 0.584580272989165, 0.9740215922902873, 0.8171267415549628, 0.5639847089418774, 0.526460462672961, 0.533045307903192, 0.7331240259050074, 0.5355642828765367, 0.997737746827542, 0.9255648085895221, 0.7679063843394016, 0.8295684717004352, 0.5758382126189123, 0.7185678934913123, 0.6175821441977142, 0.8081974540080724, 0.5769331147316914, 0.7523353070713339, 0.5945356611068333, 0.8001698912151758, 0.8550379522017321, 0.5425878624990114, 0.875930798664042, 0.8596334027382408, 0.7339761422662845, 0.9663959061592781, 0.6137844414303044, 0.55647581367112, 0.8880035974316149, 0.5377177351296192, 0.8437015378067452, 0.5444281723508592, 0.9463121983137406, 0.6174669924061025, 0.8750481367374237, 0.7617549694228731, 0.8468469043367028, 0.8762587740393641, 0.6868072717785918, 0.5972088871832414, 0.8490360943068371, 0.7841954209906034, 0.9933991260521469, 0.6341158147747519, 0.9765458096791821, 0.9436814183075646, 0.9731208973210407, 0.6370755612264922, 0.9995247191426884, 0.5865008557853145, 0.6053988665913155, 0.838746042642396, 0.744164652612917, 0.6194574770569095, 0.5138222870730595, 0.5263864023124238, 0.9893362477393158, 0.9155338855876678, 0.552657496912526, 0.5454886893917005, 0.5882279274499573, 0.5356329096098534, 0.7982577388839345, 
0.8440035519485041, 0.5492179263780208, 0.787468208530552, 0.6452119667328453, 0.8835143774092071, 0.6094417156526751, 0.5344434958418249, 0.9526315988890102, 0.6323488167848792, 0.5425380332967356, 0.6430147915381301, 0.8076910879941985, 0.8844146668663897, 0.521813058591496, 0.5464068305853966, 0.8508750834250386, 0.5792673655562577, 0.7869156306688192, 0.8502450184182015, 0.7003475317394554, 0.6112260691437951, 0.7681043432390233, 0.5275720388732715, 0.7840027411448542, 0.8209288695220487, 0.6949521845070749, 0.7791795581426357, 0.9114511628640332, 0.6149048761298601, 0.7302229145049209, 0.5069139025992286, 0.7792546555770627, 0.7711974562062758, 0.837775360267713, 0.7912815380758382, 0.7251524260644187, 0.5562701389467073, 0.6087053817673302, 0.9687884077307376, 0.8268805628906597, 0.8645982146125282, 0.9990653472540721, 0.6542831178249924, 0.7659986370729008, 0.9807779250194575, 0.6317590251987293, 0.6391728557435339, 0.9573333759110536, 0.9511829092931777, 0.8472853276663319, 0.6780700335631419, 0.9087873244134062, 0.6227279270802752, 0.903259879814746, 0.9080501903107551, 0.5874942792502659, 0.6838327110322111, 0.8508015823910808, 0.7910337506255878, 0.9317050224015371, 0.6801247868474807, 0.7590374183136478, 0.9030355726947239, 0.680193982897125, 0.9814758371496444, 0.9510973505967791, 0.9522807873381192, 0.969988942835034, 0.9822915907612633, 0.9563249661386686, 0.5437014341058546, 0.7328940847171628, 0.7362742740853767, 0.7657659385294109, 0.6114925665613022, 0.5502378972689534, 0.946000569804446, 0.609970411668247, 0.7274906331383778, 0.9929888388780133, 0.9212549660457101, 0.7592533573264932, 0.8715317928761441, 0.5269322298377743, 0.5722899216465287, 0.8107295941567023, 0.6930076683894367, 0.7911151918995356, 0.9648999000710745, 0.7188183567353459, 0.5890724150172251, 0.7785856523994679, 0.8013719811190088, 0.8314069353215527, 0.7966685689182655, 0.5745515038004396, 0.5101160927338113, 0.8301029475542473, 0.5551035544245511, 0.9169320944245065, 
0.8533547330373599, 0.6086410609350261, 0.6971499003454407, 0.765702843059149, 0.8657805907535905, 0.7201759072471813, 0.9277631171922335, 0.7006006496495021, 0.8355246014881604, 0.9552482823404276, 0.6673518450375802, 0.6956100505069909, 0.5819415707963997, 0.9815447033229814, 0.7917925394691139, 0.7799423572683718, 0.5320415724875434, 0.5076815600708897, 0.889250463204863, 0.6063611036023452, 0.8266955352065596, 0.9919388570250507, 0.6448595749417718, 0.8282610037498248, 0.7791547952056401, 0.7217740974546126, 0.7994031290260476, 0.8641471631804423, 0.7157327311593011, 0.6737861071840155, 0.825029926054403, 0.6552327012718621, 0.5131594694323733, 0.9395506698902014, 0.9388620474211014, 0.7178564310949733, 0.9078485823202485, 0.7704207744753895, 0.5847232459592728, 0.7237107146113146, 0.5449364872781328, 0.7482533971731669, 0.9013385615385778, 0.7849009206209181, 0.9325145715843466, 0.7240369561521982, 0.5495244459076831, 0.7206739647346012, 0.9043564968040123, 0.648054390468404, 0.89541112948414, 0.7266632189962485, 0.7682629302475661, 0.5778742726010571, 0.9122816458242701, 0.7931368288124526, 0.6796466492531623, 0.6356095538961279, 0.7107352000142286, 0.9746057430349393, 0.6851004192397783, 0.8180931588113939, 0.9725674018210143, 0.5410347676295995, 0.9182255008018233, 0.5135513896363392, 0.9698130081988159, 0.9860296215326286, 0.5618582332784159, 0.8873248908722051, 0.7943313585899582, 0.7558136946185489, 0.5048104253636951, 0.8227911321277241, 0.7290430208570211, 0.6868117117028487, 0.7463184665516589, 0.9991694426147955, 0.7937955795647063, 0.6350566474028629, 0.9735546159884231, 0.7908064242156401, 0.6236416754474172, 0.7834782051355245, 0.8328779027156087, 0.5756829790421629, 0.9898835840782252, 0.9852709666967339, 0.6737160355005756, 0.7177886758717994, 0.7610822932866697, 0.6700241223915114, 0.7852380763305962, 0.9146033520347603, 0.9138100907810742, 0.9102940635360657, 0.9373049044334079, 0.7231406960666569, 0.6869962684438295, 0.6695768842741534, 
0.8774475158359509, 0.9176374935837068, 0.7035157789967329, 0.8830143148542378, 0.594563354170015, 0.617014236189205, 0.935767854104063, 0.5778811618800813, 0.6159042556329803, 0.5326205979885468, 0.6358923299197614, 0.5804558716190042, 0.5573833831979942, 0.5196831192847523, 0.9053551466093067, 0.8619320108835078, 0.6744620166301946, 0.9791931920339856, 0.5657087314955838, 0.7075484727441912, 0.5960570431297914, 0.9281828181847336, 0.7642913437469485, 0.9711037238053004, 0.5942183574937379, 0.5728106130581789, 0.5889574728179986, 0.6115593126584109, 0.9857727356547125, 0.6167308490987105, 0.9628032275440095, 0.7894933394443318, 0.8069833619267034, 0.6780335622818252, 0.6638266856741977, 0.6742026351807313, 0.7371645179760236, 0.6830961744578337, 0.9815121926474797, 0.7346552331545753, 0.6364160058531168, 0.9364123323460328, 0.6007733632060035, 0.8591270757383567, 0.5864859264272441, 0.8557838113521598, 0.7533519817175529, 0.6233073446585341, 0.9462687041060183, 0.6372098224387472, 0.6307696383498846, 0.5503403217822287, 0.5249382413202335, 0.8790055725889769, 0.8556891461597289, 0.9339377643526015, 0.7982647691369418, 0.5422422736729613, 0.5452859089165651, 0.7179124165700286, 0.8200348976008364, 0.8533894291973754, 0.600099014008285, 0.8747573117870548, 0.5347611569490883, 0.6557472467451206, 0.9383200006638248, 0.5985348160959059, 0.6580207836365398, 0.9604077214520627, 0.778613357652348, 0.9192543672348603, 0.8087133177456349, 0.8324912649654934, 0.6579533575625218, 0.8409813030838648, 0.5051898514895576, 0.6561442958450225, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 654, 656, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 752, 754, 756, 758, 761, 763, 765, 767, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 836, 
838, 840, 842, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 868, 870, 873, 875, 880, 882, 884, 886, 888, 890, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 918, 920, 923, 925, 928, 930, 933, 935, 938, 940, 942, 944, 947, 949, 952, 954, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1040, 1042, 1044, 1046, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1082, 1084, 1086, 1088, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1142, 1144, 1147, 1149, 1152, 1154, 1157, 1159, 1165, 1167, 1170, 1172, 1175, 1177, 1180, 1182, 1185, 1187, 1189, 1191, 1193, 1195, 1198, 1200, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1396, 1398, 1400, 1402, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1434, 1436, 1438, 1440, 1443, 1445, 1448, 1450, 1456, 1458, 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1477, 1480, 1482, 1485, 1487, 1490, 1492, 1495, 1497, 1500, 1502, 1505, 1507, 1510, 1512, 1514, 1516, 1518, 1520, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1560, 1562, 1564, 1566, 1569, 1571, 1574, 1576, 1581, 1583, 1586, 1588, 1591, 1593, 1596, 1598, 
1601, 1603, 1606, 1608, 1611, 1613, 1616, 1618, 1621, 1623, 1626, 1628, 1631, 1633, 1156, 1156, 1164, 1162, 1161, 1164, 1162, 1161, 1590, 1455, 1453, 409, 408, 1590, 1455, 1453, 1455, 1453, 1578, 1573, 1585, 1504, 1509, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 409, 408, 1453, 1455, 1455, 1453, 1504, 1509, 1479, 1489, 1479, 1489, 1635, 1630, 1585, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1378, 1504, 1509, 1504, 1509, 1378, 1504, 1509, 1504, 1509, 1578, 1573, 1585, 1590, 1578, 1573, 1585, 1590, 1630, 1635, 1635, 1630, 1578, 1573, 1585, 1590, 946, 958, 1455, 1453, 1489, 1489, 1504, 1509, 1504, 1509, 1578, 1573, 1578, 1573, 1585, 1590, 1578, 1573, 1578, 1573, 1585, 1590, 1580, 1305, 858, 858, 845, 845, 879, 879, 958, 946, 946, 958, 1141, 1141, 1091, 1091, 1164, 1162, 1164, 1162, 1164, 1162, 1164, 1162, 1455, 1453, 1479, 1479, 1479, 1489, 1479, 1489, 1455, 1453, 1305, 1509, 1509, 1504, 1504, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1635, 1630, 1635, 1630, 1580, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 
3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3876, 3878, 3880, 3882, 3884, 3886, 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918, 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950, 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982, 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014, 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078, 4080, 4082, 4084, 4086, 4088, 4090, 4092, 4094, 4096, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 
4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 877, 872, 4355, 858, 927, 922, 956, 951, 653, 653, 892, 1146, 1151, 1151, 1146, 1162, 1179, 1174, 658, 658, 927, 922, 4373, 956, 951, 927, 922, 
1151, 1146, 1146, 1151, 658, 653, 1202, 1197, 4739, 4742, 1202, 1197, 1465, 1460, 1484, 1479, 1452, 1447, 4746, 4748, 1484, 1499, 1494, 4397, 4751, 1452, 1447, 4753, 1465, 1460, 1484, 1509, 4402, 4755, 1452, 1447, 1453, 1455, 1465, 1460, 4480, 1479, 1489, 1499, 1494, 4758, 1452, 1447, 4760, 1452, 1447, 4762, 1460, 4764, 1452, 1447, 4766, 1452, 1447, 4768, 1465, 4770, 1452, 1447, 1452, 1447, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1504, 4774, 1479, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4480, 1479, 1489, 1494, 1499, 4776, 1455, 1453, 1465, 1460, 4480, 4778, 4780, 4439, 4440, 4782, 4714, 1452, 1447, 4785, 1452, 1447, 4787, 4789, 4445, 1447, 1452, 4791, 1452, 1447, 4793, 4795, 4450, 1452, 1447, 4797, 1452, 1447, 4799, 1465, 1460, 4801, 1484, 1484, 1499, 1494, 1499, 1494, 1499, 1494, 1504, 1452, 1447, 4803, 1452, 1447, 4805, 1465, 1460, 4462, 1452, 1447, 4807, 1452, 1447, 4809, 1465, 1460, 4701, 1484, 1479, 1499, 1494, 1504, 1499, 1494, 1499, 1494, 1452, 1447, 1455, 1453, 1465, 1460, 4473, 1479, 1489, 1499, 1494, 4812, 1499, 1494, 4814, 1447, 1452, 1453, 1455, 1465, 1460, 4480, 1479, 1489, 1499, 1494, 4817, 1494, 1499, 4819, 4821, 4823, 1600, 4825, 4827, 1595, 1610, 1605, 1615, 1625, 1620, 1625, 1620, 1625, 1620, 4831, 4833, 4835, 927, 922, 4496, 956, 951, 927, 922, 4502, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 658, 653, 1202, 1197, 4839, 1479, 1484, 1484, 1499, 1494, 1509, 1499, 1494, 1504, 1447, 1452, 1453, 1455, 1465, 1460, 4532, 1378, 1479, 1489, 1499, 1494, 4843, 1499, 1494, 4845, 4847, 4849, 4851, 1600, 1595, 4853, 4855, 4857, 1600, 1595, 4714, 4661, 1590, 1585, 1595, 1600, 1605, 1610, 1559, 1620, 1625, 1635, 1630, 877, 872, 877, 872, 877, 872, 877, 872, 4556, 4558, 877, 872, 858, 877, 872, 877, 872, 4567, 892, 927, 922, 4572, 956, 951, 4867, 927, 922, 937, 932, 956, 951, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1164, 1169, 1151, 1146, 1151, 1146, 
1146, 1151, 1156, 1162, 1164, 1161, 1179, 1174, 1179, 1174, 4636, 1202, 1197, 1151, 1146, 1141, 1151, 1146, 1156, 4875, 1169, 4877, 1161, 1151, 1146, 1141, 1151, 1146, 1156, 4879, 1161, 4881, 1169, 1179, 1174, 1184, 4636, 1202, 1197, 1573, 1590, 1585, 4646, 4883, 1484, 1484, 1578, 1590, 1585, 1452, 1447, 1455, 1453, 1465, 1460, 4646, 1378, 4887, 1499, 1494, 1494, 1499, 1452, 1447, 1455, 1453, 1465, 1460, 4669, 1378, 4889, 1499, 1494, 1494, 1499, 1452, 1447, 1452, 1447, 4891, 1484, 1479, 1509, 1504, 4661, 4663, 1452, 1447, 1453, 1455, 1460, 1465, 4669, 1378, 1489, 1479, 1499, 1494, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4680, 1378, 1479, 1489, 1499, 1494, 1499, 1494, 1452, 1447, 4898, 1452, 1447, 4900, 1465, 1460, 4694, 1452, 1447, 4902, 1452, 1447, 4904, 1465, 1460, 4701, 1484, 1479, 1484, 1489, 1499, 1494, 1509, 1504, 4714, 1590, 1585, 1600, 1595, 1605, 1610, 1559, 1625, 1620, 4906, 4714, 1585, 1590, 1595, 1600, 1605, 1610, 1559, 1625, 1620, 4908, 1578, 1573, 1590, 1585, 1600, 1595, 1610, 1605, 1615, 1625, 1620, 1635, 1630, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5157, 5158, 5159, 5160, 5161, 5162, 5163, 5164, 5167, 5168, 5169, 5170, 5172, 5173, 5175, 5176, 5177, 5178, 5179, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5189, 5190, 5191, 5193, 5194, 5196, 5197, 5199, 5201, 5202, 5204, 5205, 5207, 5209, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5219, 5220, 5221, 5222, 5223, 5224, 5225, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5238, 5239, 5240, 5242, 5243, 5244, 5245, 5246, 5249, 5250, 5252, 5253, 5254, 5256, 5257, 5260, 5261, 5262, 5264, 5265, 5268, 5269, 5270, 5272, 5273, 5275, 5276, 5278, 5279, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5290, 5291, 5293, 5294, 5295, 5296, 5297, 5299, 5300, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5326, 5327, 5329, 5330, 5331, 5332, 5333, 5334, 5335, 5336, 5337, 5338, 5339, 5341, 5342, 5346, 5349, 5350, 5351, 5352, 5353, 5354, 5355, 5356, 5357, 5358, 5362, 5363, 5364, 5365, 5366, 5367, 5368, 5369, 5370, 5371, 5372, 5373, 5374, 5375, 5376, 5377, 5378, 5379, 5380, 5381, 5382, 5384, 5385, 5386, 5387, 5388, 5389, 5390, 5391, 5392, 5393, 5394, 5395, 5396, 5397, 5398, 5399, 5400, 5401, 5402, 5403, 5404, 5406, 5407, 5412, 5413, 5417, 5418, 5419, 5420, 5421, 5422, 5423, 5424, 5425, 5426, 5427, 5428, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5437, 5438, 5439, 5440, 5441, 5442, 5443, 5444, 5445, 5446, 5447, 5448, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5467, 5468, 5469, 5470, 5471, 5472, 5473, 5474, 5475, 5476, 5477, 5478, 5479, 5480, 5481, 5482, 5483, 5484, 5485, 5486, 5487, 
5488, 5489, 5490, 5491, 5492, 5493, 5494, 5495, 5496, 5497, 5498, 5499, 5500, 5501, 5502, 5503, 5504, 5505, 5507, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5517, 5519, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5529, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5541, 5542, 5543, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5558, 5559, 5560, 5561, 5562, 5563, 5564, 5565, 5567, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5578, 5579, 5580, 5581, 5582, 5583, 5584, 5585, 5586, 5587, 5588, 5589, 5590, 5591, 5592, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600, 5601, 5602, 5604, 5605, 5607, 5608, 5609, 5610, 5611, 5613, 5614, 5616, 5617, 5618, 5619, 5620, 5621, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5647, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5660, 5661, 4744, 4741, 4860, 4860, 5248, 5247, 4860, 4860, 5248, 5247, 4860, 4860, 4860, 4860, 4859, 4860, 4859, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5888, 5892, 5894, 5899, 5901, 5904, 5908, 5911, 5913, 5915, 5917, 5919, 5921, 5923, 5925, 5929, 5932, 5935, 5937, 5942, 5944, 5946, 5949, 5951, 5953, 5955, 5958, 5960, 5963, 5965, 5969, 5973, 5977, 5981, 5983, 5985, 5987, 5990, 5992, 5994, 5996, 6002, 6004, 6007, 6009, 6012, 6014, 6016, 6020, 6022, 6024, 6027, 6029, 6031, 6034, 6036, 6038, 6043, 6046, 6048, 6050, 6052, 6054, 6057, 6059, 6061, 6063, 6065, 6067, 6070, 6072, 6074, 6078, 6081, 6083, 6085, 6087, 6090, 6092, 6095, 6097, 6099, 6101, 6104, 6106, 6111, 6114, 6117, 6119, 6121, 6125, 6127, 6129, 6131, 6133, 6137, 6139, 6141, 6144, 6146, 6148, 6150, 6152, 6154, 6158, 6161, 6163, 6167, 6170, 6172, 6174, 6176, 6178, 6180, 6182, 6184, 6189, 6191, 6193, 6198, 6200, 6202, 6205, 6208, 6210, 6213, 6215, 6218, 6223, 6226, 6231, 6235, 6238, 6244, 6246, 6248, 6250, 6254, 6256, 
6258, 6260, 6262, 6266, 6268, 6270, 6272, 6276, 6280, 6282, 6284, 6288, 6290, 6292, 6294, 6296, 6298, 6302, 6304, 6306, 6308, 6310, 6312, 6315, 6317, 6319, 6326, 6328, 6331, 6333, 6335, 6338, 6341, 6343, 6345, 6348, 6350, 6352, 6354, 6356, 6359, 6361, 4864, 4863, 5898, 6188, 6197, 6222, 6221, 6230, 6229, 4864, 4863, 5898, 6188, 6197, 6222, 6221, 6230, 6229, 4864, 4863, 6166, 6188, 6197, 6222, 6221, 6230, 6363, 6364, 6108, 4842, 4841, 6365, 4859, 6366, 4859, 6367, 6368, 5928, 5166, 5208, 6006, 6011, 4842, 4841, 4842, 4841, 4842, 4841, 4859, 6076, 4859, 6077, 4859, 4842, 4841, 6108, 6369, 4859, 6370, 4859, 5200, 5208, 6006, 6011, 4842, 4841, 4842, 4841, 4842, 4841, 5980, 6371, 6372, 4859, 6076, 4859, 6077, 4859, 6006, 6011, 4842, 4841, 6042, 6373, 6076, 6374, 6077, 6375, 4842, 4841, 6108, 6376, 6377, 6378, 6379, 4860, 4859, 4864, 4863, 6166, 6188, 6197, 6222, 6221, 6230, 6229, 6275, 6325, 4893, 6325, 4886, 4885, 4893, 5544, 5557, 6275, 6325, 4893, 4893, 6325, 6323, 4910, 4910, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4866, 4865, 4862, 
4861, 6581, 6582, 5891, 4866, 4865, 6583, 6089, 4837, 6094, 4838, 6510, 4869, 4870, 4871, 4737, 6584, 4737, 4871, 6585, 4737, 4871, 6207, 4874, 4873, 6412, 4737, 4871, 6586, 6587, 4737, 4871, 6588, 6589, 6233, 6413, 4866, 4865, 4862, 4861, 6590, 6591, 4866, 4865, 6160, 6592, 6089, 4837, 6094, 4838, 6510, 4870, 4869, 4871, 4737, 6593, 4737, 4871, 6594, 4737, 4871, 6207, 4874, 4873, 6412, 4737, 4871, 6595, 6596, 4737, 4871, 6597, 6598, 6233, 6413, 4866, 4865, 4862, 4861, 6599, 6600, 4866, 4865, 6160, 6601, 5910, 4837, 6094, 4838, 6510, 4870, 4869, 4738, 4872, 6602, 4738, 4872, 6603, 4738, 4872, 6207, 4874, 4873, 6412, 4738, 4872, 6604, 6605, 4738, 4872, 6606, 6233, 6413, 5174, 5171, 6314, 5615, 5612, 6321, 6609, 6610, 6611, 6116, 5940, 6488, 6123, 6490, 5408, 5405, 6613, 6577, 6358, 6580, 6615, 6496, 6143, 6499, 6439, 5998, 6616, 5328, 5325, 5292, 5226, 6033, 5301, 5298, 6040, 6618, 6045, 4895, 4894, 6435, 5989, 6437, 5241, 5198, 5165, 6619, 5206, 5203, 6620, 5258, 5255, 6621, 5266, 4773, 4772, 6622, 6623, 6624, 4894, 6625, 6626, 4895, 6627, 6628, 5979, 6420, 5948, 6422, 5192, 6629, 6630, 6631, 6632, 6080, 4830, 4829, 5251, 6633, 6496, 6143, 6499, 5174, 5171, 6314, 5615, 5612, 6321, 6634, 6635, 6636, 6116, 5940, 6488, 6123, 6490, 5408, 5405, 6638, 6577, 6358, 6580, 6640, 6496, 6143, 6499, 6420, 5948, 6422, 5192, 5198, 5195, 6641, 5206, 5203, 6642, 5258, 5255, 6643, 5266, 4773, 4772, 6644, 6645, 6646, 4894, 6647, 6648, 4895, 6649, 6650, 5979, 5292, 5226, 6033, 5301, 5298, 6040, 6651, 6045, 4895, 4894, 6435, 5989, 6437, 5241, 6439, 5998, 6652, 5328, 5325, 6654, 6655, 6656, 6657, 6080, 4830, 4829, 5251, 6658, 6496, 6143, 6499, 5258, 5255, 6659, 5266, 5263, 6660, 5274, 5271, 5277, 6661, 6662, 6026, 4895, 4894, 5292, 5289, 6033, 5301, 5298, 6040, 6663, 4895, 4894, 6045, 6461, 6056, 6463, 5328, 5325, 6467, 6069, 6469, 5343, 5340, 6665, 6667, 6080, 5359, 4830, 4829, 6496, 6143, 6499, 6089, 4837, 6094, 4838, 6103, 4872, 4871, 6484, 5603, 5383, 6314, 5615, 5612, 6321, 6669, 
6670, 6671, 6116, 6113, 6488, 6123, 6490, 5408, 5405, 6672, 6493, 6674, 6494, 6358, 6580, 6676, 6677, 6496, 6143, 6499, 4866, 4865, 4862, 4861, 6678, 6679, 4866, 4865, 6160, 6680, 6169, 5456, 6510, 4870, 4869, 6186, 4872, 4871, 6681, 6195, 4872, 4871, 6682, 6204, 4872, 4871, 6207, 4874, 4873, 6525, 6220, 6217, 6683, 6684, 6228, 6225, 6685, 6686, 6233, 6531, 6548, 6286, 6550, 4895, 4894, 6554, 6240, 6556, 4897, 4896, 5603, 5606, 6314, 5612, 5566, 6321, 6687, 6688, 6546, 6689, 6577, 6358, 6580, 6548, 6286, 6550, 4895, 4894, 6554, 6240, 6556, 4897, 4896, 5603, 5530, 6314, 5612, 5566, 6321, 6690, 6691, 6692, 6566, 6693, 6577, 6358, 6580, 6535, 6252, 6694, 4897, 4896, 6540, 6264, 6695, 4897, 4896, 6548, 6286, 6550, 4895, 4894, 5603, 5606, 6314, 5612, 5566, 6321, 6696, 6697, 6546, 6698, 6568, 6337, 5637, 6699, 6572, 6347, 5648, 6548, 6286, 6550, 4895, 4894, 6554, 6300, 6556, 4897, 4896, 5606, 5603, 6314, 5615, 5612, 6321, 6700, 6701, 6566, 6702, 6568, 6337, 5637, 6703, 6572, 6347, 5648, 4910, 6577, 6358, 6580, 250, 251, 252, 253, 254, 255, 6912, 6913, 6914, 6915, 6916, 6918, 6919, 6920, 6922, 6923, 6924, 6925, 6926, 6927, 6928, 6929, 6930, 6932, 6933, 6935, 6936, 6937, 6938, 6939, 6940, 6941, 6942, 6943, 6945, 6946, 6947, 6949, 6950, 6951, 6952, 6953, 6954, 6955, 6957, 6958, 6959, 6961, 6962, 6963, 6964, 6965, 6966, 6967, 6968, 6969, 6971, 6972, 6974, 6975, 6976, 6977, 6978, 6979, 6980, 6981, 6982, 6984, 6985, 6986, 6988, 6989, 6990, 6991, 6992, 6993, 6994, 6996, 6997, 6998, 7000, 7001, 7002, 7003, 7004, 7005, 7006, 7007, 7008, 7010, 7011, 7013, 7014, 7015, 7016, 7017, 7018, 7019, 7020, 7021, 7023, 7024, 7025, 7026, 7027, 7028, 7029, 7030, 7031, 7032, 7033, 7034, 7037, 7038, 7039, 7040, 7041, 7042, 7043, 6612, 7045, 7046, 7047, 6614, 7049, 7050, 7051, 7052, 7053, 7055, 7056, 7057, 7058, 7059, 7060, 7061, 7062, 7064, 7065, 7066, 7067, 7068, 7069, 7070, 7071, 7072, 7074, 7075, 7077, 7078, 7080, 7081, 7082, 7084, 7086, 7087, 7089, 7090, 7092, 7093, 7094, 7095, 7096, 7101, 
7102, 7103, 7104, 7106, 7107, 7108, 7109, 7110, 7111, 7112, 7113, 7114, 7115, 7118, 7119, 7120, 7121, 7122, 7123, 7124, 6637, 7126, 7127, 7128, 6639, 7130, 7131, 7132, 7133, 7134, 7135, 7136, 7137, 7138, 7140, 7141, 7143, 7144, 7146, 7147, 7148, 7150, 7152, 7153, 7155, 7156, 7158, 7159, 7160, 7161, 7162, 7163, 7164, 7166, 7167, 7168, 7169, 7170, 7171, 7172, 7173, 7174, 7176, 7177, 7182, 7183, 7184, 7185, 7187, 7188, 7189, 7190, 7191, 7193, 7194, 7196, 7197, 7198, 7199, 7201, 7202, 7203, 7204, 7205, 7206, 7207, 7208, 7209, 7211, 7212, 7213, 7214, 7215, 7216, 7217, 7218, 7219, 7220, 7221, 7222, 7223, 7226, 7227, 7228, 7229, 7230, 7231, 7232, 7233, 7234, 7235, 7236, 7237, 7238, 7239, 7240, 7241, 7242, 7243, 7244, 7245, 7246, 7247, 7250, 7251, 7252, 7253, 7254, 7255, 7256, 7258, 7260, 7261, 7262, 7263, 7265, 7266, 7267, 7268, 7269, 7270, 7271, 7272, 7274, 7275, 7276, 7278, 7279, 7280, 7281, 7282, 7283, 7284, 7285, 7287, 7288, 7289, 7291, 7292, 7293, 7294, 7295, 7296, 7297, 7298, 7299, 7300, 7302, 7303, 7304, 7306, 7307, 7308, 7309, 7310, 7311, 7312, 7313, 7314, 7315, 7316, 7317, 7318, 7319, 7320, 7321, 7322, 7323, 7324, 7326, 7328, 7329, 7330, 7331, 7332, 7333, 7334, 7335, 7336, 7337, 7338, 7339, 7340, 7341, 7342, 7343, 7344, 7345, 7346, 7347, 7350, 7352, 7353, 7354, 7355, 7356, 7358, 7359, 7360, 7361, 7363, 7364, 7365, 7366, 7367, 7368, 7369, 7370, 7371, 7372, 7373, 7374, 7375, 7376, 7378, 7380, 7381, 7382, 7384, 7385, 7386, 7387, 7388, 7389, 7390, 7391, 7392, 7393, 7394, 7395, 7396, 7397, 7398, 7399, 7400, 7401, 7402, 7403, 7405, 7407, 7408, 7409, 7411, 7412, 7413, 7414, 7415, 7416, 7417, 7100, 7098, 7181, 7179, 7225, 7224, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 
247, 248, 249, 250, 251, 252, 253, 254, 255, 7424, 7426, 7429, 7437, 7439, 7441, 7443, 7446, 7449, 7452, 7457, 7459, 7462, 7470, 7472, 7474, 7476, 7479, 7482, 7485, 7490, 7492, 7495, 7503, 7505, 7507, 7509, 7512, 7515, 7518, 7520, 7523, 7526, 7529, 7530, 7535, 7547, 7549, 7552, 7555, 7562, 7564, 7566, 7568, 7582, 7588, 7591, 7594, 7595, 7600, 7614, 7616, 7618, 7620, 7629, 7632, 7635, 7644, 7647, 7653, 7655, 7657, 7661, 7664, 7667, 7670, 7676, 7681, 7684, 7694, 7698, 7701, 7704, 7705, 7710, 7720, 7722, 7725, 7731, 7733, 7736, 7739, 7743, 7746, 7749, 7757, 7762, 7764, 7767, 7778, 7783, 7785, 7788, 7791, 7798, 7802, 7807, 7809, 7812, 7826, 7831, 7833, 7836, 7435, 7433, 7456, 7468, 7466, 7489, 7501, 7499, 7522, 7533, 7538, 7540, 7542, 7544, 7546, 7559, 7561, 7576, 7574, 7572, 7578, 7580, 7851, 7852, 7585, 7587, 7598, 7603, 7605, 7607, 7609, 7611, 7613, 7628, 7626, 7624, 7639, 7641, 7643, 7853, 7854, 7650, 7652, 7674, 7679, 7855, 7856, 7687, 7689, 7693, 7691, 7753, 7708, 7713, 7712, 7715, 7717, 7719, 7729, 7753, 7755, 7760, 7771, 7772, 7774, 7776, 7781, 7793, 7795, 7797, 7801, 7805, 7816, 7817, 7819, 7820, 7822, 7824, 7829, 7840, 7841, 7843, 7844, 7846, 7848, 7850, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7936, 7938, 7946, 7948, 7956, 7958, 7975, 7979, 7980, 7989, 7992, 7994, 7998, 8001, 8004, 8005, 8011, 8013, 8015, 8016, 8017, 7939, 8039, 8040, 7445, 6934, 6931, 7448, 7454, 7451, 8041, 7949, 8042, 8043, 7478, 6973, 6970, 7481, 7487, 7484, 8044, 7959, 8045, 8046, 7511, 7012, 7009, 7514, 7966, 7517, 8047, 7528, 7525, 7970, 8048, 7971, 8049, 8050, 8051, 8052, 8053, 7972, 7554, 7551, 8054, 8055, 7079, 7076, 7073, 8056, 8057, 8058, 8059, 8060, 8061, 8063, 8064, 7593, 7590, 7984, 8065, 
7985, 8066, 8067, 8068, 8069, 8070, 8071, 7145, 7142, 7139, 8072, 8073, 8074, 7634, 7631, 8075, 8076, 8077, 7993, 8078, 8080, 8081, 7659, 7195, 7192, 7669, 7666, 8082, 8002, 8083, 8003, 8084, 8086, 8087, 8014, 8088, 8089, 7697, 7751, 7748, 8090, 7703, 7700, 8009, 8091, 8010, 8092, 8093, 8094, 8095, 8096, 8014, 8097, 7745, 7751, 7748, 8098, 8099, 8021, 8100, 8022, 7769, 7766, 8101, 8102, 8103, 8104, 8025, 8105, 8026, 7790, 7787, 7792, 8106, 8107, 8108, 8030, 8109, 8031, 8110, 8032, 7814, 7811, 8111, 8112, 8113, 8114, 8115, 8116, 8035, 8117, 8036, 7838, 7835, 8118, 8119, 8120, 8121, 8122, 8123, 8124, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6921, 7428, 8213, 8216, 8217, 8218, 8219, 8220, 8221, 6960, 7461, 8223, 8226, 8227, 8228, 8229, 8230, 8231, 6999, 7494, 8233, 8236, 8237, 8238, 8239, 8240, 8241, 8243, 8244, 8245, 8247, 8253, 8254, 8255, 8198, 7083, 8258, 8259, 8260, 8261, 8200, 8269, 8270, 8271, 8273, 7149, 8280, 8281, 8282, 8283, 8286, 8287, 8202, 8291, 8203, 8295, 8296, 8297, 8204, 8298, 8299, 8205, 8301, 8303, 8206, 7277, 7724, 8307, 7742, 7290, 7286, 8310, 8311, 8312, 8314, 8315, 8316, 8318, 8319, 7277, 7724, 8324, 7742, 7290, 7286, 8326, 8327, 8328, 8331, 8333, 8334, 8335, 8340, 8342, 8343, 8344, 8345, 8349, 8351, 8353, 8354, 8355, 8362, 8364, 8365, 8366, 8251, 8249, 8265, 8257, 8268, 8277, 8275, 8279, 8289, 8294, 8306, 8323, 8338, 8347, 8360, 8358, 8373, 8371, 8369, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8448, 8449, 8450, 8451, 8455, 8457, 8458, 8459, 8460, 8464, 8466, 8467, 8468, 8469, 8473, 8475, 8480, 8482, 8483, 8485, 8487, 8488, 8489, 8493, 8495, 8497, 8498, 8500, 8502, 8503, 8506, 8507, 8509, 8512, 8513, 8514, 8515, 8516, 8517, 8518, 8520, 8522, 8527, 8528, 8529, 8530, 8531, 8532, 8534, 8538, 8542, 8548, 8552, 8478, 8554, 8555, 8556, 8557, 8479, 8558, 8492, 8559, 8560, 8561, 8501, 8562, 8563, 8511, 8510, 8564, 8525, 8565, 8321, 8537, 8536, 8566, 8541, 8540, 8567, 8547, 8546, 8545, 8568, 8569, 8551, 8550, 8570, 8571, 8572, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8704, 8706, 8707, 8709, 8711, 8712, 8714, 8716, 8717, 8722, 8727, 8733, 8738, 8740, 8741, 8746, 8749, 8222, 8232, 8242, 8757, 8477, 8758, 8721, 8762, 8725, 8764, 8491, 8765, 8731, 8768, 8732, 8771, 8772, 8736, 8737, 8313, 8774, 8524, 8776, 8329, 8336, 
8777, 8778, 8544, 8780, 8781, 8356, 8783, 8784, 8785, 8786, 8367, 8788, 8789, 8790, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8969, 8970, 8974, 8976, 8961, 8977, 8454, 8964, 8978, 8463, 8967, 8979, 8472, 8981, 8983, 8985, 8987, 8989, 8767, 8991, 8994, 8734, 8992, 8995, 8973, 8996, 8998, 8775, 8748, 9000, 9001, 9004, 9007, 9009, 9012, 9015, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 
197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9220, 9222, 9223, 9225, 9226, 9228, 8980, 8724, 8760, 8763, 8986, 8729, 8770, 9237, 8773, 9240, 8519, 8997, 9244, 8533, 9246, 9247, 9248, 9250, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9221, 9224, 9227, 9479, 9483, 9236, 9488, 9491, 9492, 9493, 9494, 9495, 8988, 8982, 9243, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9731, 9732, 9238, 9241, 9245, 9730, 9729, 9728, 9740, 9741, 9742, 9251, 9011, 8782, 8779, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9480, 9985, 9989, 9990, 9991, 9486, 9987, 9988, 9995, 
9996, 9997, 9998, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10240, 10241, 10242, 10245, 10246, 10247, 10248, 10250, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 
185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10498, 9484, 9481, 10502, 9994, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10753, 10754, 10755, 10756, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9992, 10499, 11010, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11264, 11266, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11520, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11776, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 12032, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11521, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 655, 657, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 753, 755, 757, 759, 762, 764, 766, 768, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 837, 
839, 841, 843, 847, 849, 851, 853, 855, 857, 860, 862, 864, 866, 869, 871, 874, 876, 881, 883, 885, 887, 889, 891, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 919, 921, 924, 926, 929, 931, 934, 936, 939, 941, 943, 945, 948, 950, 953, 955, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1041, 1043, 1045, 1047, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1083, 1085, 1087, 1089, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1143, 1145, 1148, 1150, 1153, 1155, 1158, 1160, 1166, 1168, 1171, 1173, 1176, 1178, 1181, 1183, 1186, 1188, 1190, 1192, 1194, 1196, 1199, 1201, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1397, 1399, 1401, 1403, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1435, 1437, 1439, 1441, 1444, 1446, 1449, 1451, 1457, 1459, 1462, 1464, 1468, 1470, 1472, 1474, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1515, 1517, 1519, 1521, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1561, 1563, 1565, 1567, 1570, 1572, 1575, 1577, 1582, 1584, 1587, 1589, 1592, 1594, 1597, 1599, 
1602, 1604, 1607, 1609, 1612, 1614, 1617, 1619, 1622, 1624, 1627, 1629, 1632, 1634, 1039, 1048, 1163, 1163, 136, 1163, 1163, 137, 760, 1442, 1442, 1429, 1429, 751, 1454, 1454, 1442, 1442, 572, 572, 760, 1395, 1395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1429, 1429, 1454, 1454, 1442, 1442, 1404, 1404, 1235, 1235, 1256, 1256, 1568, 1568, 751, 1454, 1454, 1442, 1442, 374, 374, 1454, 1454, 1442, 1442, 395, 395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1454, 1454, 1442, 1442, 494, 1395, 1395, 1404, 1404, 523, 1395, 1395, 1404, 1404, 572, 572, 751, 751, 572, 572, 751, 751, 1522, 1522, 1568, 1568, 572, 572, 751, 751, 917, 917, 1442, 1442, 1235, 1256, 1395, 1395, 1404, 1404, 769, 769, 769, 769, 751, 751, 769, 769, 769, 769, 760, 760, 770, 770, 867, 878, 835, 844, 867, 878, 917, 917, 957, 957, 1039, 1048, 1081, 1090, 1124, 1124, 1124, 1124, 1163, 1163, 1163, 1163, 1442, 1442, 1235, 1256, 1235, 1235, 1256, 1256, 1454, 1454, 1579, 1395, 1404, 1395, 1404, 1454, 1454, 1442, 1442, 1442, 1442, 1454, 1454, 1522, 1522, 1568, 1568, 1579, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 
3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3885, 3887, 3889, 3891, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 3907, 3909, 3911, 3913, 3915, 3917, 3919, 3921, 3923, 3925, 3927, 3929, 3931, 3933, 3935, 3937, 3939, 3941, 3943, 3945, 3947, 3949, 3951, 3953, 3955, 3957, 3959, 3961, 3963, 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3995, 3997, 3999, 4001, 4003, 4005, 4007, 4009, 4011, 4013, 4015, 4017, 4019, 4021, 4023, 4025, 4027, 4029, 4031, 4033, 4035, 4037, 4039, 4041, 4043, 4045, 4047, 4049, 4051, 4053, 4055, 4057, 4059, 4061, 4063, 4065, 4067, 4069, 4071, 4073, 4075, 4077, 4079, 4081, 4083, 4085, 4087, 4089, 4091, 4093, 4095, 4097, 1655, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1681, 1688, 1689, 1690, 1691, 1696, 1697, 1698, 1701, 1702, 1708, 1709, 1710, 1722, 1723, 1726, 1727, 1730, 1731, 1733, 1734, 1737, 1738, 1741, 1742, 1744, 1745, 
1748, 1751, 1765, 1766, 1781, 1782, 1788, 1789, 1790, 1791, 1794, 1795, 1797, 1800, 1801, 1804, 1805, 1806, 1807, 1811, 1812, 1815, 1816, 1817, 1818, 1822, 1823, 1826, 1827, 1830, 1831, 1843, 1844, 1847, 1848, 1854, 1855, 1858, 1859, 1879, 1884, 1885, 1888, 1889, 1897, 1902, 1903, 1906, 1907, 1908, 1909, 1910, 1911, 1913, 1914, 1915, 1916, 1923, 1926, 1929, 1930, 1931, 1932, 1933, 1934, 1940, 1946, 1958, 1959, 1962, 1964, 1983, 1984, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2008, 2022, 2025, 2031, 2033, 2039, 2042, 2050, 2051, 2058, 2061, 2082, 2085, 2094, 2097, 2107, 2108, 2110, 2111, 2119, 2120, 2122, 2123, 2135, 2136, 2138, 2140, 2152, 2153, 2166, 2167, 2176, 2177, 2184, 2197, 2200, 2213, 2216, 2219, 2220, 2223, 2224, 2230, 2231, 2234, 2235, 2257, 2258, 2269, 2270, 2273, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4353, 4352, 4354, 4356, 4358, 4357, 4360, 4359, 4361, 4635, 4362, 4364, 4363, 4366, 4365, 4367, 4369, 4368, 4383, 4635, 4371, 4370, 4372, 4375, 4374, 4377, 4376, 4379, 4378, 
4381, 4380, 4383, 4382, 4385, 4384, 4740, 4743, 4387, 4386, 4389, 4388, 4390, 4391, 4393, 4392, 4747, 4749, 4394, 4396, 4395, 572, 4752, 4459, 4458, 4754, 4399, 4398, 4400, 4401, 572, 4756, 4525, 4403, 4477, 4527, 4530, 4478, 4438, 4535, 4481, 4685, 4684, 4759, 4405, 4404, 4761, 4407, 4406, 4763, 4408, 4765, 4410, 4409, 4767, 4412, 4411, 4769, 4413, 4771, 4415, 4414, 4416, 4415, 4417, 4418, 4420, 4419, 4421, 4422, 4424, 4423, 4425, 4426, 4428, 4427, 4429, 4775, 4430, 4523, 4522, 4476, 4525, 4527, 4431, 4530, 4432, 4438, 4535, 4433, 4686, 4652, 4777, 4435, 4434, 4437, 4436, 4438, 4779, 4781, 572, 572, 4783, 572, 4442, 4441, 4786, 4444, 4443, 4788, 4790, 1466, 4447, 4446, 4792, 4449, 4448, 4794, 4796, 1466, 4459, 4451, 4798, 4656, 4655, 4800, 4461, 4452, 4802, 4453, 4454, 4467, 4455, 4469, 4468, 4523, 4456, 4457, 4459, 4458, 4804, 4656, 4655, 4806, 4461, 4460, 1429, 4698, 4697, 4808, 4696, 4695, 4810, 4700, 4699, 1466, 4463, 4516, 4465, 4464, 4524, 4467, 4466, 4469, 4468, 4470, 4476, 4471, 4477, 4530, 4478, 4472, 4535, 4474, 4685, 4475, 4813, 4536, 4672, 4815, 4476, 4525, 4477, 4527, 4530, 4478, 4479, 4535, 4481, 4685, 4684, 4818, 4686, 4652, 4820, 4822, 4824, 4482, 4826, 4828, 4483, 4485, 4484, 4486, 4488, 4487, 4490, 4489, 4492, 4491, 4832, 4834, 4836, 4494, 4493, 4495, 4498, 4497, 4500, 4499, 4501, 4504, 4503, 4506, 4505, 4508, 4507, 4510, 4509, 4511, 4513, 4512, 4515, 4514, 4840, 4516, 4517, 4518, 4520, 4519, 4521, 4523, 4522, 4524, 4526, 4525, 4528, 4527, 4530, 4529, 4531, 4533, 4535, 4534, 4685, 4684, 4844, 4536, 4672, 4846, 4848, 4850, 4852, 4538, 4537, 4854, 4856, 4858, 4540, 4539, 769, 769, 4715, 4710, 4541, 4729, 4720, 4542, 4543, 4722, 4544, 4546, 4545, 4548, 4547, 4550, 4549, 4552, 4551, 4554, 4553, 4555, 4557, 4560, 4559, 4561, 4563, 4562, 4565, 4564, 4566, 4568, 4570, 4569, 4571, 4574, 4573, 4868, 4576, 4575, 4578, 4577, 4580, 4579, 4582, 4581, 4584, 4583, 4586, 4585, 4588, 4587, 4589, 4590, 4591, 4593, 4592, 4595, 4594, 4597, 4596, 4598, 4599, 4600, 
4602, 4601, 4604, 4603, 4606, 4605, 4607, 4609, 4608, 4610, 4612, 4611, 4614, 4613, 4635, 4638, 4615, 4617, 4616, 4618, 4620, 4619, 4621, 4876, 4622, 4878, 4623, 4625, 4624, 4626, 4628, 4627, 4629, 4880, 4630, 4882, 4631, 4633, 4632, 4634, 4635, 4638, 4637, 4639, 4715, 4640, 4650, 4884, 4641, 4642, 4643, 4645, 4644, 4647, 4674, 4649, 4648, 4678, 4677, 4650, 4651, 4888, 4685, 4684, 4686, 4652, 4647, 4674, 4649, 4648, 4678, 4677, 4650, 4651, 4890, 4685, 4684, 4686, 4652, 4654, 4653, 4656, 4655, 4892, 4657, 4658, 4660, 4659, 4662, 4662, 4665, 4664, 4675, 4666, 4677, 4667, 4668, 4670, 4682, 4671, 4685, 4684, 4687, 4672, 4674, 4673, 4676, 4675, 4678, 4677, 4679, 4681, 4683, 4682, 4685, 4684, 4687, 4686, 4689, 4688, 4899, 4691, 4690, 4901, 4693, 4692, 1429, 4696, 4695, 4903, 4698, 4697, 4905, 4700, 4699, 1466, 4702, 4703, 4704, 4705, 4707, 4706, 4709, 4708, 4713, 4715, 4710, 4717, 4711, 4720, 4719, 4712, 4723, 4722, 4907, 4713, 4716, 4715, 4718, 4717, 4720, 4719, 4721, 4723, 4722, 4909, 4725, 4724, 4727, 4726, 4729, 4728, 4731, 4730, 4732, 4734, 4733, 4736, 4735, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 
229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1679, 1680, 1682, 1683, 1684, 1685, 1686, 1687, 1692, 1693, 1694, 1695, 1699, 1700, 1703, 1704, 1705, 1706, 1707, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1724, 1725, 1728, 1729, 1732, 1735, 1736, 1739, 1740, 1743, 1746, 1747, 1749, 1750, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1783, 1784, 1785, 1786, 1787, 1792, 1793, 1796, 1798, 1799, 1802, 1803, 1808, 1809, 1810, 1813, 1814, 1819, 1820, 1821, 1824, 1825, 1828, 1829, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1845, 1846, 1849, 1850, 1851, 1852, 1853, 1856, 1857, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1880, 1881, 1882, 1883, 1886, 1887, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1898, 1899, 1900, 1901, 1904, 1905, 1912, 1917, 1918, 1919, 1920, 1921, 1922, 1924, 1925, 1927, 1928, 1935, 1936, 1937, 1938, 1939, 1941, 1942, 1943, 1944, 1945, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1960, 1961, 1963, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1985, 1986, 1995, 1996, 2003, 2004, 2005, 2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2029, 2030, 2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2052, 2053, 2054, 2055, 2056, 2057, 2059, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 
2080, 2081, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2095, 2096, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2109, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2121, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2137, 2139, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2178, 2179, 2180, 2181, 2182, 2183, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2198, 2199, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2214, 2215, 2217, 2218, 2221, 2222, 2225, 2226, 2227, 2228, 2229, 2232, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2271, 2272, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 5156, 5155, 5180, 5360, 4811, 4811, 5180, 5360, 4811, 4811, 5344, 5347, 5360, 5410, 5409, 5415, 5414, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5889, 5893, 5895, 5900, 5902, 5905, 5909, 5912, 5914, 5916, 5918, 5920, 5922, 5924, 5926, 5930, 5933, 5936, 5938, 5943, 5945, 5947, 5950, 5952, 5954, 5956, 5959, 5961, 5964, 5966, 5970, 5974, 5978, 5982, 5984, 5986, 5988, 5991, 5993, 5995, 5997, 6003, 6005, 6008, 6010, 6013, 6015, 6017, 6021, 6023, 6025, 6028, 6030, 6032, 6035, 6037, 6039, 6044, 6047, 6049, 6051, 6053, 6055, 6058, 6060, 6062, 6064, 6066, 6068, 6071, 6073, 6075, 6079, 6082, 6084, 6086, 6088, 6091, 6093, 6096, 6098, 6100, 6102, 6105, 6107, 6112, 6115, 6118, 6120, 6122, 6126, 6128, 6130, 6132, 6134, 6138, 6140, 6142, 6145, 6147, 6149, 6151, 6153, 6155, 6159, 6162, 6164, 6168, 6171, 6173, 6175, 6177, 6179, 6181, 6183, 6185, 6190, 6192, 6194, 6199, 6201, 6203, 6206, 6209, 6211, 6214, 6216, 6219, 6224, 6227, 6232, 6236, 6239, 6245, 
6247, 6249, 6251, 6255, 6257, 6259, 6261, 6263, 6267, 6269, 6271, 6273, 6277, 6281, 6283, 6285, 6289, 6291, 6293, 6295, 6297, 6299, 6303, 6305, 6307, 6309, 6311, 6313, 6316, 6318, 6320, 6327, 6329, 6332, 6334, 6336, 6339, 6342, 6344, 6346, 6349, 6351, 6353, 6355, 6357, 6360, 6362, 6157, 5890, 6165, 5903, 6196, 5508, 5506, 5518, 5516, 6157, 6156, 6165, 5903, 6196, 5508, 5506, 5518, 5516, 6157, 6156, 6165, 6187, 6196, 5508, 5506, 5518, 2399, 2400, 6274, 6110, 5939, 2419, 5941, 2424, 6001, 2431, 2432, 5927, 5957, 5962, 5259, 5267, 5968, 5931, 5972, 5971, 5976, 5975, 5934, 4750, 6000, 4750, 6001, 6110, 5939, 6322, 2503, 5941, 2508, 6001, 5957, 5962, 5259, 5267, 5968, 5967, 5972, 5971, 5976, 5975, 6041, 2555, 2556, 5999, 4784, 6000, 4784, 6001, 5259, 5267, 6019, 6018, 6041, 2605, 5345, 2607, 5348, 2613, 6110, 6109, 6322, 2641, 2642, 2644, 2645, 6136, 6135, 6157, 6156, 6165, 6187, 6196, 5508, 5506, 5518, 5516, 6274, 6324, 6237, 6324, 6242, 6241, 6243, 6253, 6265, 6274, 6324, 6278, 6279, 6324, 6322, 6330, 6340, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 
254, 255, 6503, 6502, 6501, 6400, 2289, 2290, 6504, 6506, 6505, 2294, 6401, 6407, 6478, 6479, 6509, 6511, 6402, 6404, 6403, 2304, 6518, 6516, 2307, 6521, 6519, 6522, 6524, 6405, 5896, 6527, 6526, 2316, 2317, 6529, 6528, 2320, 2321, 6530, 5897, 6503, 6502, 6501, 6500, 2328, 2329, 6506, 6505, 6504, 2333, 6476, 6407, 6478, 6479, 6509, 6512, 6511, 6404, 6403, 2343, 6518, 6516, 2346, 6521, 6519, 6522, 6524, 6405, 5906, 6527, 6526, 2355, 2356, 6529, 6528, 2359, 2360, 6530, 5907, 6503, 6502, 6501, 6500, 2367, 2368, 6506, 6505, 6504, 2372, 6406, 6407, 6408, 6479, 6509, 6512, 6511, 6410, 6409, 2382, 6518, 6517, 2385, 6521, 6520, 6522, 6524, 6523, 6411, 6527, 6526, 2394, 2395, 6529, 6528, 2398, 6530, 6234, 6417, 6559, 6418, 6563, 6562, 6564, 2409, 2410, 2411, 6486, 6485, 6487, 6489, 6124, 6492, 6491, 2420, 4745, 6578, 6579, 2425, 4745, 6497, 6498, 6460, 6440, 6617, 6465, 6464, 6452, 6451, 6414, 6455, 6454, 6456, 2441, 6433, 6459, 6458, 6434, 6436, 4816, 6438, 6425, 6415, 2451, 6427, 6426, 2454, 6442, 6441, 2457, 6444, 6429, 6428, 2461, 2462, 2463, 6416, 2465, 2466, 6431, 2468, 2469, 6432, 6419, 6421, 4816, 6423, 2475, 2476, 2477, 2478, 6472, 6474, 6473, 6475, 2483, 4750, 6497, 6498, 6417, 6545, 6418, 6563, 6562, 6564, 2493, 2494, 2495, 6486, 6485, 6487, 6489, 6124, 6492, 6491, 2504, 4757, 6578, 6579, 2509, 4757, 6497, 6498, 6419, 6421, 4816, 6423, 6425, 6424, 2519, 6427, 6426, 2522, 6442, 6441, 2525, 6444, 6429, 6428, 2529, 2530, 2531, 6430, 2533, 2534, 6431, 2536, 2537, 6432, 6452, 6451, 6453, 6455, 6454, 6456, 2545, 6433, 6459, 6458, 6434, 6436, 4816, 6438, 6460, 6440, 6653, 6465, 6464, 2559, 2560, 2561, 2562, 6472, 6474, 6473, 6475, 2567, 4784, 6497, 6498, 6442, 6441, 2573, 6444, 6443, 2576, 6446, 6445, 6447, 2580, 2581, 6450, 6449, 6448, 6452, 6451, 6453, 6455, 6454, 6456, 2591, 6459, 6458, 6457, 6460, 6462, 4811, 6465, 6464, 6466, 6468, 4816, 6471, 6470, 2606, 2608, 6472, 6475, 6474, 6473, 5361, 6497, 6498, 6476, 6477, 6478, 6479, 6482, 6481, 6480, 6483, 6559, 6544, 
6561, 6563, 6562, 6564, 2631, 2632, 2633, 6486, 6485, 6487, 6489, 6124, 6492, 6491, 6673, 5411, 6675, 5416, 6578, 6579, 2649, 2650, 6495, 6497, 6498, 6503, 6502, 6501, 6500, 2658, 2659, 6506, 6505, 6504, 2663, 6507, 6508, 6509, 6512, 6511, 6515, 6514, 6513, 2672, 6518, 6517, 6516, 2676, 6521, 6520, 6519, 6522, 6524, 6523, 6212, 6527, 6526, 2686, 2687, 6529, 6528, 2690, 2691, 6530, 6234, 6547, 6549, 6287, 6552, 6551, 6553, 6555, 6301, 6558, 6557, 6545, 6560, 6561, 6562, 6563, 6564, 2710, 2711, 6565, 2713, 6532, 6578, 6579, 6547, 6549, 6287, 6552, 6551, 6553, 6555, 6301, 6558, 6557, 6545, 6560, 6561, 6562, 6563, 6564, 2733, 2734, 2735, 6565, 2737, 6533, 6578, 6579, 6534, 6536, 2743, 6538, 6537, 6539, 6541, 2748, 6543, 6542, 6547, 6549, 6287, 6552, 6551, 6545, 6544, 6561, 6562, 6563, 6564, 2762, 2763, 6565, 2765, 6567, 6569, 6570, 2769, 6571, 6573, 6574, 6547, 6549, 6287, 6552, 6551, 6553, 6555, 6301, 6558, 6557, 6560, 6559, 6561, 6563, 6562, 6564, 2789, 2790, 6565, 2792, 6567, 6569, 6570, 2796, 6571, 6573, 6574, 6575, 6576, 6578, 6579, 250, 251, 252, 253, 254, 255, 2285, 2286, 2287, 2288, 6917, 2291, 2292, 2293, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2305, 2306, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 6944, 2318, 2319, 6948, 2322, 2323, 2324, 2325, 2326, 2327, 6956, 2330, 2331, 2332, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2344, 2345, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 6983, 2357, 2358, 6987, 2361, 2362, 2363, 2364, 2365, 2366, 6995, 2369, 2370, 2371, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2383, 2384, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 7022, 2396, 2397, 6607, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 7035, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 7044, 2421, 2422, 2423, 7048, 2426, 2427, 2428, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2452, 2453, 2455, 2456, 2458, 2459, 2460, 7085, 2464, 7088, 2467, 7091, 2470, 
2471, 2472, 2473, 2474, 2479, 2480, 2481, 2482, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 7116, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 7125, 2505, 2506, 2507, 7129, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2520, 2521, 2523, 2524, 2526, 2527, 2528, 7151, 2532, 7154, 2535, 7157, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2557, 2558, 2563, 2564, 2565, 2566, 2568, 2569, 2570, 2571, 2572, 2574, 2575, 2577, 2578, 2579, 7200, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2609, 2610, 2611, 2612, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 7248, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2643, 2646, 2647, 2648, 7264, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 7273, 2660, 2661, 2662, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 7301, 2688, 2689, 7305, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 7325, 2712, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 7348, 2736, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 7377, 2764, 2766, 2767, 2768, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 7404, 2791, 2793, 2794, 2795, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 7099, 7097, 7180, 7178, 6666, 6664, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 
241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7425, 7427, 7430, 7438, 7440, 7442, 7444, 7447, 7450, 7453, 7458, 7460, 7463, 7471, 7473, 7475, 7477, 7480, 7483, 7486, 7491, 7493, 7496, 7504, 7506, 7508, 7510, 7513, 7516, 7519, 6608, 7524, 7527, 7036, 7531, 7536, 7548, 7550, 7553, 7556, 7563, 7565, 7567, 7569, 7583, 7589, 7592, 7117, 7596, 7601, 7615, 7617, 7619, 7621, 7630, 7633, 7636, 7645, 7648, 7654, 7656, 7658, 7662, 7665, 7668, 7671, 7677, 7682, 7685, 7695, 7699, 7702, 7249, 7706, 7711, 7721, 7723, 7726, 7732, 7734, 7737, 7740, 7744, 7747, 7750, 7758, 7763, 7765, 7768, 7779, 7784, 7786, 7789, 7349, 7799, 7803, 7808, 7810, 7813, 7827, 7832, 7834, 7837, 7434, 7432, 7455, 7467, 7465, 7488, 7500, 7498, 7521, 7532, 7537, 7539, 7541, 7543, 7545, 7558, 7560, 7575, 7573, 7571, 7577, 7579, 2865, 2866, 7105, 7586, 7597, 7602, 7604, 7606, 7608, 7610, 7612, 7627, 7625, 7623, 7638, 7640, 7642, 2895, 2896, 7186, 7651, 7673, 7678, 2911, 2912, 6668, 7688, 7692, 7690, 7752, 7707, 7259, 7257, 7714, 7716, 7718, 7728, 7752, 7754, 7759, 7770, 7327, 7773, 7775, 7780, 7351, 7794, 7796, 7800, 7804, 7815, 7379, 7818, 7383, 7821, 7823, 7828, 7839, 7406, 7842, 7410, 7845, 7847, 7849, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7937, 7431, 7947, 7464, 7957, 7497, 7557, 7570, 7584, 7622, 7637, 7649, 7663, 7672, 7686, 7696, 8012, 7727, 7735, 7738, 7741, 7436, 2807, 2808, 7942, 7941, 7940, 7943, 7945, 7944, 2815, 7469, 2819, 2820, 7952, 7951, 7950, 7953, 7955, 7954, 2827, 7502, 2831, 2832, 7962, 7961, 7960, 7963, 7965, 7964, 2839, 7968, 7967, 7969, 2843, 7534, 2845, 2846, 2847, 2848, 2849, 7054, 7974, 7973, 2854, 2855, 7978, 7977, 7976, 2860, 2861, 2862, 2863, 2864, 8062, 2868, 
2869, 7982, 7981, 7983, 2873, 7599, 2875, 2876, 2877, 2878, 2879, 2880, 7988, 7987, 7986, 2885, 2886, 2887, 7991, 7990, 2891, 2892, 2893, 7175, 8079, 2898, 2899, 7997, 7996, 7995, 8000, 7999, 2907, 7675, 2909, 7680, 8085, 2914, 2915, 7730, 2919, 2920, 8018, 8020, 8019, 2927, 8007, 8006, 8008, 2931, 7709, 2933, 2934, 2935, 2936, 2937, 7730, 2941, 8018, 8020, 8019, 2948, 2949, 7756, 2951, 7761, 8024, 8023, 2955, 2956, 2957, 2958, 7777, 2960, 7782, 8028, 8027, 8029, 2965, 2966, 2967, 7357, 2969, 7362, 2971, 7806, 8034, 8033, 2975, 2976, 2977, 2978, 2979, 2980, 7825, 2982, 7830, 8038, 8037, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8193, 8192, 2806, 2809, 2810, 2811, 2812, 2813, 2814, 8195, 8194, 2818, 2821, 2822, 2823, 2824, 2825, 2826, 8197, 8196, 2830, 2833, 2834, 2835, 2836, 2837, 2838, 2840, 2841, 2842, 2844, 2850, 2851, 2852, 7063, 8199, 2857, 2858, 2859, 8262, 7581, 2870, 2871, 2872, 2874, 8201, 2882, 2883, 2884, 8284, 2888, 2889, 7165, 2894, 7646, 2900, 2901, 2902, 7660, 2904, 2905, 7210, 2908, 2910, 7683, 8209, 8208, 2918, 8212, 8211, 8207, 2924, 2925, 2926, 2928, 2929, 2930, 2932, 8320, 8209, 8208, 2940, 8212, 8211, 8210, 2945, 2946, 2947, 2950, 2952, 2953, 2954, 2959, 2961, 2962, 2963, 2964, 2968, 2970, 2972, 2973, 2974, 2981, 2983, 2984, 2985, 8250, 8248, 8264, 8256, 8267, 8276, 8274, 8278, 8288, 8293, 8305, 8322, 8337, 8346, 8359, 8357, 8372, 8370, 8368, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 
173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2804, 2805, 8214, 8452, 8456, 2816, 2817, 8224, 8461, 8465, 2828, 2829, 8234, 8470, 8474, 8476, 8481, 2853, 2856, 8486, 8263, 2867, 8490, 2881, 8496, 8285, 8499, 2890, 2897, 8504, 2903, 8508, 2906, 2913, 2916, 2917, 8308, 2921, 2922, 2923, 8521, 8523, 2938, 2939, 8325, 2942, 2943, 2944, 8535, 8539, 8543, 8549, 8553, 8246, 3004, 3005, 3006, 3009, 8252, 3011, 8272, 3015, 3016, 3019, 8290, 3021, 3022, 8302, 8300, 3028, 8317, 3035, 8526, 8332, 8330, 3043, 8341, 8339, 3047, 8352, 8350, 8348, 3052, 3053, 8363, 8361, 3057, 3058, 3059, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8705, 8215, 8453, 8710, 8225, 8462, 8715, 8235, 8471, 8484, 8494, 8505, 8739, 8309, 8742, 8747, 8750, 8708, 8713, 8718, 3002, 8719, 8759, 8720, 3010, 8266, 3013, 8726, 8766, 8730, 3020, 8292, 3024, 3025, 8735, 8304, 8744, 
3033, 8745, 3036, 8752, 8753, 3041, 3042, 8754, 3045, 3046, 8755, 3049, 3050, 3051, 8787, 8756, 3055, 3056, 8791, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8723, 8728, 8743, 8751, 8960, 2994, 8962, 8963, 2997, 8965, 8966, 3000, 8968, 3003, 3007, 3012, 3014, 3018, 8990, 3023, 3026, 8971, 8993, 3029, 8972, 3031, 3034, 8999, 8975, 3038, 3040, 3044, 3048, 9010, 3054, 8792, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 
191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2993, 2995, 2996, 2998, 2999, 3001, 9229, 9216, 9230, 9231, 9232, 9217, 9235, 3027, 9239, 3030, 9218, 9242, 3037, 9219, 9002, 9005, 9008, 9013, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9473, 9475, 9477, 3008, 3017, 9485, 3032, 3039, 9003, 9006, 9249, 9014, 9482, 9478, 9489, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8761, 9233, 9733, 9734, 9735, 9476, 9474, 9472, 3063, 3067, 3069, 9739, 9738, 9737, 9736, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9984, 9234, 3060, 3061, 
3062, 9986, 9487, 9490, 3071, 3072, 3073, 3074, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8984, 8769, 10243, 3065, 3068, 3070, 10249, 10251, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 
180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10244, 10497, 10496, 10503, 10500, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3064, 3066, 10501, 3076, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 
105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11008, 11009, 3075, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11265, 11011, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9993, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10752, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3077, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 12288, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 
1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 256
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3328
#define SIZE_OF_AC 9472
// Evaluates a fixed arithmetic-circuit layout over shared memory.
// A supplies the 13*t input values; each of the 37 gate slots g reads
// operands R[B[g]] and R[C[g]] and writes R[13*t + g], multiplying when
// Op[g] is true and adding otherwise. The gates are grouped into levels
// separated by __syncthreads() so a gate only reads slots produced by
// earlier levels. After n_iter passes, thread 0 publishes the running
// total of the last slot into A[0].
// Assumes a single block of exactly THREADS_PER_BLOCK threads (i < t),
// since R is indexed directly by the global thread id.
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[50*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
// NOTE(review): every thread stores 0 to `final` with no barrier before
// the first read in the loop; benign only because all threads write the
// same value and thread 0 is the sole accumulator — confirm.
final=0;
// Stage the 13 input banks from global into shared memory.
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
// Level 1: gates 0-3 depend only on the input banks.
R[i + 13*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 14*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 15*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 16*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
__syncthreads();
// Subsequent levels: each barrier makes the previous level's slots
// visible before they are consumed as operands.
R[i + 17*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 18*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 19*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
__syncthreads();
R[i + 20*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 21*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 22*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
__syncthreads();
R[i + 23*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 24*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
__syncthreads();
R[i + 25*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 26*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
__syncthreads();
R[i + 27*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 28*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
__syncthreads();
R[i + 29*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 30*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
// Tail of the circuit: one gate per level (a serial dependency chain).
R[i + 31*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
__syncthreads();
R[i + 32*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
__syncthreads();
R[i + 33*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
__syncthreads();
R[i + 34*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
__syncthreads();
R[i + 35*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
__syncthreads();
R[i + 36*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
__syncthreads();
R[i + 37*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
__syncthreads();
R[i + 38*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
__syncthreads();
R[i + 39*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
__syncthreads();
R[i + 40*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
__syncthreads();
R[i + 41*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
__syncthreads();
R[i + 42*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
__syncthreads();
R[i + 43*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
__syncthreads();
R[i + 44*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
__syncthreads();
R[i + 45*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
__syncthreads();
R[i + 46*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
__syncthreads();
R[i + 47*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
__syncthreads();
R[i + 48*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
__syncthreads();
R[i + 49*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
// Thread 0 accumulates the circuit output (slot 49, lane 0) per iteration.
if (i==0) { final += R[49*t]; }
__syncthreads();
}
// Publish the accumulated result back to global memory.
if (i==0) { A[0]= final;}
}
|
22,587 | #include "includes.h"
// Single-block sum reduction: sums a[0..N-1] in BS-sized chunks through
// shared memory and writes the total to a[0].
// Launch with one block of BS threads (the reduction tree starts at 16,
// which matches BS == 32 lanes).
// Fixes vs. the original: barriers between the shared-memory store and
// the reduction (and between reduction levels), zero-padding of the tail
// chunk instead of reading past the end of a, and a single-thread final
// store instead of every thread racing on a[0].
__global__ void kernel(float *a, size_t N)
{
    int tid = threadIdx.x;
    __shared__ float s[BS];
    int blocks = (N+BS-1)/BS;
    float sum = 0.0f;
    for (int ib=0; ib<blocks; ib++)
    {
        int off = ib*BS+tid;
        // Zero-pad the final partial chunk rather than reading out of bounds.
        s[tid] = (off < (int)N) ? a[off] : 0.0f;
        __syncthreads();                 // loads visible before the tree reads s[]
        for (int skip=16; skip>0; skip>>=1)
        {
            if (tid < skip)
                s[tid] += s[tid+skip];
            __syncthreads();             // finish each level before the next reads it
        }
        sum += s[0];
        __syncthreads();                 // next iteration's store must not race this read
    }
    // All threads hold the same total; one thread publishes it.
    if (tid == 0)
        a[0] = sum;
}
22,588 | #include "includes.h"
// Numerically stable logistic sigmoid: 1 / (1 + exp(-x)).
// Branching on the sign of x keeps the expf argument non-positive, so the
// intermediate never overflows for large |x|.
// Fix: the original used double literals (1.0), silently promoting the
// whole expression to double precision on every call.
__device__ inline float stableLogit(float x) {
    if(x >= 0) {
        float z = expf(-x);
        return 1.0f / (1.0f + z);
    } else {
        float z = expf(x);
        return z / (1.0f + z);
    }
}
// LSTM output step for a rows x cols batch: out = sigmoid(o) * tanh(cell),
// where the output-gate pre-activation o is the fourth of the four packed
// gate blocks in xW and sU (each row carries 4*cols values) plus bias b.
// Blocks stride over rows and threads stride over columns, so any launch
// configuration covers the whole batch.
__global__ void gLSTMOutputForward(float* out, const float* cell, const float* xW, const float* sU, const float* b, size_t rows, size_t cols) {
  for(int row = blockIdx.x; row < rows; row += gridDim.x) {
    float* outRow = out + row * cols;
    const float* cellRow = cell + row * cols;
    const float* xWRow = xW + row * cols * 4;
    const float* sURow = sU + row * cols * 4;
    for(int col = threadIdx.x; col < cols; col += blockDim.x) {
      // The output gate occupies the fourth cols-wide block of the gate layout.
      int gateIdx = col + 3 * cols;
      float outGate = stableLogit(xWRow[gateIdx] + sURow[gateIdx] + b[gateIdx]);
      outRow[col] = outGate * tanhf(cellRow[col]);
    }
  }
}
22,589 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#define max 999
// Floyd-Warshall all-pairs shortest paths on a single n x n thread block:
// thread (i,j) owns distance cell A[i*size+j]; path[i*size+j] records the
// last intermediate node used. `check` is unused (passed by value).
// Fix: a __syncthreads() barrier per k-round — without it, threads raced
// between relaxing round k and reading round-k results in round k+1.
__global__ void kernel(int n, int size, int * A, int * path, int check)
{
    int i = threadIdx.x;
    int j = threadIdx.y;
    for(int k=0;k<n;k++){
        if(A[i*size+j] > A[i*size+k]+A[k*size+j])
        {
            A[i*size+j] = A[i*size+k]+A[k*size+j];
            path[i*size+j]=k;
        }
        if(i == j){
            A[i*size+j] = 0;
        }
        // Every thread must finish round k before any thread starts
        // reading row/column k results in round k+1.
        __syncthreads();
    }
}
// Reads an adjacency matrix from graph.txt (first value: node count, then
// MAXV*MAXV edge weights, `max` = 999 meaning "no edge"), runs the
// single-block GPU Floyd-Warshall kernel, and prints the result matrix.
int main(){
    FILE *fp;
    printf("Begin reading the file...\n");
    fp = fopen("graph.txt","r");
    // Bail out early if the input is missing (the original dereferenced a
    // NULL FILE* and had the close condition inverted: `if(!fp) fclose(fp)`).
    if(!fp){
        printf("Cannot open graph.txt\n");
        return 1;
    }
    int MAXV;
    fscanf(fp, "%d", &MAXV);
    //get the edges matrix
    int *edges;
    edges = (int*)malloc(sizeof(int)*MAXV*MAXV);
    //create the new matrix A, the shortest distance will be stored here
    int *A;
    A = (int*)malloc(sizeof(int)*MAXV*MAXV);
    //create the path matrix
    int *path;
    path = (int*)malloc(sizeof(int)*MAXV*MAXV);
    //initialize the edges matrix to "no edge"
    for(int i = 0; i<MAXV; i++){
        for(int j = 0; j<MAXV; j++){
            edges[i*MAXV+j]= max;
        }
    }
    //read the edge weights from the file
    for(int i = 0; i< MAXV; i++){
        for(int j = 0; j< MAXV; j++){
            fscanf(fp, "%d ",&edges[i*MAXV+j]);
        }
    }
    fclose(fp);
    printf("Read file complete.\n");
    printf("the number of node is %d.\n", MAXV);
    //initialize A and path: A == edges, path == -1 (no intermediate node)
    for(int i=0;i<MAXV;i++)
    {
        for(int j=0;j<MAXV;j++)
        {
            A[i*MAXV+j]=edges[i*MAXV+j];
            path[i*MAXV+j]=-1;
        }
    }
    //print the edge matrix
    for(int i = 0; i<MAXV; i++){
        for(int j = 0; j<MAXV; j++){
            printf("%d ", edges[i*MAXV+j]);
        }
        printf("\n");
    }
    printf("end\n");
    printf("///////////////////////////\n");
    //device matrices. Plain linear allocations: the kernel and the memcpys
    //below index the matrices as dense row-major arrays, so the original
    //cudaMallocPitch (whose pitch was never used) would corrupt the copies
    //whenever the pitch exceeded the row width.
    int *d_A;
    int *d_path;
    cudaMalloc(&d_A, sizeof(int)*MAXV*MAXV);
    cudaMalloc(&d_path, sizeof(int)*MAXV*MAXV);
    //copy from the host to the device
    cudaMemcpy(d_A, A,
               sizeof(int)*(MAXV*MAXV), cudaMemcpyHostToDevice);
    cudaMemcpy(d_path, path,
               sizeof(int)*(MAXV*MAXV), cudaMemcpyHostToDevice);
    //timing
    double start, stop, lapse;
    start = clock();
    int check = 0;
    //call the kernel function (single block; requires MAXV*MAXV <= max
    //threads per block, i.e. MAXV <= 32 on most devices)
    int numBlocks = 1;
    dim3 threadsPerBlock(MAXV, MAXV);
    kernel<<<numBlocks,threadsPerBlock>>>(MAXV, MAXV, d_A, d_path, check);
    //wait for the kernel so the timing measures the actual GPU work
    //(kernel launches are asynchronous)
    cudaDeviceSynchronize();
    //check whether the program ran (check is passed by value, so it stays 0)
    printf("%d\n",check);
    stop = clock();
    lapse = (stop - start) / (double)CLOCKS_PER_SEC;  // clock() ticks -> seconds
    printf("time: %fs\n", lapse);
    printf("///////////////////////////\n");
    //copy the data back to the host
    cudaMemcpy(A, d_A,
               sizeof(int)*(MAXV*MAXV), cudaMemcpyDeviceToHost);
    cudaMemcpy(path, d_path,
               sizeof(int)*(MAXV*MAXV), cudaMemcpyDeviceToHost);
    //print the shortest-distance matrix
    for(int i=0;i < MAXV;i++){
        for(int j = 0;j < MAXV; j++){
            if(A[i*MAXV+j] == max)
            {
                printf("%s ", "max");
            }
            else
            {
                printf("%d ", A[i*MAXV+j]);
            }
        }
        printf("\n");
    }
    //free the memory
    free(edges);
    free(A);
    free(path);
    cudaFree(d_A);
    cudaFree(d_path);
    //*FINISHED*//
    return 0;
}
|
22,590 | #include "includes.h"
// Maps samples in `from` to (-1, 1) through the standard-normal CDF:
// to[id] = 2 * (Phi(from[id] * sqrt(size)) - 0.5).
// Indexing supports a 2D grid of 1D blocks.
// Fix: use the single-precision math functions (normcdff, sqrtf) and float
// literals — the original's normcdf/sqrt/0.5/2 promoted every element to
// double precision.
__global__ void UniformNormalDistribution(float *from, float *to, int size)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
           + blockDim.x * blockIdx.x
           + threadIdx.x;
    float tmp;
    if (id < size)
    {
        tmp = normcdff(from[id] * sqrtf((float)size));
        to[id] = (tmp - 0.5f) * 2.0f;
    }
}
22,591 | #include <iostream>
#include <iomanip>
#include <ctime>
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include "cuda_runtime.h"
using namespace std;
#define TPB 1024
#define min(a,b) ((a < b) ? a : b)
// Adds each block's partial sum (array_psums[blockIdx.x]) to every element
// that block covers in `array`. Used to propagate higher-level scan results
// back down to the finer level.
// NOTE(review): there is no `tid < length` guard — when the grid overshoots
// the logical array length (last partial block), this writes past the end of
// the allocation; confirm the caller pads, or a bound should be added.
__global__
void scat_part_sum(double * array, double * array_psums) {
// Distributes the values from array_psums (array of partial sums) to every element
// in the array. Every thread in a block gets the same partial sum added to it
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
//__syncthreads();
array[tid] += array_psums[blockIdx.x];
//__syncthreads();
}
// Blelloch scan up-sweep phase within each block: builds a binary reduction
// tree in place, so after the loop element (step-1), (2*step-1), ... of each
// block hold partial sums and the block's last element holds the block total.
// If array_aggr1 is non-NULL, each block also exports its total (clamped to
// the last valid element for the final partial block) for the next scan level.
__global__
void upsweep (double * array, double * array_aggr1, int size) {
// Performs an upsweep
int bid = blockIdx.x * blockDim.x;
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Tree depth is bounded by the smaller of the data size and the block width.
int min_size = min(size, TPB);
__syncthreads();
// Merge elements like a binary tree; the barrier after each level makes that
// level's sums visible before the next level reads them.
for (int step = 2; step <= min_size ; step *= 2) {
if (tid % step == (step - 1) && (tid - (step / 2) >= bid)) {
array[tid] += array[tid - (step / 2)];
}
__syncthreads();
}
__syncthreads();
// Export each block's total to array_aggr1 for the next (coarser) scan level.
// The branch is uniform across the block (same kernel argument), so the
// barrier inside it is reached by all threads.
if (array_aggr1 != NULL) {
if (threadIdx.x == (TPB - 1)) {
if (tid < size) {
array_aggr1[blockIdx.x] = array[tid];
}
else {
// Partial final block: its total sits at the last valid element.
array_aggr1[blockIdx.x] = array[size - 1];
}
}
__syncthreads();
}
}
// Blelloch exclusive down-sweep within each block: seeds the block's last
// element with the identity (0), then walks the reduction tree back down,
// at each level swapping the left child into place and accumulating into the
// right, so element i ends up holding the sum of all elements before it.
// Fix: the swap temporary was declared `int`, truncating the double value
// being rotated through the tree and corrupting every non-integral scan.
__global__
void excl_downsweep (double * array, int size) {
    int bsize = blockIdx.x * blockDim.x;
    int next_block = (blockIdx.x + 1) * blockDim.x;
    int tid = bsize + threadIdx.x;
    double tmp;  // was `int` — silently truncated the doubles being swapped
    int min_size = min(size, TPB);
    // Performs an exclusive down sweep. After the down sweep, each block
    // holds 0, a_1, a_1 + a_2, ... , a_1 + ... + a_(TPB-1).
    // One thread per block zeroes the block's last (valid) element.
    if (tid % TPB == 0) { array[min(size, next_block) - 1] = 0; }
    __syncthreads();
    for (int step = min_size; step > 0; step /= 2) {
        if (tid % step == (step - 1) && (tid - (step / 2) >= bsize)) {
            tmp = array[tid];
            array[tid] += array[tid - (step / 2)];
            array[tid - (step / 2)] = tmp;
        }
        __syncthreads();  // level must complete before the next reads it
    }
}
// Inclusive down-sweep within each block: after the up-sweep left partial
// sums in the tree positions, this pushes them down so element i holds
// a_1 + ... + a_(i+1). Operates entirely within each TPB-wide block.
__global__
void incl_downsweep (double * array) {
int next_bid = (blockIdx.x + 1) * blockDim.x;
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
__syncthreads();
// Performs an inclusive down sweep. After the inclusive down sweep, each block
// will have elements a_1, a_1 + a_2, ... , a_1 + a_2 + ... + a_1024
// Each barrier makes one tree level's additions visible before the next
// level reads them; the destination is bounded by the block's end (next_bid).
for (int step = TPB / 2; step > 1; step /= 2) {
if (tid % step == (step - 1) && (tid + (step / 2) < next_bid)) {
array[tid + (step / 2)] += array[tid];
}
__syncthreads();
}
}
// Computes b[i] = (i+1)*a[0] + i*a[1] + ... + 2*a[i-1] + a[i] on the GPU.
// That weighting equals an inclusive prefix sum applied twice, so the
// three-level scan pipeline (block scan, block-totals scan, totals-of-totals
// scan, then scatter back down) is run for two pf_step passes.
// Fixes vs. the original: the device buffers d_x, d_x1, d_x2 were never
// freed (leak per call), and the result copy ran inside the loop even though
// only the final pass's data is needed.
void sum(double* a, double* b, const int n) {
    int size = n;
    // Sizes of the two coarser scan levels (ceil-division by TPB).
    int size_div1 = int(ceil(double(size) / double(TPB)));
    int size_div2 = int(ceil(double(size_div1) / double(TPB)));
    int nblocks = int(ceil(double(size) / double(TPB)));
    int nblocks_div1 = int(ceil(double(nblocks) / double(TPB)));
    int nblocks_div2 = int(ceil(double(nblocks_div1) / double(TPB)));
    double *d_x, *d_x1, *d_x2;
    cudaMalloc(&d_x, size * sizeof(double));
    cudaMalloc(&d_x1, size_div1 * sizeof(double));
    cudaMalloc(&d_x2, size_div2 * sizeof(double));
    cudaMemcpy(d_x, a, size * sizeof(double), cudaMemcpyHostToDevice);
    // Two passes: prefix-sum of a, then prefix-sum of that, giving the
    // (i+1-j) weighting documented above.
    for (int pf_step = 1; pf_step < 3; pf_step++) {
        // Up-sweep every level, coarsest last.
        upsweep <<<nblocks, TPB>>> (d_x, d_x1, size);
        cudaDeviceSynchronize();
        upsweep <<<nblocks_div1, TPB>>> (d_x1, d_x2, size_div1);
        cudaDeviceSynchronize();
        upsweep <<<nblocks_div1, TPB>>> (d_x2, NULL, size_div2);
        cudaDeviceSynchronize();
        // Down-sweep: exclusive on the coarse levels, inclusive on the data.
        excl_downsweep <<<nblocks_div2, TPB>>> (d_x2, size_div2);
        cudaDeviceSynchronize();
        excl_downsweep <<<nblocks_div1, TPB>>> (d_x1, size_div1);
        cudaDeviceSynchronize();
        incl_downsweep <<<nblocks, TPB>>> (d_x);
        cudaDeviceSynchronize();
        // Propagate the coarse partial sums back down the levels.
        scat_part_sum <<<nblocks_div1, TPB>>> (d_x1, d_x2);
        cudaDeviceSynchronize();
        scat_part_sum <<<nblocks, TPB>>> (d_x, d_x1);
        cudaDeviceSynchronize();
    }
    // Only the final pass's result is needed on the host.
    cudaMemcpy(b, d_x, size * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_x1);
    cudaFree(d_x2);
}
// Driver: reads n followed by n doubles from the file named in argv[1],
// runs sum() on the GPU, and writes n followed by the n results to output.txt.
int main(int argc, const char * argv[]) {
if (argc != 2) {
printf("The argument is wrong! Execute your program with only input file name!\n");
return 1;
}
// Default size; overwritten below by the count read from the input file.
int n = 1 << 24;
//Dummy code for creating a random input vectors
//Convenient for the text purpose
//Please comment out when you submit your code!!!!!!!!!
/* FILE *fpw = fopen(argv[1], "w");
if (fpw == NULL) {
printf("The file can not be created!\n");
return 1;
}
//int n = 1 << 24;
fprintf(fpw, "%d\n", n);
srand(time(NULL));
for (int i=0; i<n; i++)
fprintf(fpw, "%lg\n", ((double)(rand() % n))/100);
fclose(fpw);
printf("Finished writing\n");
*/
//Read input from input file specified by user
FILE* fpr = fopen(argv[1], "r");
if (fpr == NULL) {
printf("The file can not be opened or does not exist!\n");
return 1;
}
//int n;
// First line of the file is the element count; it replaces the default n.
fscanf(fpr, "%d\n", &n);
printf("%d\n", n);
double* a = (double*)malloc(n*sizeof(double));
double* b = (double*)malloc(n*sizeof(double));
for (int i=0; i<n; i++) {
fscanf(fpr, "%lg\n", &a[i]);
}
fclose(fpr);
//Main function: computes b[i] = (i+1)*a[0] + ... + 2*a[i-1] + a[i] on the GPU
sum(a, b, n);
//Write b into output file
FILE* fpo = fopen("output.txt","w");
if (fpo == NULL) {
printf("The file can not be created!\n");
return 1;
}
fprintf(fpo, "%d\n", n);
for (int i=0; i<n; i++)
fprintf(fpo, "%lg\n", b[i]);
fclose(fpo);
free(a);
free(b);
printf("Done...\n");
return 0;
}
|
22,592 | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdio>
#include <chrono>
#include <random>
// Flops = num_ops * gpu_loops * iterations * blocks * threads / time_seconds
// FLOPs benchmark kernel: repeats a scaled add over A/B into C gpu_loops
// times (3 floating-point operations per iteration) and accumulates each
// thread's elapsed clock64() ticks into timers[idx].
// Fix: `.997` / `.998` were double literals, promoting the whole expression
// to double precision — wrong units for a single-precision FLOPs benchmark.
__global__ void testKernel(float* A, float* B, float* C, long long int gpu_loops, long long int *timers) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    long long int start = clock64();
    // NOTE(review): the loop counter is int while gpu_loops is long long;
    // fine for the CLI-sized values used here, overflows past 2^31.
    for (int i=0; i<gpu_loops; i++) {
        C[idx] = .997f * A[idx] + .998f * B[idx]; // 3 Floating Point Operations
    }
    timers[idx] += clock64() - start;
}
// Prints command-line usage; argv[0] supplies the program name (argc unused).
void print_usage(int argc, char* argv[]) {
printf("Usage: %s [# blocks] [# threads/block] [seconds] [gpu_loops]\n", argv[0]);
}
// Benchmark driver: repeatedly launches testKernel for `runtime` seconds and
// reports achieved single-precision FLOPS.
// Fixes vs. the original: hTimers is zero-initialized (it was copied to the
// device uninitialized and then +='d by the kernel, making the timings
// garbage); `delete hA, hB, hC;` used the comma operator (only hA was freed,
// and with scalar delete on arrays — UB); hTimers, dTimers, and the CUDA
// events were never released.
int main(int argc, char* argv[]) {
    if (argc != 5) {
        print_usage(argc, argv);
        return 0;
    }
    int blocks = std::stoi(argv[1]);
    int threads = std::stoi(argv[2]);
    int runtime = std::stoi(argv[3]); // In seconds
    int gpu_loops = std::stoi(argv[4]); // Inner-loop trip count per launch
    if (blocks <= 0 || threads <= 0) {
        print_usage(argc, argv);
        return 0;
    }
    // Allocate memory on host and fill inputs with random numbers
    float* hA = new float[threads*blocks];
    float* hB = new float[threads*blocks];
    float* hC = new float[threads*blocks];
    for(int i = 0; i < threads*blocks; i++) {
        hA[i] = float(std::rand())/float((RAND_MAX));
        hB[i] = float(std::rand())/float((RAND_MAX));
    }
    // Value-initialized (zeroed): the kernel accumulates with += into these.
    long long int* hTimers = new long long int[threads*blocks]();
    // Allocate memory on GPU and copy data
    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(float) * threads * blocks);
    cudaMalloc(&dB, sizeof(float) * threads * blocks);
    cudaMalloc(&dC, sizeof(float) * threads * blocks);
    cudaMemcpy(dA, hA, sizeof(float) * threads * blocks, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(float) * threads * blocks, cudaMemcpyHostToDevice);
    long long int* dTimers;
    cudaMalloc(&dTimers, sizeof(long long int) * threads * blocks);
    cudaMemcpy(dTimers, hTimers, sizeof(long long int) * threads * blocks, cudaMemcpyHostToDevice);
    // Initialize timing variables and start timer
    float time_ms;
    cudaEvent_t gpu_start, gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start);
    // Launch repeatedly until the wall-clock budget is exhausted
    int i = 0;
    printf("Using %i blocks with %i threads/block for %i seconds.\n", blocks, threads, runtime);
    std::chrono::system_clock::time_point end = std::chrono::system_clock::now() + std::chrono::seconds(runtime);
    while (std::chrono::system_clock::now() < end) {
        testKernel<<<blocks, threads>>>(dA, dB, dC, gpu_loops, dTimers);
        cudaDeviceSynchronize();
        i++;
    }
    // Calculate runtime
    cudaEventRecord(gpu_stop);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&time_ms, gpu_start, gpu_stop);
    printf("Actual time: %fms over %i iterations\n", time_ms, i);
    // 3 FLOPs per inner-loop iteration per thread.
    unsigned long long int flops = (3ULL * gpu_loops * i * blocks * threads) / (time_ms/1000ULL);
    printf("FLOPS: %llu\n", flops);
    cudaMemcpy(hC, dC, sizeof(float) * threads * blocks, cudaMemcpyDeviceToHost);
    cudaMemcpy(hTimers, dTimers, sizeof(long long int) * threads * blocks, cudaMemcpyDeviceToHost);
    // for(int i=0; i < 1; i++) printf("%llu\n", hTimers[i]);
    // Release device and host resources.
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    cudaFree(dTimers);
    delete[] hA;
    delete[] hB;
    delete[] hC;
    delete[] hTimers;
}
22,593 | #include "includes.h"
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N).
// One thread per element; threads past the end of the data do nothing.
__global__ void vec_add(int N, int *A, int *B, int *C){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if(gid >= N) return;  // guard the grid tail
    C[gid] = A[gid] + B[gid];
}
22,594 | #include "includes.h"
//#define NDEBUG
const static float eps = 1e-6;
const static size_t blocSize = 8;
const static size_t size = 1024;
// One thread per output row: caches a row in a per-thread local array, then
// computes that row of the product into Dest (dimensions x dimensions,
// row-major). Requires dimensions <= 2048.
// NOTE(review): parameter matA is never read — the row cache is loaded from
// matB, so this computes matB * matB. Presumably the cache load was meant to
// read matA (giving matA * matB); confirm against callers.
// NOTE(review): float vectA[2048] is 8 KB per thread and will live in local
// (off-chip) memory, not registers.
__global__ void matMultiply1D(float* matA, float* matB, float* Dest, int dimensions)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < dimensions)
{
// Per-thread cache of row i of the left operand.
float vectA[2048];
for (unsigned k = 0; k != dimensions; ++k)
{
vectA[k] = matB[i*dimensions + k];
}
// Dot the cached row against each column of matB.
for (unsigned j = 0; j != dimensions; ++j)
{
float res = 0.0f;
for (unsigned k = 0; k != dimensions; ++k)
{
res += vectA[k] * matB[k*dimensions + j];
}
Dest[i*dimensions + j] = res;
}
}
}
22,595 | #include "includes.h"
// In-place element-wise division: inA[i] /= inB[i] for i in [0, length).
// One thread per element; tail threads beyond `length` exit immediately.
__global__ void eldiv0(float * inA, float * inB, int length)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= length) return;  // grid tail guard
    inA[gid] = inA[gid] / inB[gid];
}
22,596 | /*
* CUDA kernel for 2D matrix shift. Ignores borders.
* Sofie Lovdal 18.6.2018
*/
__global__ void shiftPixels(double * output, double * const input,
unsigned int const numRows, unsigned int const numCols,
double const rho, double const phi)
{
/*global thread ID in x, y dimension*/
const int colIdx = blockIdx.x*blockDim.x + threadIdx.x;
const int rowIdx = blockIdx.y*blockDim.y + threadIdx.y;
/*make sure we are within image*/
if(colIdx>=numCols || rowIdx >= numRows) return;
/*Consider one pixel at the time in the input image. Calculate, based on rho
* and phi, how much this pixel should be shifted, and insert it in the
* corresponding position in the output buffer. If pixel goes outside of
* image, just exit. Here it would be good if the output buffer is from
* the beginning initialized to zero*/
/*Linear index of the pixel to be considered*/
int linearIdx = rowIdx*numCols + colIdx;
double pixelValue = input[linearIdx];
int deltax = (int)(-rho*cos(phi)); //FIX: ROUND TOWARDS 0. cos(pi/2)=0 so move 0 steps in x direction
int deltay = (int)(-rho*sin(phi)); //sin(pi/2)=1 so move -rho steps in y direction (two pixels upwards, -2)
if(colIdx+deltax<0 || colIdx+deltax>=numCols || rowIdx-deltay<0 || rowIdx-deltay >= numRows) return;
/*y axis is opposite direction in image compared to cartesian coordinate system?*/
int outputPixel = linearIdx + deltax - deltay*numCols; //or minus?
output[outputPixel] = pixelValue;
}
|
22,597 | #include<stdlib.h>
#include<math.h>
#include<iostream>
#include<time.h>
#define N 512
#define BLOCKS 64
using namespace std;
// One Jacobi relaxation step for the Poisson problem on the N x N grid:
// u2 = quarter-sum of u1's four neighbours minus h2*f, with the error vs.
// the analytic solution ut tracked in dmax[0].
// NOTE(review): dmax[0] is reset and max-updated by every thread with no
// atomics or synchronization, so the reported error is only approximate
// (races can lose updates or zero out earlier maxima) — confirm this is an
// acceptable stopping heuristic.
// NOTE(review): u1[k] = u2[k] writes back in place while neighbouring
// threads may still be reading u1, making this Jacobi/Gauss-Seidel hybrid
// iteration nondeterministic.
__global__ void Jacobi(double* u1, double* u2, double* f, double* ut, double h2, double* dmax)
{
dmax[0] = 0; // max error
double dm = 0; // temporary value of error
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = j*N + i;
// Interior points only; boundaries keep their Dirichlet values.
if((i > 0)&&(i < N-1)&&(j > 0)&&(j < N-1)&&(k < N*N))
{
u2[k] = 0.25*(u1[k-1] + u1[k+1] + u1[k-N] + u1[k+N] - h2*f[k]);
dm = fabs(ut[k]-u2[k]);
if (dmax[0] < dm) dmax[0] = dm;
u1[k] = u2[k];
}
}
// Applies the Dirichlet boundary value u = x^2 - x + 1 (x = i*h) along all
// four edges of the N x N grid stored row-major in U.
void boundaryConditions(double * U, double h)
{
    for (int i = 0; i < N; i++) {
        double x = i*h;
        double edgeValue = x*x - x + 1;
        U[i] = edgeValue;               // top row
        U[i*N] = edgeValue;             // left column
        U[i*N + (N-1)] = edgeValue;     // right column
        U[(N-1)*N + i] = edgeValue;     // bottom row
    }
}
/* Host driver: solves a Poisson problem on the unit square by Jacobi
 * iteration on the GPU. Writes the final grid to stdout and convergence
 * diagnostics / timing to stderr.
 * NOTE(review): no CUDA API return codes are checked anywhere below. */
int main( int argc, char * argv [] )
{
//double EPS = 0.0005;
double h =(double)1/(double)N; // mesh step on the unit square
int numBytes = sizeof(double)*N*N;
double* U1; // current iterate (host)
double* U2; // next iterate (host)
double* f; // right-hand side (host)
double* UT; // exact ("theoretical") solution (host)
double* dmax; // max error reported back by the kernel
U1 = (double*)malloc(numBytes);
U2 = (double*)malloc(numBytes);
f = (double*)malloc(numBytes);
UT = (double*)malloc(numBytes);
dmax = (double*)malloc(sizeof(double));
dmax[0] = 1; // start above the tolerance so the loop runs at least once
/* Matrix Initialization */
for(int i = 0; i < N; i++)
{
double x = i*h;
for(int j = 0; j < N; j++)
{
double y = j*h;
f[i*N+j] = 4 + 2*x*x - 2*x + 2*y*y - 2*y; // rightSide 1st variant
U1[i*N+j] = 0;
U2[i*N+j] = 0;
}
}
boundaryConditions(U1,h);
boundaryConditions(U2,h);
/* theoretical Solution */
for (int i = 0; i < N; i++)
{
double x = i*h;
for (int j = 0; j < N; j++)
{
double y = j*h;
UT[i*N+j] = (x*x - x + 1)*(y*y - y + 1); // product of the boundary polynomials
}
}
// allocate device memory
double * adev = NULL; // device copy of U1
double * bdev = NULL; // device copy of U2
double * cdev = NULL; // device copy of f
double * tdev = NULL; // device copy of UT
double * devD = NULL; // device copy of dmax (single double)
cudaMalloc ( (void**)&adev, numBytes );
cudaMalloc ( (void**)&bdev, numBytes );
cudaMalloc ( (void**)&cdev, numBytes );
cudaMalloc ( (void**)&tdev, numBytes );
cudaMalloc ( (void**)&devD, sizeof(double) );
cudaMemcpy ( adev, U1, numBytes, cudaMemcpyHostToDevice );
cudaMemcpy ( bdev, U2, numBytes, cudaMemcpyHostToDevice );
cudaMemcpy ( cdev, f, numBytes, cudaMemcpyHostToDevice );
cudaMemcpy ( tdev, UT, numBytes, cudaMemcpyHostToDevice );
cudaMemcpy ( devD, dmax, sizeof(double), cudaMemcpyHostToDevice );
// set kernel launch configuration
dim3 threads = dim3(32, 32, 1);
dim3 blocks = dim3(16, 16, 1);
// create cuda event handles
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate ( &start );
cudaEventCreate ( &stop );
// asynchronously issue work to the GPU (all to stream 0)
cudaEventRecord ( start, 0 );
int k = 0; // iteration counter
for (int i = 0; i < 59000 && dmax[0] > 0.00001; i++) // cap iterations; stop once error is small
{
// NOTE(review): the launch passes "threads"(32,32) as the GRID and
// "blocks"(16,16) as the BLOCK - the names are swapped. Coverage is
// unaffected because 32*16 = 512 = N in each dimension.
Jacobi<<<threads, blocks,0>>>(adev, bdev, cdev, tdev, h*h, devD);
cudaMemcpy ( dmax, devD, sizeof(double), cudaMemcpyDeviceToHost); // blocking copy also synchronizes with the kernel
if(k%100 == 0)
cerr << k <<" "<< dmax[0] << "\n"; // progress every 100 iterations
k++;
}
cudaMemcpy ( U2, bdev, numBytes, cudaMemcpyDeviceToHost );
cudaEventRecord ( stop, 0 );
cudaEventSynchronize ( stop );
cudaEventElapsedTime ( &gpuTime, start, stop );
cerr << "Executing time: " << (float)gpuTime << " milliseconds" << "\n";
cerr << "Total number of iterations: " << k << "\n";
cerr << "Error: " << dmax[0] << "\n";
// dump the solution as "x y u(x,y)" triples to stdout
for(int i = 0; i < N ; i++)
{
for(int j = 0; j < N ; j++)
{
cout << i*h << " " << j*h << " " << U2[i*N+j] << "\n";
}
}
// release resources
cudaEventDestroy ( start );
cudaEventDestroy ( stop );
cudaFree ( adev );
cudaFree ( bdev );
cudaFree ( cdev );
cudaFree ( tdev );
cudaFree ( devD );
free(U1);
free(U2);
free(UT);
free(f);
free(dmax);
return 0;
}
|
22,598 | #include <stdio.h>
/* cuda kernel declared and defined */
/* Device kernel: stores the sum of the two scalar arguments into the
 * device location pointed to by c. Intended for a single-thread launch. */
__global__ void add( int a, int b, int *c ){
	int sum = a + b;
	c[0] = sum;
}
/* Host driver: computes 2 + 7 on the GPU and prints the result.
 * FIX: every CUDA runtime call is now checked so a failure is reported to
 * stderr instead of silently printing an uninitialized value. */
int main( void ) {
    int c = 0;           /* host result; initialized in case the copy fails */
    int *dev_c = NULL;   /* device-side result cell */
    cudaError_t err;
    /* allocates memory on the device */
    err = cudaMalloc( (void**)&dev_c, sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    /* call to kernel */
    add<<<1,1>>>(2, 7, dev_c);
    err = cudaGetLastError(); /* catches launch-configuration errors */
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        cudaFree(dev_c);
        return 1;
    }
    /* copies dev_c into c (a blocking cudaMemcpy also synchronizes with the kernel) */
    err = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
        cudaFree(dev_c);
        return 1;
    }
    printf("2 + 7 = %d\n", c);
    cudaFree(dev_c);
    return 0;
}
|
22,599 | extern int MaxThreadsPerBlock;
extern int MaxThreadsX;
extern int MaxThreadsY;
/* Draws concentric damped sine "rings" into a 4-byte/pixel (RGBA) surface.
 * One thread per pixel; expects a 2D launch covering width x height.
 * surface1: pitched pixel buffer, pitch = row stride in bytes.
 * Value at distance R from (x0,y0): yref + Amp*sin(a*R+b), damped by
 * exp(-R/Rt) when Rt > 0, clamped to [0,253]. Mask selects which of the
 * first three channels receive the value; channel 3 (alpha) always does.
 *
 * FIX: use sinf() instead of sin() so the expression stays in single
 * precision (sin() silently promoted to double), and replace the
 * powf(x, 2) calls with plain multiplies. */
__global__ void Kernel_Rings1(unsigned char *surface1, int width, int height, size_t pitch,
    float Amp, float a, float b, float Rt, int x0, int y0, float yref, int Mask )
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned char *pixel1;
    if (x >= width || y >= height) return; // guard the grid tail
    pixel1 = (unsigned char *)(surface1 + y*pitch) + 4*x;
    float dx = (float)(x - x0);
    float dy = (float)(y - y0);
    float R = sqrtf( dx*dx + dy*dy );      // distance to the ring centre
    float ZR = Amp*sinf(a*R+b);
    if (Rt>0) ZR = ZR*expf(-R/Rt);         // optional radial damping
    int w = yref + ZR;
    if (w<0) w=0;                          // clamp to displayable range
    else
    if (w>253) w=253;
    for (int i=0;i<3;i++)
    {
        if (Mask & (1<<i)) pixel1[i] = w;  // only the masked colour channels
    }
    pixel1[3] =w; // alpha comme les autres -> alpha follows the colour value
}
/* Host-side launcher for Kernel_Rings1: builds a (MaxThreadsX x MaxThreadsY)
 * block and a grid large enough to cover the width x height surface
 * (ceiling division), then launches the kernel on the default stream.
 * NOTE: the `mode` parameter is currently unused. */
void BuildRingsK1( void *surface1, int width, int height, size_t pitch,
    float Amp, float a, float b, float Rt, int x0, int y0, float yref, int RgbMask, int mode )
{
    const dim3 block(MaxThreadsX, MaxThreadsY);
    const unsigned int gx = (width  + block.x - 1) / block.x;
    const unsigned int gy = (height + block.y - 1) / block.y;
    Kernel_Rings1<<<dim3(gx, gy), block>>>((unsigned char *)surface1, width, height, pitch,
                                           Amp, a, b, Rt, x0, y0, yref, RgbMask);
}
|
22,600 | #include <stdio.h>
#include <time.h>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
#define block_i 16
#define block_j 16
//grid will be r driven meaning grid(r,z) = grid[r*zMax + z]
/* Fills the (rseg+2) x (zseg+2) grid with the initial field profile.
 * The grid is r-major: grid[j*(zseg+2)+i] holds radial index j, axial
 * index i. Il is the current at the left end, dI its per-cell increment,
 * ldr the radial cell size and rlength the rod radius. */
__global__ void init(double *grid, double Il, double dI, double ldr, double rlength, int rseg, int zseg){
	int rj = blockIdx.y *block_j + threadIdx.y; // radial index
	int zi = blockIdx.x *block_i + threadIdx.x; // axial index
	if (rj >= (rseg+2) || zi >= (zseg+2)) return; // guard the grid tail
	grid[rj*(zseg+2)+zi] = (1-(rj*rj*ldr*ldr/(3*rlength*rlength)))*3*mu0*(Il + zi*dI)*rj*ldr/(4*PI*rlength*rlength);
}
/* Copies tempGrid back into grid, one thread per cell of the
 * (rseg+2) x (zseg+2) r-major layout. */
__global__ void writeOut(double *grid, double *tempGrid, int rseg, int zseg){
	int row = blockIdx.y *block_j + threadIdx.y; // radial index
	int col = blockIdx.x *block_i + threadIdx.x; // axial index
	if (row >= (rseg+2) || col >= (zseg+2)) return;
	int idx = row*(zseg+2) + col;
	grid[idx] = tempGrid[idx];
}
/* One explicit time step of the cylindrical (r,z) diffusion equation on the
 * (rseg+2) x (zseg+2) grid (r-major layout). Launched with
 * blockDim = (block_i+2, block_j+2): each block loads an overlapping tile
 * (interior plus a one-cell halo) into shared memory, and only the interior
 * threads (threadIdx in [1, block_i] x [1, block_j]) write results.
 * Requires (block_i+2)*(block_j+2)*sizeof(double) dynamic shared memory.
 *
 * r_aug = eta*dt/(mu0*dr^2), z_aug = eta*dt/(mu0*dz^2).
 *
 * FIX: the radial stencil coefficients previously used the INTEGER
 * expressions 1/(2*j) and 1/(j*j), which truncate to 0 for j >= 1 (j >= 2
 * for the latter) and so silently dropped the 1/r and 1/r^2 terms of the
 * cylindrical Laplacian. They are now evaluated in double precision.
 * The dead commented-out global-memory variant has been removed. */
__global__ void run(double *grid, double *tempGrid, double r_aug, double z_aug, int rseg, int zseg){
	int j = blockIdx.y *block_j + threadIdx.y; // radial index (tiles overlap by the halo)
	int i = blockIdx.x *block_i + threadIdx.x; // axial index
	int sharedPos = threadIdx.y*(block_i+2) + threadIdx.x;
	extern __shared__ double grid_old_s[];
	/* Stage this block's tile, halo cells included, in shared memory. */
	if(j<(rseg+2) && i<(zseg+2)){
		grid_old_s[sharedPos] = grid[j*(zseg+2) + i];
	}
	__syncthreads(); // all loads done before any thread reads a neighbour
	/* Interior grid points only; halo threads load but never write. */
	if(i<(zseg+1) && i != 0 && j<(rseg+1) && j!=0){
		if(threadIdx.x != 0 && threadIdx.x != block_i + 1 && threadIdx.y != 0 && threadIdx.y != block_j + 1){
			if(j==1){
				/* On-axis row: symmetry condition at r = 0. */
				tempGrid[j*(zseg+2) + i] += r_aug*(2*grid_old_s[sharedPos+block_i+2] - 4*grid_old_s[sharedPos]) +
					z_aug * (grid_old_s[sharedPos+1] - 2*grid_old_s[sharedPos] + grid_old_s[sharedPos-1]);
			}
			else{
				double inv2j = 1.0/(2.0*j);        // (1/2r) term, was integer-truncated to 0
				double invj2 = 1.0/((double)j*j);  // (1/r^2) term, was integer-truncated to 0
				tempGrid[j*(zseg+2) + i] += r_aug*((1+inv2j)*grid_old_s[sharedPos+block_i+2] + (-2-invj2)*grid_old_s[sharedPos] + (1-inv2j)*grid_old_s[sharedPos-(block_i+2)])
					+z_aug*(grid_old_s[sharedPos+1] - 2*grid_old_s[sharedPos] + grid_old_s[sharedPos-1]);
			}
		}
	}
}
/* Host driver: reads the problem parameters from stdin, initializes the
 * field on the GPU, time-steps the diffusion kernel until tottime is
 * reached, and writes the initial and final grids to "init.txt" and
 * "res.txt" (first two lines are the z and r coordinate axes).
 * NOTE(review): scanf, fopen and all CUDA API return codes are unchecked. */
int main(){
double Il, Ir, rlength, eta, tstep, ldr, ldz, tottime, zlength;
int rseg, zseg;
//printf("What is your left I? ");
scanf("%lf", &Il);
//printf("What is your right I? ");
scanf("%lf", &Ir);
//printf("What is the radius of your rod? ");
scanf("%lf", &rlength);
//printf("What is the length of your rod? ");
scanf("%lf", &zlength);
//printf("What is eta? ");
scanf("%lf", &eta);
//printf("How many segments would you like per radius? ");
scanf("%d", &rseg);
//printf("How many segments would you like per length? ");
scanf("%d", &zseg);
ldr = rlength/(rseg+1); // radial cell size
ldz = zlength/(zseg+1); // axial cell size
double smallest = ldr;
if(ldz < ldr)
smallest = ldz;
// explicit-scheme stability bound: dt proportional to min(dr,dz)^2
tstep = 0.125*smallest*smallest*mu0/eta;
//printf("How long would you like to run? ");
scanf("%lf", &tottime);
double *h_grid, *d_grid, *d_tempGrid;
size_t grid_size = (rseg + 2)*(zseg+2) * sizeof(double); // +2 in each dim for boundary cells
h_grid = (double*)malloc(grid_size);
cudaMalloc(&d_grid, grid_size);
cudaMalloc(&d_tempGrid, grid_size);
double dI = (Ir - Il) / (zseg+2); // per-cell current increment along z
int init_block_i, init_block_j;
init_block_i = 1+((zseg + 2 - 1)/block_i); // ceiling division
init_block_j = 1+((rseg+2-1)/block_j);
dim3 initBlockDim(init_block_i, init_block_j);
dim3 initThreadDim(block_i, block_j);
init<<<initBlockDim,initThreadDim>>>(d_grid, Il, dI, ldr, rlength, rseg, zseg);
cudaMemcpy(h_grid, d_grid, grid_size, cudaMemcpyDeviceToHost); // blocking copy also syncs with init
FILE *myfile;
myfile = fopen("init.txt", "w"); // NOTE(review): result unchecked
long int i;
// first line: z coordinates, second line: r coordinates
for(i = 0; i< zseg+1; i++)
fprintf(myfile, "%lf ", i*ldz);
fprintf(myfile, "%lf\n", i*ldz); // i == zseg+1 after the loop
for(i = 0; i< rseg+1; i++)
fprintf(myfile, "%lf ", i*ldr);
fprintf(myfile, "%lf\n", i*ldr);
// then the full grid, one r-row per output line
for(i = 0; i< (rseg + 2)*(zseg+2); i++){
if(i%(zseg+2)==zseg+1)
fprintf(myfile, "%lf\n", h_grid[i]);
else
fprintf(myfile, "%lf ", h_grid[i]);
}
fclose(myfile);
double r_aug = eta*tstep/(mu0*ldr*ldr); // diffusion numbers used by run()
double z_aug = eta*tstep/(mu0*ldz*ldz);
int run_block_i, run_block_j ;
run_block_i = 1+((zseg-1)/block_i); // ceiling division over interior cells
run_block_j = 1+((rseg-1)/block_j);
dim3 blockDim(run_block_i, run_block_j);
//printf("%d\n", run_block_i);
// +2 threads per dimension: halo threads for run()'s overlapping shared-memory tiles
dim3 threadDim(block_i + 2, block_j + 2);
cudaMemcpy(d_tempGrid, d_grid, grid_size, cudaMemcpyDeviceToDevice); // run() accumulates onto tempGrid
clock_t begin, end;
double time_spent;
begin = clock();
//run
long int step = 0;
while((step*tstep) < tottime){
// dynamic shared memory sized for one tile incl. halo
run<<<blockDim,threadDim, (block_i+2)*(block_j+2)*sizeof(double)>>>(d_grid, d_tempGrid, r_aug, z_aug, rseg, zseg);
//cudaDeviceSynchronize();
cudaMemcpy(d_grid, d_tempGrid, grid_size, cudaMemcpyDeviceToDevice); // publish the new state (stream-ordered after the kernel)
//cudaDeviceSynchronize();
step++;
}
cudaDeviceSynchronize(); // drain the queue before timing / reading back
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(h_grid, d_grid, grid_size, cudaMemcpyDeviceToHost);
printf("\nSteps: %ld\n", step);
myfile = fopen("res.txt", "w"); // NOTE(review): result unchecked
// same layout as init.txt: axes first, then the grid
for(i = 0; i< zseg+1; i++)
fprintf(myfile, "%lf ", i*ldz);
fprintf(myfile, "%lf\n", i*ldz);
for(i = 0; i< rseg+1; i++)
fprintf(myfile, "%lf ", i*ldr);
fprintf(myfile, "%lf\n", i*ldr);
for(i = 0; i< (rseg + 2)*(zseg+2); i++){
if(i%(zseg+2)==zseg+1)
fprintf(myfile, "%lf\n", h_grid[i]);
else
fprintf(myfile, "%lf ", h_grid[i]);
}
fclose(myfile);
free(h_grid);
cudaFree(d_grid);
cudaFree(d_tempGrid);
printf("\n------------------------------------\nExecution took: %lf sec\n", time_spent);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.