| serial_no (int64, range 1–24.2k) | cuda_source (string, length 11–9.01M) |
|---|---|
4,101 | #include "includes.h"
/* Set every INCX-strided element of X (N logical elements) to ALPHA. */
__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
const int thread_id = blockDim.x * blockIdx.x + threadIdx.x; /* flat global id */
if (thread_id >= N)
return; /* the grid may overshoot N; surplus threads do nothing */
X[thread_id * INCX] = ALPHA; /* one strided element per thread */
} |
4,102 | #include "includes.h"
/* Element-wise add of two int vectors: c[i] = a[i] + b[i] for i in [0, N).
 * Grid-stride form, so any launch configuration covers all N elements. */
__global__ void vectorAdd(const int *a, const int *b, int *c, int N)
{
const int stride = blockDim.x * gridDim.x; /* total threads in the grid */
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N; i += stride)
c[i] = a[i] + b[i];
} |
4,103 | #include <stdio.h>
#include <cuda.h>
#include <cuComplex.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#define BLOCK_SIZE 16 // Threads per block supported by the GPU
/* Packed triangular matrix-vector product: T[e] = (op(A) * X)[e], one thread
 * per output row.  A holds the triangle in packed (BLAS "TP") storage; UPLO
 * selects upper/lower triangle and TRANS selects A vs A^T.
 * NOTE(review): DIAG is accepted but never read — unit-diagonal handling
 * (DIAG == 'U') appears unimplemented; confirm against the BLAS dtpmv spec. */
__global__ void dtpmv_kernel ( char UPLO, char TRANS, char DIAG,int N,double * A, double *X , double *T) {
int elementId = blockIdx.x * BLOCK_SIZE + threadIdx.x; /* output row index */
if(elementId>=N) {
return; /* grid may overshoot N */
}
double sum = 0;
if (UPLO == 'U' && TRANS == 'N' ) {
/* Upper, no transpose: row walks the packed upper triangle to the right. */
for (int i = elementId; i<N ; i++) {
sum += A[elementId *N + i - ((elementId+1)*elementId)/2 ] * X[i];
}
} else if (UPLO == 'L' && TRANS == 'N') {
/* Lower, no transpose: row elementId starts at packed offset e(e+1)/2. */
int temp = (elementId * (elementId +1)) /2;
for (int i = 0; i <=elementId; i++ ) {
sum += A[temp+i] * X[i];
}
} else if (UPLO == 'U' && TRANS == 'T') {
/* Upper transposed: column elementId of the packed upper triangle. */
for (int i = elementId;i>=0; i--) {
sum += A[i*N + elementId- ((i+1)*i)/2]*X[i];
}
} else {
/* Remaining case (lower transposed): column of the packed lower triangle. */
for (int i = elementId;i < N; i++) {
sum += A[i*N + elementId- ((2*N-i-1)*i)/2 ]*X[i];;
}
}
T[elementId] = sum;
}
/* Ceiling of n/m (number of size-m blocks needed to cover n items).
 * Assumes n >= 0 and m > 0.  Branch-free ceiling-division idiom. */
int Ceil (int n , int m) {
return (n + m - 1) / m;
}
/* Host wrapper for the packed triangular matrix-vector product.
 * A holds the packed triangle ((N*(N+1))/2 doubles) and X the length-N input
 * vector; on return X is overwritten with the product.  UPLO/TRANS select the
 * kernel variant (DIAG is forwarded but currently unused by the kernel).
 * Prints the elapsed GPU wall time in milliseconds. */
void dtpmv(char UPLO, char TRANS, char DIAG, int N, double * A, double * X) {
double * d_A, *d_X, *d_T;
int size_a = (N*(N+1))/2; /* packed triangular element count */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaMalloc((void**) &d_A, sizeof(double)*size_a );
cudaMalloc((void**) &d_X, sizeof(double)*N );
cudaMalloc((void**) &d_T, sizeof(double)*N );
cudaMemcpy(d_A, A , sizeof(double) *size_a, cudaMemcpyHostToDevice);
cudaMemcpy(d_X, X, sizeof(double) * N, cudaMemcpyHostToDevice);
/*Determine Kernel Parameters*/
int num_blocks = Ceil(N, BLOCK_SIZE);
/*Launch Kernel*/
dtpmv_kernel <<<num_blocks, BLOCK_SIZE >>> (UPLO, TRANS, DIAG, N, d_A, d_X, d_T);
/* Launches return no status directly — query launch errors explicitly. */
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "dtpmv kernel launch failed: %s\n", cudaGetErrorString(err));
}
/* Copy Memory Back to Host (blocking copy also synchronizes with the kernel) */
cudaMemcpy(X, d_T, sizeof(double) * N , cudaMemcpyDeviceToHost);
/* Free Device Memory*/
cudaFree(d_A);
cudaFree(d_X);
cudaFree(d_T);
cudaEventRecord(stop, 0);
float timeElapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeElapsed, start, stop);
printf("Time Elapsed : %f\n", timeElapsed);
/* Events are device resources — destroy them (leaked in the original). */
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
/* Reads UPLO, TRANS, DIAG and N from stdin, then the packed matrix and the
 * vector, runs the GPU dtpmv, and writes the resulting vector to
 * Results/dtpmv_gpu_last.txt. */
int main() {
char UPLO, TRANS, DIAG;
int N;
double * A, *X;
/*
Test Cases are to be input
*/
if (scanf("%c %c %c %d",&UPLO,&TRANS, &DIAG, &N) != 4 || N <= 0) {
fprintf(stderr, "Invalid input header\n");
return 1;
}
int size_a = (N* (N+1))/2; /* packed triangular element count */
/* Pinned host memory for faster host<->device transfers. */
cudaMallocHost((void**) &A, sizeof(double)*size_a );
cudaMallocHost((void**) &X, sizeof(double)*N);
for (int i= 0; i<size_a;i++) {
scanf("%lf", &A[i]);
}
for (int i= 0; i<N;i++) {
scanf("%lf", X + i);
}
dtpmv(UPLO, TRANS, DIAG, N, A, X);
/* Display Output */
FILE * fp = fopen("Results/dtpmv_gpu_last.txt", "w");
if (fp == NULL) {
/* Don't crash in fprintf when the Results/ directory is missing. */
fprintf(stderr, "Cannot open Results/dtpmv_gpu_last.txt\n");
} else {
for(int i =0; i<N;i++) {
fprintf(fp,"%f ", X[i]);
}
fclose(fp);
}
/* Pinned allocations must be released with cudaFreeHost (leaked before). */
cudaFreeHost(A);
cudaFreeHost(X);
return 0;
}
|
#ifndef _CUDA_KERNEL_OPTIONS_CU_
#define _CUDA_KERNEL_OPTIONS_CU_
/* TPB: presumably threads per block for the kernels using this header — TODO confirm. */
#define TPB 128
/* Maximum number of obstacles a kernel invocation is expected to handle. */
#define MAX_OBSTACLES 128
/* Bit flags (combinable with |) selecting optional kernel behaviours.
 * The semantics of each flag are defined by the kernels that consume them,
 * which are not visible from this header. */
typedef enum kernel_opt
{
NONE = 0,
IGNORE_UNLESS_ZERO = 1 << 0,
LOCAL_SPACE_BANKING = 1 << 1,
SPHERICAL_WRAP_AROUND = 1 << 2
} kernel_options;
#endif // _CUDA_KERNEL_OPTIONS_CU_ |
4,105 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat),__LINE__, __FILE__);\
exit(1);\
}\
}
/* Element-wise float vector add: result[i] = left[i] + right[i].
 * Assumes the launch covers exactly the array length (no bounds check).
 * Fix: the original used only threadIdx.x as the index, so with the
 * <<<SIZE/512, 512>>> launch every block rewrote elements 0..511 and
 * result[512..] was never computed. */
__global__ void addVector(float* left, float* right, float* result)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x; /* flat global index */
result[idx] = left[idx] + right[idx];
}
#define SIZE 2048
/* Adds two SIZE-element vectors on the GPU and reports the kernel time.
 * (The original comments were mojibake Russian; translated to English.) */
__host__ int main()
{
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocate host memory for the vectors
float* vec1 = new float[SIZE];
float* vec2 = new float[SIZE];
float* vec3 = new float[SIZE];
for (int i = 0; i < SIZE; i++)
{
vec1[i] = i;
vec2[i] = i;
}
float* devVec1;
float* devVec2;
float* devVec3;
CUDA_CHECK_RETURN(cudaMalloc((void**)&devVec1, sizeof(float) * SIZE));
CUDA_CHECK_RETURN(cudaMalloc((void**)&devVec2, sizeof(float) * SIZE));
CUDA_CHECK_RETURN(cudaMalloc((void**)&devVec3, sizeof(float) * SIZE));
CUDA_CHECK_RETURN(cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(devVec2, vec2, sizeof(float) * SIZE, cudaMemcpyHostToDevice));
dim3 block(512);
cudaEventRecord(start,0);
addVector <<<SIZE/512, block >>>(devVec1, devVec2, devVec3);
// A kernel launch returns no error directly — query it explicitly.
CUDA_CHECK_RETURN(cudaGetLastError());
cudaEventRecord(stop, 0);
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
cudaEvent_t syncEvent;
CUDA_CHECK_RETURN(cudaEventCreate(&syncEvent));
CUDA_CHECK_RETURN(cudaEventRecord(syncEvent, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(syncEvent));
CUDA_CHECK_RETURN(cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE, cudaMemcpyDeviceToHost));
fprintf(stderr,"gTest took %g\n",elapsedTime);
// Destroy every event — start and stop leaked in the original.
cudaEventDestroy(syncEvent);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(devVec1);
cudaFree(devVec2);
cudaFree(devVec3);
delete[] vec1; vec1 = 0;
delete[] vec2; vec2 = 0;
delete[] vec3; vec3 = 0;
return 0;
}
|
4,106 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define DIM 3
#define GRID 16
#define VALIDATE 10
// function declarations
void validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D);
void validate_search (const float *q, const float *c, const int *closest, int N,
int D);
void write_file (double time_var, const char *filename, const char *mode);
/**
* Find grid location of each block with gpu
* @method find_grid_loc_gpu
* @param points points matrix
* @param grid_loc grid location for each point result
* @param n num of elements
* @param d grid dimension (cube)
*/
/* For each of the n DIM-dimensional points (coordinates in [0,1)), compute
 * the flat index of the d*d*d grid cell that contains it.  Grid-stride loop;
 * the k parameter is unused but kept for interface compatibility. */
__global__ void
find_grid_loc_gpu (float *points, int *grid_loc, int n, int d, int k)
{
const int stride = blockDim.x * gridDim.x;
const int dd = d * d;
for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride)
{
const int cx = (int) (points[idx * DIM + 0] * d);
const int cy = (int) (points[idx * DIM + 1] * d);
const int cz = (int) (points[idx * DIM + 2] * d);
grid_loc[idx] = cx + d * cy + dd * cz;
}
}
/**
* [search_block_gpu description]
* @method search_block_gpu
* @param closest current closest point index
* @param current_min current closest point distance
* @param block_offset block location in point array
* @param q point
* @param block block point array
* @param points_in_block num of points the current block
*/
/* Linear scan of one grid block: update *closest / *current_min with the
 * point in `block` (length points_in_block) nearest to query q.
 * block_offset is the block's start index in the full point array. */
__device__ void
search_block_gpu (int *closest, float *current_min, int block_offset, float *q,
float *block, int points_in_block)
{
for (int p = 0; p < points_in_block; p++)
{
const float dx = block[p * DIM + 0] - q[0];
const float dy = block[p * DIM + 1] - q[1];
const float dz = block[p * DIM + 2] - q[2];
const float dist = sqrtf (dx * dx + dy * dy + dz * dz);
if (dist < *current_min)
{
*current_min = dist;
*closest = block_offset + p;
}
}
}
/**
* find closet point in c of each point in q with gpu
* @method void search_gpu
* @param q [q points]
* @param c [c points]
* @param grid [c points location of each grid block]
* @param points_per_block [points in each grid block]
* @param closests [result array index]
* @param mindists [result array min dist found]
* @param N [number of elements]
* @param d [grid dimension cube]
*/
/* For every query point in q, find its nearest neighbour among the
 * cell-ordered points in c.  Starts in the query's own grid cell, then
 * expands a shell of surrounding cells (stage 1, 2, ...) until no cell
 * boundary is closer than the best distance found so far. */
__global__ void
search_gpu (float *q, float *c, int *grid, int *points_per_block, int *closests,
float *mindists, int n, int d, int dd)
{
// int thread_id = blockIdx.x*blockDim.x + threadIdx.x;
int x, y, z;
int grid_loc;
float b;
int stage = 0, finished = 0;
// int start = thread_id * points_per_thread ;
// int end = thread_id * points_per_thread + points_per_thread ;
float block_size = 1 / (float) d; /* edge length of one grid cell */
float d_mindist; /* NOTE(review): unused local */
float point[3];
/* Grid-stride loop: one query point per iteration. */
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
{
point[0] = q[i * DIM + 0];
point[1] = q[i * DIM + 1];
point[2] = q[i * DIM + 2];
/* Cell coordinates of the query point. */
x = (int) (point[0] * d);
y = (int) (point[1] * d);
z = (int) (point[2] * d);
grid_loc = x + d * y + dd * z;
mindists[i] = (1 << 10); // Inf
/* Seed the search with the query's own cell. */
search_block_gpu (&closests[i], &mindists[i], grid[grid_loc], &q[i * DIM],
&c[grid[grid_loc] * DIM], points_per_block[grid_loc]);
finished = 0;
stage = 0;
while (!finished)
{
/* Stop once every neighbouring cell wall is farther than the best
 * distance found so far (then no unvisited cell can improve it). */
finished = 1;
//-------X---------
if (x + stage + 1 < d)
{
b = block_size * (x + stage + 1) - point[0];
if (b < mindists[i])
finished = 0;
}
if (x - stage - 1 > -1)
{
/* NOTE(review): asymmetric with the positive side — the guard tests
 * x - stage - 1 but the distance uses (x - stage); confirm intent. */
b = point[0] - block_size * (x - stage);
if (b < mindists[i])
finished = 0;
}
//-------Y---------
if (y + stage + 1 < d)
{
b = block_size * (y + stage + 1) - point[1];
if (b < mindists[i])
finished = 0;
}
if (y - stage - 1 > -1)
{
b = point[1] - block_size * (y - stage);
if (b < mindists[i])
finished = 0;
}
//-------Z---------
if (z + stage + 1 < d)
{
b = block_size * (z + stage + 1) - point[2];
if (b < mindists[i])
finished = 0;
}
if (z - stage - 1 > -1)
{
b = point[2] - block_size * (z - stage);
if (b < mindists[i])
finished = 0;
}
stage++;
if (!finished)
{
/* Scan only the outer shell of the (2*stage+1)^3 cube of cells. */
for (int dx = x - stage; dx <= x + stage; dx++)
{
for (int dy = y - stage; dy <= y + stage; dy++)
{
for (int dz = z - stage; dz <= z + stage; dz++)
{
if (dx == x - stage || dx == x + stage
|| dy == y - stage || dy == y + stage
|| dz == z - stage || dz == z + stage)
{
grid_loc = dx + d * dy + dd * dz;
if (dx > -1 && dx < d && dy > -1 && dy < d
&& dz > -1 && dz < d)
search_block_gpu (&closests[i], &mindists[i],
grid[grid_loc], point,
&c[grid[grid_loc] * DIM],
points_per_block[grid_loc]);
}
}
}
}
}
}
}
}
/**
 * Fill an array with n random DIM-dimensional points,
 * each coordinate uniform in [0, 1).
 * @method init_rand_points
 * @param p output point array (n * DIM floats)
 * @param n num of points
 */
/* Fill p with n random DIM-dimensional points; each coordinate in [0, 1). */
void
init_rand_points (float *p, int n)
{
  for (int idx = 0; idx < n * DIM; idx++)
    p[idx] = (rand () % 1000000 / (float) (1000001));
}
/**
 * Reorder the point array so points in the same grid cell are contiguous.
 * Builds a per-cell histogram and prefix-sum offsets, scatters the points
 * into a newly allocated array, frees the input array and returns the copy.
 * @method rearrange
 * @param p input point array (freed by this call)
 * @param intex grid-cell index of each point
 * @param points_per_block out: number of points per cell
 * @param grid out: start offset of each cell in the returned array
 * @param n num of points
 * @param k num of grid cells
 */
/* Group points by grid cell (counting sort by cell index).
 * Fills points_per_block (histogram) and grid (start offsets), returns a new
 * cell-ordered array, and frees the input array p. */
float *
rearrange (float *p, int *intex, int *points_per_block, int *grid, int n, int k)
{
  /* Histogram: number of points in each of the k cells. */
  for (int i = 0; i < k; i++)
    points_per_block[i] = 0;
  for (int i = 0; i < n; i++)
    points_per_block[intex[i]]++;
  /* Exclusive prefix sum -> start offset of each cell.  (Loop from 1: the
   * original unconditionally wrote grid[1], out of bounds when k == 1.) */
  grid[0] = 0;
  for (int i = 1; i < k; i++)
    grid[i] = grid[i - 1] + points_per_block[i - 1];
  /* Working copy of the offsets: advanced as each point is placed. */
  int *positions = (int *) malloc (k * sizeof (int));
  for (int i = 0; i < k; i++)
    positions[i] = grid[i];
  float *arrangedpoints = (float *) malloc (n * DIM * sizeof (float));
  int pos;
  int posDim = 0, iDim = 0;
  for (int i = 0; i < n; i++)
    {
      pos = positions[intex[i]];
      posDim = pos * DIM;
      arrangedpoints[posDim + 0] = p[iDim + 0];
      arrangedpoints[posDim + 1] = p[iDim + 1];
      arrangedpoints[posDim + 2] = p[iDim + 2];
      iDim = iDim + DIM;
      positions[intex[i]]++;
    }
  free (p);
  free (positions);
  return arrangedpoints;
}
/* Driver: generates 2^argv[1] random query/corpus points, bins the corpus
 * into a (2^argv[2])^3 grid on CPU+GPU, runs the grid nearest-neighbour
 * search on the GPU, and validates both stages against CPU reference code. */
int
main (int argc, char **argv)
{
if (argc != 3)
{
printf ("Invalid argument\n");
exit (1);
}
int NQ = 1 << atoi (argv[1]);
int NC = 1 << atoi (argv[1]);
int N = NQ;
int D = 1 << atoi (argv[2]);
/*
write_file (atoi (argv[1]), "problem_size.data", "a");
write_file (atoi (argv[2]), "grid_size.data", "a");
*/
int block_num = D * D * D;
printf ("NQ=%d NC=%d D=%d block_num=%d\n", NQ, NC, D, block_num);
/* Upper boundary of each grid slab along one axis: i/D for i = 1..D. */
float *intervals = (float *) malloc (D * sizeof (float));
for (int i = 1; i <= D; i++)
intervals[i - 1] = 1 / (float) D * i;
struct timeval startwtime, endwtime;
double elapsed_time;
float *q, *c;
int *grid_q, *grid_c;
int *q_block, *c_block;
int *points_block_q, *points_block_c;
int *closest;
float *mindists;
// malloc points
q = (float *) malloc (N * DIM * sizeof (float));
c = (float *) malloc (N * DIM * sizeof (float));
// malloc location of grid block in array q/c
grid_q = (int *) malloc (block_num * sizeof (int));
grid_c = (int *) malloc (block_num * sizeof (int));
// malloc grid of each point
q_block = (int *) malloc (N * sizeof (int));
c_block = (int *) malloc (N * sizeof (int));
// malloc points per block
points_block_q = (int *) malloc (block_num * sizeof (int));
points_block_c = (int *) malloc (block_num * sizeof (int));
closest = (int *) malloc (N * sizeof (int));
mindists = (float *) malloc (N * sizeof (float));
init_rand_points (q, N);
init_rand_points (c, N);
int blocks = 1280;
int threads_pblock = 64;
int threads = blocks * threads_pblock;
int k = N / threads;
printf (" Points per thread : %d\n", k);
float *d_q, *d_c, *d_mindists;
int *d_q_block, *d_c_block, *points_per_block, *block_loc, *d_closests;
cudaMalloc (&d_q, N * DIM * sizeof (float));
cudaMalloc (&d_c, N * DIM * sizeof (float));
/* NOTE(review): the four allocations below size int buffers with
 * sizeof(float) — works only because sizeof(int) == sizeof(float) here. */
cudaMalloc (&d_q_block, N * sizeof (float));
cudaMalloc (&d_c_block, N * sizeof (float));
cudaMalloc (&points_per_block, block_num * sizeof (float));
cudaMalloc (&block_loc, block_num * sizeof (float));
cudaMalloc (&d_closests, N * sizeof (float));
cudaMalloc (&d_mindists, N * sizeof (float));
//-------------REARRANGE POINTS IN GRID IN CPU-----------------------
cudaMemcpy (d_q, q, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy (d_c, c, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
gettimeofday (&startwtime, NULL);
// find_grid_loc_gpu<<<blocks, threads_pblock>>> (d_q, d_q_block, N, D, k);
find_grid_loc_gpu<<<blocks, threads_pblock>>> (d_c, d_c_block, N, D, k);
cudaDeviceSynchronize ();
/* NOTE(review): the q-point binning kernel above is commented out, so
 * q_block receives never-written device memory; it is unused afterwards. */
cudaMemcpy (q_block, d_q_block, N * sizeof (int), cudaMemcpyDeviceToHost);
cudaMemcpy (c_block, d_c_block, N * sizeof (int), cudaMemcpyDeviceToHost);
c = rearrange (c, c_block, points_block_c, grid_c, N, block_num);
cudaMemcpy (d_c, c, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy (points_per_block, points_block_c, block_num * sizeof (int),
cudaMemcpyHostToDevice);
cudaMemcpy (block_loc, grid_c, block_num * sizeof (int),
cudaMemcpyHostToDevice);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Rearrange Time : %f\n", elapsed_time);
// write_file (elapsed_time, "rearrange_time.data", "a");
//---------------GRID VALIDATION IN // CPU-----------------------
validate_grid (c, intervals, grid_c, points_block_c, D);
//-------------SEARCH GRID IN GPU-----------------------
gettimeofday (&startwtime, NULL);
search_gpu<<<blocks, threads_pblock>>> (d_q, d_c, block_loc, points_per_block,
d_closests, d_mindists, N, D, D * D);
cudaDeviceSynchronize ();
cudaMemcpy (closest, d_closests, N * sizeof (int), cudaMemcpyDeviceToHost);
/* NOTE(review): copies floats with sizeof(int) — same byte count here. */
cudaMemcpy (mindists, d_mindists, N * sizeof (int), cudaMemcpyDeviceToHost);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
// write_file (elapsed_time, "search_gpu_time.data", "a");
validate_search (q, c, closest, N, D);
printf ("Search Time GPU: %f\n", elapsed_time);
//---------------CLEAN UP-------------------------------------
/* NOTE(review): points_per_block, block_loc, d_closests, d_mindists and
 * the host arrays intervals/mindists are never freed. */
cudaFree (d_q_block);
cudaFree (d_c_block);
cudaFree (d_q);
cudaFree (d_c);
free (q);
free (c);
free (grid_c);
free (grid_q);
free (c_block);
free (q_block);
free (points_block_c);
free (points_block_q);
free (closest);
free (mindists);
}
/* Verifies the grid binning: every point stored in cell (x,y,z) of the
 * rearranged array c must lie inside that cell's coordinate slab on all
 * three axes.  Prints the number of points checked and failures.
 * Fix: the lower bounds for y and z were selected with "if (x == 0)" — a
 * copy-paste bug that compared those axes against the wrong interval. */
void
validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D)
{
  int sum = 0;
  int fails = 0;
  float xmax, ymax, zmax, xmin, ymin, zmin;
  int pos, block_pos;
  for (int x = 0; x < D; x++)
    {
      xmax = intervals[x];
      xmin = (x == 0) ? 0 : intervals[x - 1];
      for (int y = 0; y < D; y++)
        {
          ymax = intervals[y];
          ymin = (y == 0) ? 0 : intervals[y - 1]; /* was keyed on x */
          for (int z = 0; z < D; z++)
            {
              zmax = intervals[z];
              zmin = (z == 0) ? 0 : intervals[z - 1]; /* was keyed on x */
              pos = x + D * y + D * D * z;
              block_pos = grid_c[pos]; /* cell's start in the point array */
              for (int point = 0; point < points_block_c[pos]; point++)
                {
                  sum++;
                  if (c[(block_pos + point) * DIM + 0] >= xmax
                      || c[(block_pos + point) * DIM + 0] < xmin)
                    fails++;
                  if (c[(block_pos + point) * DIM + 1] >= ymax
                      || c[(block_pos + point) * DIM + 1] < ymin)
                    fails++;
                  if (c[(block_pos + point) * DIM + 2] >= zmax
                      || c[(block_pos + point) * DIM + 2] < zmin)
                    fails++;
                }
            }
        }
    }
  printf ("GRID VALIDATION POINTS:%d FAILS:%d\n", sum, fails);
}
/* Spot-checks the GPU nearest-neighbour result: brute-force scans c for the
 * true nearest neighbour of the first few query points and counts
 * mismatches with closest[].  D is kept for interface compatibility (the
 * original only used it in commented-out debug output).
 * Fix: the original always checked VALIDATE points, reading q/closest out
 * of bounds when N < VALIDATE; the count is clamped now.  The unused
 * failrate computation and dead debug code were removed. */
void
validate_search (const float *q, const float *c, const int *closest, int N,
int D)
{
  float mindist, dist;
  int close = -1;
  int fails = 0;
  int checks = (N < VALIDATE) ? N : VALIDATE;
  for (int i = 0; i < checks; i++)
    {
      mindist = (1 << 10); /* larger than any distance in the unit cube */
      for (int j = 0; j < N; j++)
        {
          dist = (q[i * DIM + 0] - c[j * DIM + 0])
                 * (q[i * DIM + 0] - c[j * DIM + 0]);
          dist += (q[i * DIM + 1] - c[j * DIM + 1])
                  * (q[i * DIM + 1] - c[j * DIM + 1]);
          dist += (q[i * DIM + 2] - c[j * DIM + 2])
                  * (q[i * DIM + 2] - c[j * DIM + 2]);
          dist = sqrtf (dist);
          if (dist < mindist)
            {
              close = j;
              mindist = dist;
            }
        }
      if (close != closest[i])
        fails++;
    }
  printf ("SEARCH VALIDATION POINTS: %d FAILS: %d\n", checks, fails);
}
/* Appends/writes a timing value to output_data_gpu/<filename> using the
 * given fopen mode.  Fix: path construction now uses bounded snprintf —
 * the original called strcat without <string.h> being included, and with
 * no bound on the destination buffer. */
void
write_file (double time_var, const char *filename, const char *mode)
{
  FILE *fptr;
  char filepath[128];
  snprintf (filepath, sizeof filepath, "output_data_gpu/%s", filename);
  fptr = fopen (filepath, mode);
  if (!fptr)
    {
      printf ("Error: Can't open file %s", filepath);
      return;
    }
  /* print the time in file */
  fprintf (fptr, "%lf ", time_var);
  fclose (fptr);
}
|
4,107 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <unistd.h>
#include <stdint.h>
#define POP 300
#define LEN 30
#define MUT 0.1
#define REC 0.5
#define END 10000
#define SUMTAG 150
#define PRODTAG 3600
int gene[POP][LEN];
int value[POP][LEN];
int seed[POP][LEN];
void init_pop();
double evaluate(int n);
void run();
void display(int tournaments, int n);
void get_result(int idx);
double score[POP];
/* Uniform random value from the set {0.0, 0.1, ..., 0.9}. */
double random_double(){
return (rand() % 10) / 10.0;
}
/* One xorshift PRNG step on *seed; returns a value in [0, range).
 * Fix: the original shifted and took % on the signed int state, so a
 * negative state produced a negative remainder that the unsigned return
 * type converted to a huge value >= range.  The state is now advanced in
 * unsigned arithmetic (well-defined shifts, non-negative modulo). */
__device__ unsigned int get_rand(int range, int* seed){
unsigned int s = (unsigned int)*seed;
s ^= (s << 13);
s ^= (s >> 17);
s ^= (s << 5);
*seed = (int)s;
return s % (unsigned int)range;
}
/* Fitness kernel: one (single-threaded) block per individual.  Splits the
 * individual's cards into a sum pile (gene 0) and a product pile (gene 1),
 * then scores it as the combined relative error against SUMTAG/PRODTAG.
 * Lower score is better; mirrors the host-side evaluate(). */
__global__ void comput_kernel(double* score, int* gene, int* value){
int offset = blockIdx.x * LEN; /* this individual's genome start */
//evaluate
uint64_t prod = 1;
uint32_t sum = 0;
for(int i = 0; i < LEN; ++i)
if(gene[offset + i] == 0)
sum += value[offset + i];
else
prod *= value[offset + i];
/* Relative errors; made non-negative below (manual fabs). */
double scaled_sum_error = (double)(sum - (double)SUMTAG) / (double)SUMTAG;
double scaled_prod_error = (double)(prod - (double)PRODTAG) / (double)PRODTAG;
if(scaled_sum_error < 0.0)
scaled_sum_error *= -1;
if(scaled_prod_error < 0.0)
scaled_prod_error *= -1;
score[blockIdx.x] = scaled_sum_error + scaled_prod_error;
}
/* Single-block kernel: finds the minimum of score[0..POP) and its index.
 * All threads cooperatively stage the scores into shared memory; after the
 * barrier, thread 0 performs the sequential scan and writes the result.
 * Fix (consistency): the scan now reads shared_score[i] — the original
 * staged into shared memory but then read global score[i] anyway. */
__global__ void find_min_score(double *score, double* min_score, int *min_idx){
int low_idx = -1;
double low = 0;
__shared__ double shared_score[POP];
for(int i = threadIdx.x; i < POP; i += blockDim.x)
shared_score[i] = score[i];
__syncthreads(); /* all threads reach this before any returns */
if(threadIdx.x != 0)
return;
for(int i = 0; i < POP; ++i)
if(shared_score[i] < low || low_idx == -1){
low = shared_score[i];
low_idx = i;
}
*min_idx = low_idx;
*min_score = low;
}
/* Recombination/mutation kernel: one block per individual, one thread per
 * gene.  The fittest individual (*min_idx) is kept unchanged (elitism);
 * other genes may copy the champion's gene or flip, per-thread xorshift RNG.
 * Fix: the source contained the mojibake "®_seed" — an HTML-entity-decoded
 * "&reg_seed" — which does not compile; restored the address-of expression.
 * NOTE(review): REC/100 and MUT/100 are 0.005 and 0.001, so these
 * comparisons only pass when get_rand returns 0 (~1% each); the intended
 * thresholds were likely REC*100 and MUT*100 — confirm before changing. */
__global__ void mutate_kernel(int* gene, int *min_idx, int* seed){
if(blockIdx.x == *min_idx)
return;
int offset = blockIdx.x * LEN;
int min_offset = *min_idx * LEN;
int reg_seed = seed[blockIdx.x * LEN + threadIdx.x];
if(get_rand(100, &reg_seed) < (REC / 100))
gene[offset + threadIdx.x] = gene[min_offset + threadIdx.x];
if(get_rand(100, &reg_seed) < (MUT / 100))
gene[offset + threadIdx.x] = 1 - gene[offset + threadIdx.x];
seed[blockIdx.x * LEN + threadIdx.x] = reg_seed;
}
/* Runs the GA: uploads the population, iterates END tournaments of
 * score -> select-best -> mutate on the GPU, then downloads the final
 * population and prints the best individual found. */
void run(){
init_pop();
int low_idx = -1;
double low = 0;
int tournamentNo;
int* gene_d;
int* value_d;
double* score_d;
double* min_score;
int* min_idx;
int* seed_d;
cudaMalloc((void**)&gene_d, sizeof(int) * POP * LEN);
cudaMalloc((void**)&value_d, sizeof(int) * POP * LEN);
cudaMalloc((void**)&score_d, sizeof(double) * POP);
cudaMalloc((void**)&seed_d, sizeof(int) * POP * LEN);
cudaMalloc((void**)&min_score, sizeof(double));
cudaMalloc((void**)&min_idx, sizeof(int));
cudaMemcpy(gene_d, gene, sizeof(int) * POP * LEN, cudaMemcpyHostToDevice);
cudaMemcpy(value_d, value, sizeof(int) * POP * LEN, cudaMemcpyHostToDevice);
cudaMemcpy(score_d, score, sizeof(double) * POP, cudaMemcpyHostToDevice);
cudaMemcpy(seed_d, seed, sizeof(int) * POP * LEN, cudaMemcpyHostToDevice);
dim3 dimGrid(POP, 1); /* one block per individual */
dim3 dimBlock(1, 1);  /* comput_kernel is single-threaded per block */
for(tournamentNo = 0; tournamentNo < END; tournamentNo++){
comput_kernel<<<dimGrid, dimBlock>>>(score_d, gene_d, value_d);
find_min_score<<<1, 128>>>(score_d, min_score, min_idx);
mutate_kernel<<<dimGrid, LEN>>>(gene_d, min_idx, seed_d);
}
/* Blocking copies also synchronize with the queued kernels. */
cudaMemcpy(gene, gene_d, sizeof(int) * POP * LEN, cudaMemcpyDeviceToHost);
cudaMemcpy(score, score_d, sizeof(double) * POP, cudaMemcpyDeviceToHost);
/* Host-side scan for the best (lowest) score; -1 marks "never scored". */
low_idx = -1;
low = 0;
for(int i =0; i < POP; ++i)
if((low_idx == -1 || score[i] < low) && score[i] != -1){
low = score[i];
low_idx = i;
}
if(low_idx != -1){
get_result(low_idx);
display(tournamentNo, low_idx);
}
/* Release device memory — all six allocations leaked in the original. */
cudaFree(gene_d);
cudaFree(value_d);
cudaFree(score_d);
cudaFree(seed_d);
cudaFree(min_score);
cudaFree(min_idx);
}
/* Recompute and print the sum pile total and product pile total for
 * individual idx (gene 0 -> sum pile, gene 1 -> product pile). */
void get_result(int idx){
unsigned long long prod = 1, sum = 0;
for(int card = 0; card < LEN; ++card){
if(gene[idx][card] == 1)
prod *= value[idx][card];
else
sum += value[idx][card];
}
printf("sum :%llu prod: %llu\n", sum, prod);
}
/* Prints the solution: the cards assigned to the sum pile (gene 0) and the
 * product pile (gene 1) of individual n, after `tournaments` tournaments,
 * and asserts every gene is a valid 0/1 value. */
void display(int tournaments, int n){
printf("=========================================================================\n");
printf("After %d tournaments, Solution sum pile (should be %d) cards are : \n", tournaments, SUMTAG);
for(int i = 0; i < LEN; i++){
if(gene[n][i] == 0){
printf("%d ", value[n][i]);
}
}
printf("\n");
printf("Solution product pile (should be %d) cards are : \n", PRODTAG);
for(int i = 0; i < LEN; i++){
if(gene[n][i] == 1){
printf("%d ", value[n][i]);
}
}
/* Sanity check: the genome must still be strictly binary. */
for(int i = 0; i < LEN; i++)
assert(gene[n][i] == 1 || gene[n][i] == 0);
printf("\n=========================================================================\n");
}
/* Host-side fitness: combined relative error of individual n's sum pile vs
 * SUMTAG and product pile vs PRODTAG (lower is better).  CPU mirror of
 * comput_kernel. */
double evaluate(int n){
unsigned long long sum = 0, prod = 1;
for(int card = 0; card < LEN; card++){
if(gene[n][card] == 0)
sum += value[n][card];
else
prod *= value[n][card];
}
double sum_err = (double)(sum - (double)SUMTAG) / (double)SUMTAG;
double prod_err = (double)(prod - (double)PRODTAG) / (double)PRODTAG;
if(sum_err < 0.0) sum_err = -sum_err;
if(prod_err < 0.0) prod_err = -prod_err;
return sum_err + prod_err;
}
/* Initialise the population: random 0/1 genome, score sentinel -1,
 * card values in 1..9, and per-gene xorshift seeds. */
void init_pop(){
for(int i = 0; i < POP; i++){
for(int j = 0; j < LEN; j++)
gene[i][j] = (random_double() < 0.5) ? 0 : 1;
score[i] = -1; /* marks "not yet evaluated" */
}
for(int i = 0; i < POP; i++)
for(int j = 0; j < LEN; j++)
value[i][j] = rand() % 9 + 1;
for(int i = 0; i < POP; ++i)
for(int j = 0; j < LEN; ++j)
seed[i][j] = rand() % 10000000;
}
/* Entry point: seed the host RNG with the process id and run the GA. */
int main(){
srand(getpid());
run();
return 0;
}
|
4,108 | /** Author: alexge50
* How to use: input should be given in a file input.txt, in the same directory as the binary. Output is given in output.txt
* Input: [Number of steps]
* [height - number of rows] [width - number of columns]
* board
* Output: [time] ms
* board at the current state when execution was stopped
* Compilation requires no other option than default:
* nvcc game-of-life.cout
* ./a.out
**/
#include <stdio.h>
#include <sys/time.h>
/* Current wall-clock time in microseconds since the Unix epoch. */
inline long long GetTime()
{
struct timeval now;
gettimeofday(&now, NULL);
return now.tv_sec * 1000000LL + now.tv_usec;
}
/* One Game-of-Life generation: launched with one block per row and one
 * thread per column (blockDim.x == nColumns), so index == i*nColumns + j.
 * Only interior cells (the board has a one-cell border) are recomputed;
 * border cells of result_board keep whatever was copied in from the host.
 * NOTE(review): atomicExch is used as a plain store — each thread writes a
 * distinct cell, so atomicity appears unnecessary; confirm before relying
 * on it. */
__global__ void update(int *board, int *result_board, int nRows, int nColumns)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.x; /* column */
int i = blockIdx.x;  /* row */
if(i >= 1 && i <= nRows - 2 && j >= 1 && j <= nColumns - 2)
{
int n_neighbors = 0;
/*for(int di = -1; di <= 1; di++)
{
for(int dj = -1; dj <= 1; dj++)
{
int _index = (i + di) * nColumns + (j + dj);
n_neighbors += board[_index] && !(di == 0 && dj == 0);
}
}*/
/* Unrolled sum of the eight neighbours. */
n_neighbors = board[(i - 1) * nColumns + (j + 1)] +
board[(i + 1) * nColumns + (j + 1)] +
board[(i + 1) * nColumns + (j - 1)] +
board[(i + 0) * nColumns + (j + 1)] +
board[(i + 0) * nColumns + (j - 1)] +
board[(i + 1) * nColumns + (j + 0)] +
board[(i - 1) * nColumns + (j + 0)] +
board[(i - 1) * nColumns + (j - 1)];
/* Conway rules: live cell survives with 2 or 3 neighbours; dead cell
 * becomes alive with exactly 3. */
if(board[index])
atomicExch(&result_board[index], n_neighbors == 2 || n_neighbors == 3);
else atomicExch(&result_board[index], n_neighbors == 3);
}
}
/* Reads a Game-of-Life board from input.txt, runs nSteps generations on the
 * GPU (ping-ponging between two device boards), and writes the elapsed time
 * and final board to output.txt. */
int main()
{
FILE *fin = fopen("input.txt", "r");
FILE *fout = fopen("output.txt", "w");
// Fail fast on missing files instead of crashing inside fscanf/fprintf.
if (fin == NULL || fout == NULL)
{
fprintf(stderr, "Cannot open input.txt or output.txt\n");
return 1;
}
int nSteps;
int nRows, nColumns;
long long timeStart, timeStop;
int *board;
int *device_board0, *device_board1;
if (fscanf(fin, "%d %d %d ", &nSteps, &nRows, &nColumns) != 3)
{
fprintf(stderr, "Malformed input header\n");
return 1;
}
// Grow the board by a one-cell dead border so the kernel needs no edge cases.
nColumns += 2;
nRows += 2;
board = (int*)malloc(sizeof(int) * (nRows) * (nColumns));
cudaMalloc((void **) &device_board0, sizeof(int) * (nRows) * (nColumns));
cudaMalloc((void **) &device_board1, sizeof(int) * (nRows) * (nColumns));
for(int i = 0; i < nRows; i++)
for(int j = 0; j < nColumns; j++)
board[i * nColumns + j] = 0;
for(int i = 1; i <= nRows - 2; ++i)
for (int j = 1; j <= nColumns - 2; ++j)
{
char cell;
fscanf(fin, "%c ", &cell);
board[i * nColumns + j] = (cell == '*');
}
cudaMemcpy(device_board0, board, sizeof(int) * (nRows) * (nColumns), cudaMemcpyHostToDevice);
cudaMemcpy(device_board1, board, sizeof(int) * (nRows) * (nColumns), cudaMemcpyHostToDevice);
timeStart = GetTime();
// Ping-pong between the two device boards each generation.
// NOTE(review): the launch uses one thread per column; boards wider than
// the 1024-threads-per-block limit will fail to launch — confirm inputs.
int i = 0;
for (int k = 0; k < nSteps; ++k)
{
if(i == 0)
update<<<nRows, nColumns>>>(device_board0, device_board1, nRows, nColumns);
else
update<<<nRows, nColumns>>>(device_board1, device_board0, nRows, nColumns);
i = 1 - i;
}
// i now indexes the board holding the latest generation.
if(i == 0)
cudaMemcpy(board, device_board0, sizeof(int) * (nRows) * (nColumns), cudaMemcpyDeviceToHost);
else cudaMemcpy(board, device_board1, sizeof(int) * (nRows) * (nColumns), cudaMemcpyDeviceToHost);
timeStop = GetTime();
long double deltaTime = static_cast<long double>(timeStop - timeStart) / static_cast<long double>(1000.); // microseconds to milliseconds
fprintf(fout, "[%Lf ms]\n", deltaTime);
for(int i = 1; i <= nRows - 2; ++i)
{
for(int j = 1; j <= nColumns - 2; ++j)
fprintf(fout, "%c", board[i * nColumns + j] ? '*' : '.');
fprintf(fout, "\n");
}
printf("Time: %Lf ms\n", deltaTime);
free(board);
cudaFree(device_board0);
cudaFree(device_board1);
fclose(fin);
fclose(fout);
return 0;
}
|
4,109 | #include <stdio.h>
#include <time.h>
#define N 4
/* Debug kernel: each launched thread prints its "[block] : [thread]" ids. */
__global__ void outputFromGPU()
{
printf("[%d] : [%d]\n", blockIdx.x, threadIdx.x);
}
/* Each (single-threaded) block prints a group of four multiplication tables
 * starting at the shared counter *index, serialized across blocks by a
 * global spinlock in *mutex.  *index advances 2 -> 6 -> 10 and wraps back,
 * so three blocks cover the tables for 2..13.
 * NOTE(review): the spin on atomicExch fully serializes the blocks; with
 * more than one thread per block this pattern could deadlock — here the
 * kernel is launched with 1 thread per block. */
__global__ void multiplicationTableBlock(int *mutex, int *index)
{
int c = blockIdx.x;
while(atomicExch(mutex, 1) != 0); /* acquire: spin until we swap in 1 */
for(int i = 1; i <= 12; i++)
{
printf("[%d]\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d\n",
c, *index, i, *index*i, *index+1, i, (*index+1)*i, *index+2, i, (*index+2)*i, *index+3, i, (*index+3)*i);
}
printf("\n");
/* Advance the shared counter for the next block (wraps 10 back to 2+4). */
if(*index == 10){*index = 2;}
*index += 4;
atomicExch(mutex, 0); /* release */
}
/* One thread per multiplier row: thread c prints row i = c+1 (1..12) of the
 * four-table groups starting at 2, 6 and 10.  Row order within a group is
 * whatever order the threads' printf calls are serialized in. */
__global__ void multiplicationTableThread()
{
int c = threadIdx.x;
int i = c+1; /* multiplier for this thread's rows */
for(int index = 2; index <= 10; index+=4)
{
printf("[%d]\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d\n",
c, index, i, index*i, index+1, i, (index+1)*i, index+2, i, (index+2)*i, index+3, i, (index+3)*i);
if(i == 12){printf("\n");} /* last row ends the group with a blank line */
}
}
/* Combined variant: each block claims the next group of four tables via the
 * shared counter *index (updated by thread 0 under a global spinlock), and
 * each thread prints one multiplier row of that group.
 * Fix: added __syncthreads() after the update — in the original the other
 * threads could read *index before thread 0 had advanced it (data race).
 * NOTE(review): *index is still shared across blocks, so concurrent blocks
 * may observe each other's updates between the barrier and the printf. */
__global__ void multiplicationTableBlockAndThread(int *mutex, int *index)
{
int c = blockIdx.x;
int i = threadIdx.x+1; /* this thread's multiplier row */
if(threadIdx.x == 0)
{
while(atomicExch(mutex, 1) != 0); /* acquire global spinlock */
*index += 4;
atomicExch(mutex, 0); /* release */
}
__syncthreads(); /* all threads wait for thread 0's update of *index */
printf("[%d] : [%d]\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d\n%s",
c, threadIdx.x, *index, i, *index*i, *index+1, i, (*index+1)*i, *index+2, i, (*index+2)*i, *index+3, i, (*index+3)*i, (i == 12)?"\n":"");
}
/* Exercise the demo kernels: id printing with blocks/threads, then the three
 * GPU multiplication-table variants, then a CPU reference version. */
int main(void)
{
printf(":: Ex3 ::\n\n");
int *h_index, *d_index, *d_mutex;
h_index = (int*)malloc(sizeof(int));
cudaMalloc((void**)&d_index, sizeof(int));
cudaMalloc((void**)&d_mutex, sizeof(int));
/* Fix: cudaMalloc does not zero memory — an uninitialized mutex word that
 * happens to be non-zero makes every spinlock loop spin forever. */
cudaMemset(d_mutex, 0, sizeof(int));
printf("\n::: Block only :::\n");
outputFromGPU<<<N, 1>>>();
cudaDeviceSynchronize();
printf("\n::: Thread only :::\n");
outputFromGPU<<<1, N>>>();
cudaDeviceSynchronize();
printf("\n::: Block and Thread :::\n");
outputFromGPU<<<N, N>>>();
cudaDeviceSynchronize();
printf("\n::: Multiplication Table Block :::\n\n");
*h_index = 2;
cudaMemcpy(d_index, h_index, sizeof(int), cudaMemcpyHostToDevice);
multiplicationTableBlock<<<3, 1>>>(d_mutex, d_index);
cudaDeviceSynchronize();
printf("::: Multiplication Table Thread :::\n\n");
multiplicationTableThread<<<1, 12>>>();
cudaDeviceSynchronize();
printf("::: Multiplication Table Block and Thread :::\n\n");
*h_index = -2; /* first block's +4 brings it to 2 */
cudaMemcpy(d_index, h_index, sizeof(int), cudaMemcpyHostToDevice);
multiplicationTableBlockAndThread<<<3, 12>>>(d_mutex, d_index);
cudaDeviceSynchronize();
printf("::: Multiplication Table CPU :::\n\n");
for(int i = 2; i <= 10; i+=4)
{
for(int a = 1; a <= 12; a++)
{
printf("%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d \t\t%d x %d = %d\n",
i, a, i*a, i+1, a, (i+1)*a, i+2, a, (i+2)*a, i+3, a, (i+3)*a);
}
printf("\n");
}
cudaFree(d_index);
cudaFree(d_mutex);
free(h_index);
return 0;
}
|
4,110 | #include <stdio.h>
#include <math.h>
#include <cuda.h>
#define CHUNK_SIZE 1024
#define T unsigned long int
//make sure start is less than N/2. a is a pointer to an array of length >= N
/* One doubling step: thread i computes a[start + i] from the two highest
 * already-known terms a[start-2], a[start-1] and the pair a[i], a[i+1].
 * NOTE(review): looks like the Fibonacci doubling identity
 * F(m+i) = F(m-1)*F(i) + F(m)*F(i+1) applied with m = start (seed 1,1,2);
 * confirm the offsets before reuse. */
__global__ void Fibonacci( T *a, int start) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int index = i + start; /* output position */
if (i < 2 * start - 1) {
a[index] = (a[start - 2] * a[i]) + (a[start - 1] * a[i + 1]);
}
}
/* Computes N Fibonacci numbers on the GPU by repeated doubling steps and
 * prints them.  Usage: ./a.out N */
int main(int argc, char *argv[]) {
int N = 0;
if (argc == 2) {
N = atoi(argv[1]);
} else {
printf("Invalid number of command line arguments.\n");
return 1;
}
/* Fix: the seed copy below writes 3 elements, so N < 3 overflowed d_a. */
if (N < 3) {
printf("N must be at least 3.\n");
return 1;
}
T x[3]= {1, 1, 2}; /* seed terms */
T *d_a;
//Allocate memory on the device
cudaMalloc(&d_a, N*sizeof(T));
cudaMemcpy(d_a, x, sizeof(x), cudaMemcpyHostToDevice);
unsigned int start = 3;
/* Each pass roughly doubles the number of known terms. */
while (start <= N/2 ) {
/* ceiling of (start-1)/CHUNK_SIZE blocks */
unsigned int num_blocks = (start - 1)/CHUNK_SIZE;
if ((start - 1) % CHUNK_SIZE != 0) {
num_blocks++;
}
Fibonacci <<<num_blocks, CHUNK_SIZE>>>(d_a, start);
start = 2 * start - 1;
}
//Get the results array back
/* Fix: heap allocation instead of a VLA — `T b[N]` overflowed the stack
 * for large N. */
T *b = (T *)malloc(N * sizeof(T));
cudaMemcpy(b, d_a, N*sizeof(T), cudaMemcpyDeviceToHost);
//Print results
for (int i = 0; i < N; i++) {
printf("%d:\t%lu \n", i + 1, b[i]);
}
//Free device memory
cudaFree(d_a);
free(b);
fflush(stdout);
return 0;
}
|
4,111 | //pass
//--blockDim=1024 --gridDim=4
#include <cuda.h>
//////////////////////////////////////////////////////////////////////////////
//// Copyright (c) Microsoft Corporation. All rights reserved
//// This software contains source code provided by NVIDIA Corporation.
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: MersenneTwister.cpp
//
// This sample implements Mersenne Twister random number generator
// and Cartesian Box-Muller transformation on the GPU.
//----------------------------------------------------------------------------
#define MT_RNG_COUNT 4096
#define MT_MM 9
#define MT_NN 19
#define MT_WMASK 0xFFFFFFFFU
#define MT_UMASK 0xFFFFFFFEU
#define MT_LMASK 0x1U
#define MT_SHIFT0 12
#define MT_SHIFTB 7
#define MT_SHIFTC 15
#define MT_SHIFT1 18
////////////////////////////////////////////////////////////////////////////////
// Transform each of MT_RNG_COUNT lanes of n_per_RNG uniformly distributed
// random samples, produced by rand_MT_amp(), to normally distributed lanes
// using Cartesian form of Box-Muller transformation.
// n_per_RNG must be even.
////////////////////////////////////////////////////////////////////////////////
// Convert two uniform samples in (0, 1] (passed in *u1, *u2) into two
// independent standard-normal samples, in place, via the Cartesian
// Box-Muller transform. *u1 must be > 0 (logf(0) is -inf).
static __attribute__((always_inline)) __device__ void box_muller_transform(float* u1, float* u2)
{
	// Explicit single-precision math functions (sqrtf/logf/cosf/sinf) make the
	// intent unambiguous and avoid any accidental double-precision promotion.
	float r = sqrtf(-2.0f * logf(*u1));
	float phi = 2.0f * 3.14159265358979f * (*u2);
	*u1 = r * cosf(phi);
	*u2 = r * sinf(phi);
}
// Transform one RNG lane's uniform samples into normal samples.
// Layout: sample `out` of lane `gid` lives at random_nums[out * MT_RNG_COUNT + gid]
// (lane-major striping across MT_RNG_COUNT lanes). Each thread owns one lane
// and processes its samples two at a time, as Box-Muller consumes pairs.
// Precondition (per the comment above): n_per_RNG must be even.
__global__ void box_muller_kernel(float* random_nums, float* normalized_random_nums, int n_per_RNG)
{
int gid = (blockIdx.x*blockDim.x + threadIdx.x);
for(int out = 0;
out < n_per_RNG; out += 2)
{
// Load a pair of uniforms, transform in registers, store the pair of normals.
float f0 = random_nums[out * MT_RNG_COUNT + gid];
float f1 = random_nums[(out + 1) * MT_RNG_COUNT + gid];
box_muller_transform(&f0, &f1);
normalized_random_nums[out * MT_RNG_COUNT + gid] = f0;
normalized_random_nums[(out + 1) * MT_RNG_COUNT + gid] = f1;
#ifdef MUTATION
// Deliberate bug-injection path (self-assignment of a neighbouring lane's
// slot) used for verification-tool testing; inactive unless MUTATION is set.
normalized_random_nums[out * MT_RNG_COUNT + gid + 1] = normalized_random_nums[out * MT_RNG_COUNT + gid + 1];
/* BUGINJECT: ADD_ACCESS, UP */
#endif
}
}
|
4,112 | #include "TmpMalloc.cuh"
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <map>
#include <vector>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA API call to stderr and, unless `abort` is false,
// terminate the process with the error code. `file`/`line` identify the call
// site and are normally supplied by the gpuErrchk macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Release every device allocation this pool still owns: the named
// bool/float/int/int* arrays, the recycled points/nodes/dims/one queues, and
// the power-of-two "any" pools. The per-pool counters are decremented for each
// queued buffer freed; a non-zero remainder means buffers were handed out via
// malloc_* but never returned with free_*, and is reported as a leak.
void TmpMalloc::free_all() {
// Named arrays: free the device pointer of every registered name.
for (pair<int, bool *> p: this->bool_arrays) {
cudaFree(p.second);
}
for (pair<int, float *> p: this->float_arrays) {
cudaFree(p.second);
}
for (pair<int, int *> p: this->int_arrays) {
cudaFree(p.second);
}
for (pair<int, int **> p: this->int_pointer_arrays) {
cudaFree(p.second);
}
int *tmp;
// Pooled buffers: drain each queue, freeing and decrementing the live count.
while (!this->q_points.empty()) {
tmp = this->q_points.front();
this->q_points.pop();
cudaFree(tmp);
points_count--;
}
if (points_count) {
printf("memory leak for points: %d\n", points_count);
}
while (!this->q_nodes.empty()) {
tmp = this->q_nodes.front();
this->q_nodes.pop();
cudaFree(tmp);
nodes_count--;
}
if (nodes_count) {
printf("memory leak for nodes: %d\n", nodes_count);
}
while (!this->q_dims.empty()) {
tmp = this->q_dims.front();
this->q_dims.pop();
cudaFree(tmp);
dims_count--;
}
if (dims_count) {
printf("memory leak for dims: %d\n", dims_count);
}
while (!this->q_one.empty()) {
tmp = this->q_one.front();
this->q_one.pop();
cudaFree(tmp);
one_count--;
}
if (one_count) {
printf("memory leak for one: %d\n", one_count);
}
// "any" pools: drain a by-value copy of each size-class queue. The member
// queue keeps (now dangling) pointers, but is never used after free_all.
for (const auto& q_pair : this->q) {
queue<int*> q = q_pair.second;
while (!q.empty()) {
tmp = q.front();
q.pop();
cudaFree(tmp);
}
}
// Mark the pool as freed so the destructor does not free twice.
not_free = false;
}
// Destructor: release all cached device memory unless free_all() was
// already called explicitly (not_free is cleared by free_all).
TmpMalloc::~TmpMalloc() {
    if (!not_free)
        return;
    this->free_all();
}
// Return the device bool buffer registered under `name`, allocating it on
// first use and growing it when `size` exceeds the cached capacity.
// WARNING: growing frees the old buffer first, so existing contents are lost.
bool *TmpMalloc::get_bool_array(int name, int size) {
    bool *tmp;
    map<int, bool *>::iterator it = this->bool_arrays.find(name);
    if (it != this->bool_arrays.end()) {
        // Reuse the iterator instead of a second map lookup.
        tmp = it->second;
        int tmp_size = bool_array_sizes[name];
        if (size > tmp_size) {
            // Grow: free + malloc (contents are NOT preserved).
            cudaFree(tmp);
            cudaMalloc(&tmp, size * sizeof(bool));
            it->second = tmp;
            this->bool_array_sizes[name] = size;
        }
    } else {
        cudaMalloc(&tmp, size * sizeof(bool));
        this->bool_arrays.insert(pair<int, bool *>(name, tmp));
        this->bool_array_sizes.insert(pair<int, int>(name, size));
    }
    return tmp;
}
// Return the device float buffer registered under `name`, allocating it on
// first use and growing it when `size` exceeds the cached capacity.
// WARNING: growing frees the old buffer first, so existing contents are lost.
float *TmpMalloc::get_float_array(int name, int size) {
    float *tmp;
    map<int, float *>::iterator it = this->float_arrays.find(name);
    if (it != this->float_arrays.end()) {
        // Reuse the iterator instead of a second map lookup.
        tmp = it->second;
        int tmp_size = float_array_sizes[name];
        if (size > tmp_size) {
            // Grow: free + malloc (contents are NOT preserved).
            cudaFree(tmp);
            cudaMalloc(&tmp, size * sizeof(float));
            it->second = tmp;
            this->float_array_sizes[name] = size;
        }
    } else {
        cudaMalloc(&tmp, size * sizeof(float));
        this->float_arrays.insert(pair<int, float *>(name, tmp));
        this->float_array_sizes.insert(pair<int, int>(name, size));
    }
    return tmp;
}
// Return the device int buffer registered under `name`, allocating it on
// first use and growing it when `size` exceeds the cached capacity.
// WARNING: growing frees the old buffer first, so existing contents are lost.
int *TmpMalloc::get_int_array(int name, int size) {
    int *tmp;
    map<int, int *>::iterator it = this->int_arrays.find(name);
    if (it != this->int_arrays.end()) {
        // Reuse the iterator instead of a second map lookup.
        tmp = it->second;
        int tmp_size = int_array_sizes[name];
        if (size > tmp_size) {
            // Grow: free + malloc (contents are NOT preserved).
            cudaFree(tmp);
            cudaMalloc(&tmp, size * sizeof(int));
            it->second = tmp;
            this->int_array_sizes[name] = size;
        }
    } else {
        cudaMalloc(&tmp, size * sizeof(int));
        this->int_arrays.insert(pair<int, int *>(name, tmp));
        this->int_array_sizes.insert(pair<int, int>(name, size));
    }
    return tmp;
}
// Return the device int* buffer registered under `name`, allocating it on
// first use and growing it when `size` exceeds the cached capacity.
// WARNING: growing frees the old buffer first, so existing contents are lost.
int **TmpMalloc::get_int_pointer_array(int name, int size) {
    int **tmp;
    map<int, int **>::iterator it = this->int_pointer_arrays.find(name);
    if (it != this->int_pointer_arrays.end()) {
        // Reuse the iterator instead of a second map lookup.
        tmp = it->second;
        int tmp_size = int_pointer_array_sizes[name];
        if (size > tmp_size) {
            // Grow: free + malloc (contents are NOT preserved).
            cudaFree(tmp);
            cudaMalloc(&tmp, size * sizeof(int *));
            it->second = tmp;
            this->int_pointer_array_sizes[name] = size;
        }
    } else {
        cudaMalloc(&tmp, size * sizeof(int *));
        this->int_pointer_arrays.insert(pair<int, int **>(name, tmp));
        this->int_pointer_array_sizes.insert(pair<int, int>(name, size));
    }
    return tmp;
}
// Zero every per-type naming counter so the next pass reuses names from 0.
void TmpMalloc::reset_counters() {
    bool_array_counter = float_array_counter = 0;
    int_array_counter = int_pointer_array_counter = 0;
}
// Construct an empty pool: no named buffers handed out yet and no live
// allocations in any of the recycling queues.
TmpMalloc::TmpMalloc() {
    bool_array_counter = float_array_counter = 0;
    int_array_counter = int_pointer_array_counter = 0;
    points_count = nodes_count = 0;
    dims_count = one_count = 0;
}
// Record the problem dimensions used to size the pooled allocations
// (malloc_points / malloc_nodes / malloc_dims below).
void TmpMalloc::set(int number_of_points, int number_of_nodes, int number_of_dims) {
    this->number_of_dims = number_of_dims;
    this->number_of_nodes = number_of_nodes;
    this->number_of_points = number_of_points;
}
// Hand out a device int buffer of number_of_points entries, reusing a
// previously released one when the pool has any; otherwise allocate a new
// one and bump the live-allocation counter.
int *TmpMalloc::malloc_points() {
    if (this->q_points.empty()) {
        int *fresh;
        cudaMalloc(&fresh, this->number_of_points * sizeof(int));
        points_count++;  // one more live "points" allocation
        return fresh;
    }
    int *recycled = this->q_points.front();
    this->q_points.pop();
    return recycled;
}
// Return a "points" buffer to the pool for later reuse (not freed here).
void TmpMalloc::free_points(int *memory) {
    this->q_points.push(memory);
}
// Hand out a device int buffer of number_of_nodes entries, recycling from
// the pool when possible; otherwise allocate and count it as live.
int *TmpMalloc::malloc_nodes() {
    if (this->q_nodes.empty()) {
        int *fresh;
        cudaMalloc(&fresh, number_of_nodes * sizeof(int));
        nodes_count++;  // one more live "nodes" allocation
        return fresh;
    }
    int *recycled = this->q_nodes.front();
    this->q_nodes.pop();
    return recycled;
}
// Return a "nodes" buffer to the pool for later reuse (not freed here).
void TmpMalloc::free_nodes(int *memory) {
    this->q_nodes.push(memory);
}
// Hand out a device int buffer of number_of_dims entries, recycling from
// the pool when possible; otherwise allocate and count it as live.
int *TmpMalloc::malloc_dims() {
    if (this->q_dims.empty()) {
        int *fresh;
        cudaMalloc(&fresh, number_of_dims * sizeof(int));
        dims_count++;  // one more live "dims" allocation
        return fresh;
    }
    int *recycled = this->q_dims.front();
    this->q_dims.pop();
    return recycled;
}
// Return a "dims" buffer to the pool for later reuse (not freed here).
void TmpMalloc::free_dims(int *memory) {
    this->q_dims.push(memory);
}
// Hand out a single-int device buffer (e.g. for scalar results), recycling
// from the pool when possible; otherwise allocate and count it as live.
int *TmpMalloc::malloc_one() {
    if (this->q_one.empty()) {
        int *fresh;
        cudaMalloc(&fresh, sizeof(int));
        one_count++;  // one more live single-int allocation
        return fresh;
    }
    int *recycled = this->q_one.front();
    this->q_one.pop();
    return recycled;
}
// Return a single-int buffer to the pool for later reuse (not freed here).
void TmpMalloc::free_one(int *memory) {
    this->q_one.push(memory);
}
// Hand out a device int buffer of at least n entries. Buffers are pooled by
// power-of-two size class (key = ceil(log2(n))) so a freed buffer can serve
// any later request in the same class.
int *TmpMalloc::malloc_any(int n) {
    // Guard n <= 1: the original computed log2(n) unconditionally, and for
    // n == 0 (or negative) that is -inf/NaN, making the int conversion
    // undefined behavior. n == 1 maps to key 0 exactly as before.
    int key = (n > 1) ? int(ceil(log2(double(n)))) : 0;
    if (this->q.find(key) == this->q.end()) {
        this->q[key] = std::queue<int *>();
    }
    int *tmp;
    if (!this->q[key].empty()) {
        tmp = this->q[key].front();
        this->q[key].pop();
    } else {
        // 2^key elements; integer shift replaces the double-valued pow(2, key).
        cudaMalloc(&tmp, ((size_t)1 << key) * sizeof(int));
    }
    return tmp;
}
// Return a buffer obtained from malloc_any to its size-class pool. Must be
// called with an n in the same power-of-two class as the matching malloc_any.
void TmpMalloc::free_any(int *memory, int n) {
    int key = (n > 1) ? int(ceil(log2(double(n)))) : 0;
    this->q[key].push(memory);
}
|
4,113 | #include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
// For each spike, pick the best-matching filter: among filters enabled for
// the spike's channel (iMatch), choose the one minimizing cmax[spike, filter],
// starting from the baseline mus[spike]^2. Writes the winning filter index to
// id[spike] and the winning score to x[spike].
// Grid-stride loop over spikes (tind advances by gridDim.x * blockDim.x).
__global__ void bestFilter(const double *Params, const bool *iMatch, const int *Wh, const float *cmax, const float *mus, int *id, float *x){
int tid,tind,bid, my_chan, ind, Nspikes, Nfilters, Nthreads, Nchan, Nblocks;
float max_running = 0.0f;
// Problem sizes are packed into the Params array.
Nspikes = (int) Params[0];
Nfilters = (int) Params[2];
Nchan = (int) Params[7];
Nthreads = blockDim.x;
Nblocks = gridDim.x;
tid = threadIdx.x;
bid = blockIdx.x;
tind = tid + bid * Nthreads;
while (tind<Nspikes){
// Baseline score and default filter 0 for this spike.
max_running = mus[tind] * mus[tind];
id[tind] = 0;
my_chan = Wh[tind];
// Scan all filters; iMatch is a (channel x filter) enable mask.
for(ind=0; ind<Nfilters; ind++)
if (iMatch[my_chan + ind * Nchan])
if (cmax[tind + ind*Nspikes] < max_running){
id[tind] = ind;
max_running = cmax[tind + ind*Nspikes];
}
x[tind] = max_running;
tind += Nblocks*Nthreads;
}
}
4,114 | /* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdio>
#define BLOCK_DIM_X 32
#define BLOCK_DIM_Y 32
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
}
// Fill the left/right boundary columns of both grids with a sinusoidal
// profile. One thread per local row, grid-stride over my_ny rows; `offset` is
// this partition's first global row and `ny` the global row count.
__global__ void initialize_boundaries(float* a_new, float* a, const float pi, const int offset,
                                      const int nx, const int my_ny, const int ny) {
    for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) {
        // sinf and the 2.0f literal keep the whole expression in single
        // precision; the original `sin(2.0 * ...)` promoted it to double.
        const float y0 = sinf(2.0f * pi * (offset + iy) / (ny - 1));
        a[iy * nx + 0] = y0;
        a[iy * nx + (nx - 1)] = y0;
        a_new[iy * nx + 0] = y0;
        a_new[iy * nx + (nx - 1)] = y0;
    }
}
// One Jacobi relaxation sweep over rows [iy_start, iy_end) and interior
// columns [1, nx-1): a_new = 4-point average of a, accumulating the squared
// residue into *l2_norm via a block-level shared-memory reduction plus one
// atomicAdd per block. Assumes blockDim == (BLOCK_DIM_X, BLOCK_DIM_Y) so the
// static shared array matches the thread count.
__global__ void jacobi_kernel(float* a_new, const float* a, float* l2_norm, const int iy_start,
                              const int iy_end, const int nx) {
    int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
    int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
    __shared__ float block_l2_sum[BLOCK_DIM_X*BLOCK_DIM_Y];
    unsigned thread_index = threadIdx.y*BLOCK_DIM_X + threadIdx.x;
    if (iy < iy_end && ix < (nx - 1)) {
        // Update grid point
        const float new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] +
                                      a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]);
        a_new[iy * nx + ix] = new_val;
        float residue = new_val - a[iy * nx + ix];
        // Set block-level L2 norm value for this grid point
        block_l2_sum[thread_index] = residue * residue;
    }
    else {
        // Out-of-range threads contribute zero so the reduction stays valid.
        block_l2_sum[thread_index] = 0;
    }
    // Reduce L2 norm for the block in parallel (interleaved pairwise tree).
    // The barrier sits outside the divergent `if`, so all threads reach it.
    for (unsigned stride = 1; stride < BLOCK_DIM_X*BLOCK_DIM_Y; stride *= 2) {
        __syncthreads();
        if ((thread_index) % (2*stride) == 0) {
            block_l2_sum[thread_index] += block_l2_sum[thread_index + stride];
        }
    }
    // Atomically update global L2 norm with block-reduced L2 norm
    if (thread_index == 0) {
        atomicAdd(l2_norm, block_l2_sum[0]);
    }
}
// Host wrapper: one thread per boundary row, 128-thread blocks; the +1 block
// covers the partial tail when my_ny is not a multiple of 128.
void launch_initialize_boundaries(float* a_new, float* a, const float pi, const int offset,
                                  const int nx, const int my_ny, const int ny){
    const int block = 128;
    const int grid = my_ny / block + 1;
    initialize_boundaries<<<grid, block>>>(a_new, a, pi, offset, nx, my_ny, ny);
}
// Host wrapper: tile the interior with BLOCK_DIM_X x BLOCK_DIM_Y blocks,
// ceiling-dividing so the grid covers nx columns and iy_end - iy_start rows.
void launch_jacobi_kernel(float* a_new, const float* a, float* l2_norm, const int iy_start,
                          const int iy_end, const int nx) {
    const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
    const dim3 grid((nx + BLOCK_DIM_X - 1) / BLOCK_DIM_X,
                    ((iy_end - iy_start) + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y, 1);
    jacobi_kernel<<<grid, block>>>(a_new, a, l2_norm, iy_start, iy_end, nx);
}
|
// Sum-reduce one block's slice of d_in into d_out[blockIdx.x].
// Requires blockDim.x * sizeof(float) of dynamic shared memory (3rd launch
// argument). There is no bounds guard, so every thread must map to a valid
// d_in element; the halving loop drops odd remainders, so blockDim.x should
// be a power of two.
__global__ void reduce_kernel(float* d_out, const float *d_in){
    extern __shared__ float sdata[];
    const unsigned int tid = threadIdx.x;
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    // Stage this block's slice of the input in shared memory.
    sdata[tid] = d_in[gid];
    __syncthreads();
    // Tree reduction: halve the active range each step.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (tid == 0)
        d_out[blockIdx.x] = sdata[0];
}
// Two-stage sum reduction: d_in (size elements) -> per-block partials in
// d_intermediate -> final sum in d_out[0].
// Preconditions (as in the original comment): size is a multiple of
// maxThreadsPerBlock and size <= maxThreadsPerBlock^2.
void reduce(float *d_out, float *d_intermediate, float *d_in, int size){
    const int maxThreadsPerBlock = 1024;
    int threads = maxThreadsPerBlock;
    int blocks = size / maxThreadsPerBlock;
    // Stage 1: each block reduces its slice into d_intermediate[block].
    reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_intermediate, d_in);
    // Stage 2 (missing in the original, which never wrote d_out): reduce the
    // per-block partials into d_out[0]. blocks <= 1024 by the size
    // precondition, so a single block of `blocks` threads suffices.
    threads = blocks;
    reduce_kernel<<<1, threads, threads * sizeof(float)>>>(d_out, d_intermediate);
}
4,116 | #include "includes.h"
// Sobel vertical-gradient pass: each thread convolves a contiguous chunk of
// `chunk_size_per_thread` pixels of `channel` with the 3x3 y-kernel, writing
// into y_gradient. Pixel i is treated as the top-left of the 3x3 window
// (rows i, i+width, i+2*width), so output is offset rather than centered —
// matches the indexing below.
// NOTE(review): the loop bound `(index + 1) * chunk_size_per_thread - 1`
// skips the last pixel of every chunk — looks like an off-by-one; confirm.
// NOTE(review): `blockIdx.x == 0 && blockIdx.x == 0` repeats the same test;
// the second was presumably meant to be threadIdx.x (it is redundant anyway,
// since i == 0 already pins the first thread of the first block).
__global__ void cudaComputeYGradient(int* y_gradient, unsigned char* channel, int image_width, int image_height, int chunk_size_per_thread) {
int y_kernel[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = index * chunk_size_per_thread; i < (index + 1) * chunk_size_per_thread - 1; i++) {
// Guard: the 3x3 window's last tap (i + 2*width + 1) must stay in-image.
if (i + 2 * image_width + 1 < image_width * image_height) {
if (i == 0 && blockIdx.x == 0 && blockIdx.x == 0) {
continue;
} else {
// Weighted sum of the three rows of the window; columns i-1, i, i+1.
y_gradient[i] =
y_kernel[0][0] * channel[i - 1] +
y_kernel[1][0] * channel[i] +
y_kernel[2][0] * channel[i + 1] +
y_kernel[0][1] * channel[i + image_width - 1] +
y_kernel[1][1] * channel[i + image_width] +
y_kernel[2][1] * channel[i + image_width + 1] +
y_kernel[0][2] * channel[i + 2 * image_width - 1] +
y_kernel[1][2] * channel[i + 2 * image_width] +
y_kernel[2][2] * channel[i + 2 * image_width + 1];
}
}
}
return;
}
4,117 | #include <stdio.h>
// Entry point: prints a greeting from the host; no GPU work is done.
int main(int argc, char **argv)
{
    (void)argc;  // unused
    (void)argv;  // unused
    printf("Hallo World from CPU!\n");
    return 0;    // explicit success status (was implicit)
}
|
4,118 | #include <iostream>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define size 21 // Tamanho da matrix
// Exibe os pontos na tela
// Render the interior of the grid (borders excluded): '#' = live, '_' = dead.
// Leading newlines push the previous frame out of view.
__host__ void print(bool grid[][size]){
    std::cout << "\n\n\n\n\n";
    for(unsigned int row = 1; row + 1 < size; ++row) {
        for(unsigned int col = 1; col + 1 < size; ++col) {
            std::cout << (grid[row][col] ? "#" : "_");
        }
        std::cout << std::endl;
    }
}
// True if any cell in the full grid (borders included) is alive.
__host__ bool someoneAlive(bool grid[][size]){
    for(unsigned int r = 0; r < size; ++r) {
        for(unsigned int c = 0; c < size; ++c) {
            if(grid[r][c]) {
                return true;
            }
        }
    }
    return false;
}
// Calcula a simulacao
// One Game-of-Life step for cell (m, n). Each thread snapshots the whole grid
// into a private copy, then applies the birth/death rules in place.
// NOTE(review): there is no synchronization between one thread's snapshot and
// other threads' writes to `grid`, so concurrent blocks can race; a separate
// output buffer would make the update deterministic.
__global__ void jogo(bool grid[][size]){
    int m = blockIdx.x * blockDim.x + threadIdx.x;
    int n = blockIdx.y * blockDim.y + threadIdx.y;
    // Only interior cells are updated. The original bound (m < size) let
    // border threads index grid_tmp[-1] / grid_tmp[size] — out of bounds.
    if (m < 1 || m >= size - 1 || n < 1 || n >= size - 1)
        return;
    // Per-thread snapshot of the board.
    bool grid_tmp[size][size] = {};
    for (unsigned int i = 0; i < size; i++)
        for (unsigned int j = 0; j < size; j++)
            grid_tmp[i][j] = grid[i][j];
    // Count the 8 neighbours.
    unsigned int count = 0;
    for (int k = -1; k <= 1; k++)
        for (int l = -1; l <= 1; l++)
            if ((k != 0 || l != 0) && grid_tmp[m + k][n + l])
                ++count;
    if (count < 2 || count > 3) {
        grid[m][n] = false;   // under/over-population kills the cell
    } else if (count == 3) {
        grid[m][n] = true;    // exactly 3 neighbours: reproduction
    }
}
// Host driver: seeds a pattern, then steps the simulation on the GPU until no
// cell is alive, printing each generation.
int main(){
    bool grid[size][size] = {}; // dados iniciais
    grid[ 5][ 7] = true;
    grid[ 6][ 8] = true;
    grid[ 8][ 8] = true;
    grid[ 6][ 9] = true;
    grid[ 8][10] = true;
    grid[ 9][10] = true;
    grid[ 8][11] = true;
    grid[10][11] = true;
    grid[10][12] = true;
    // Device copy of the grid. The original declared a host ARRAY and passed
    // it (decayed) to cudaMalloc, so no usable device pointer was ever stored;
    // cudaMalloc needs the ADDRESS of a pointer variable.
    bool (*d_grid)[size];
    int mem_size = size * size * sizeof(bool);
    cudaMalloc((void **) &d_grid, mem_size);
    int nthreads = 7;
    dim3 blocks(size / nthreads + 1, size / nthreads + 1);
    dim3 threads(nthreads, nthreads);
    while (someoneAlive(grid)) {
        cudaMemcpy(d_grid, grid, mem_size, cudaMemcpyHostToDevice);
        jogo<<<blocks, threads>>>(d_grid);
        cudaDeviceSynchronize();
        cudaMemcpy(grid, d_grid, mem_size, cudaMemcpyDeviceToHost);
        print(grid);
        usleep(100000);
        // The original returned 0 here, ending after one generation and
        // leaking d_grid; loop until the board is empty instead.
    }
    cudaFree(d_grid);
    return 0;
}
|
4,119 | #include "includes.h"
// Replicate each input plane's row of num_cols values across all num_rows
// rows of the matching output plane (in is num_planes x num_cols, out is
// num_planes x num_rows x num_cols). One thread per (plane, row) pair, with
// gid = plane * num_rows + row.
__global__ void tile_kernel(const float* in,float* out, int num_planes, int num_rows, int num_cols) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    const int plane = gid / num_rows;
    if (plane >= num_planes)
        return;
    const int row = gid - plane * num_rows;   // == gid % num_rows
    const int elems_per_plane = num_rows * num_cols;
    float* dst = out + plane * elems_per_plane + row * num_cols;
    const float* src = in + plane * num_cols;
    for (int col = 0; col < num_cols; ++col)
        dst[col] = src[col];
}
4,120 | // Multiply two matrices A * B = C
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
//Thread block size
#define BLOCK_SIZE 3
#define WA 3
// Matrix A width
#define HA 3
// Matrix A height
#define WB 3
// Matrix B width
#define HB WA
// Matrix B height
#define WC WB
// Matrix C width
#define HC HA
// Matrix C height
//Allocates a matrix with random float entries.
// Fill `data` with deterministic values data[i] = i.
// (The rand()-based initialization is kept commented out, as in the original.)
void randomInit(float * data ,int size)
{
    int i = 0;
    while (i < size) {
        //data[i] = rand() / (float) RAND_MAX;
        data[i] = (float)i;
        ++i;
    }
}
// CUDA Kernel
// Naive single-block matrix multiply: one thread per output element of C.
// A is (rows x wA), B is (wA x wB), C is (rows x wB), all row-major.
__global__ void matrixMul(float* C,float* A,float* B,int wA,int wB)
{
    // 2D thread ID selects the output element: row ty, column tx.
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Dot product of row ty of A with column tx of B.
    float value = 0;
    for(int i = 0; i < wA; ++i)
    {
        float elementA = A[ty * wA + i];
        float elementB = B[i * wB + tx];
        value += elementA * elementB;
    }
    // C has row width wB, so index with wB. The original used wA here, which
    // only happened to work because this program sets WA == WB.
    C[ty * wB + tx] = value;
}
// Program main
// Host driver: builds two small matrices, multiplies them on the GPU and
// prints A, B and C. NOTE(review): no CUDA call is error-checked, and the
// grid uses exact division (WC / threads.x), which only covers C when WC and
// HC are multiples of BLOCK_SIZE — true for the constants above.
int main(int argc ,char** argv)
{
// set seed for rand()
srand(2006);
// 1. allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A =sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B =sizeof(float) * size_B;
float * h_B = (float*) malloc(mem_size_B);
// 2. initialize host memory (sequential values; see randomInit)
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// 3. print out A and B
printf("\n\nMatrix A\n");
for(int i = 0; i < size_A; i++)
{
printf("%f ", h_A[i]);
if(((i + 1) % WA) == 0)
printf("\n");
}
printf("\n\nMatrix B\n");
for(int i = 0; i < size_B; i++)
{
printf
("%f ", h_B[i]);
if(((i + 1) % WB) == 0)
printf("\n");
}
// 4. allocate host memory for the result C
unsigned int size_C = WC * HC;
unsigned int mem_size_C =sizeof(float) * size_C;
float * h_C = (float *) malloc(mem_size_C);
// 8. allocate device memory
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
//9. copy host memory to device
cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice);
// 10. allocate device memory for the result
cudaMalloc((void**) &d_C, mem_size_C);
// 5. perform the calculation
// setup execution parameters: one BLOCK_SIZE x BLOCK_SIZE thread tile per grid cell
dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
dim3 grid(WC / threads.x, HC / threads.y);
// execute the kernel
matrixMul<<< grid , threads >>>(d_C, d_A,d_B, WA, WB);
// 11. copy result from device to host (cudaMemcpy blocks until the kernel finishes)
cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost);
// 6. print out the results
printf("\n\n Matrix C ( Results ) \n ");
for(int i = 0;i<size_C; i ++){
printf("%f",h_C[i]);
if(((i+ 1) % WC) == 0)
printf("\n");
}
printf("\n");
// 7.clean up memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
|
4,121 | #include <iostream>
#include <vector>
#include <fstream>
#include <map>
#include <string>
#include <sstream>
#include <iterator>
#include <algorithm>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <chrono>
#define timeNow() std::chrono::high_resolution_clock::now()
#define duration(start, stop) std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count()
typedef std::chrono::high_resolution_clock::time_point TimeVar;
/*
* Fills in the matrix term_class_matrix based on the frequency of terms. The term_index_arr
* holds the indices for the doc_term_arr where each term starts. Increment frequency of
* term_class_matrix for the class and term by looping through all docs with that term.
* The doc_class array is used to hold the class of each doc
*/
// One thread per term i: count, per class, how many of the term's documents
// fall in that class. term_index_arr[i] is the start of term i's document
// list inside doc_term_arr; the list ends where term i+1's begins (or at
// doc_term_len - 1 for the last term). Each thread writes only row i of
// term_class_matrix, so the unsynchronized += is race-free.
__global__ void calcFreq(int *term_index_arr, int *doc_term_arr, int *doc_class, float *term_class_matrix,
int num_terms, int doc_term_len, int classes) {
// Flatten the 3D grid + 1D block into a single linear thread index.
unsigned int i = blockIdx.x * gridDim.y * gridDim.z *
blockDim.x + blockIdx.y * gridDim.z *
blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
int start = term_index_arr[i];
int end = term_index_arr[i];
if(i < num_terms - 1) {
end = term_index_arr[i+1];
} else if (i == num_terms - 1){
end = doc_term_len - 1;
} else {
// Threads beyond the number of terms do nothing.
return ;
}
for (int x = start; x < end; x++) {
// doc_class maps document id -> class index; bump this term's count there.
term_class_matrix[classes * i + doc_class[doc_term_arr[x]]] += 1.0;
}
}
/*
* Calculates total number of terms per class and places into an array. Parallelized
* based on class
*/
/*
 * One thread per class c: sum column c of term_class_matrix (a row-major
 * num_terms x classes matrix) to obtain the total term occurrences for that
 * class, stored in terms_per_class[c].
 */
__global__ void calcTotalTermsPerClass(float * term_class_matrix, int * terms_per_class, int num_terms, int classes) {
    // Flatten the 3D grid + 1D block into a single linear thread index.
    unsigned int i = blockIdx.x * gridDim.y * gridDim.z *
                     blockDim.x + blockIdx.y * gridDim.z *
                     blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
    if (i >= classes) {
        return;
    }
    int sum = 0;
    for (int t = 0; t < num_terms; t++) {
        sum += (int)term_class_matrix[classes * t + i];
    }
    terms_per_class[i] = sum;
}
/*
* Goes through each term and divides the term frequency in the class by the total
* terms in that class. Parallelized based on terms
*/
/*
 * Laplace-smooth (k = 1) and log-transform each term's per-class count:
 *   cell = logf((count + k) / (terms_in_class + k * num_terms))
 * One thread per term; each thread rewrites only its own row.
 */
__global__ void learn(float * term_class_matrix, int num_docs, int classes, int * terms_per_class, int num_terms) {
    // Flatten the 3D grid + 1D block into a single linear thread index.
    unsigned int i = blockIdx.x * gridDim.y * gridDim.z *
                     blockDim.x + blockIdx.y * gridDim.z *
                     blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
    const float k = 1.0f;  // add-one smoothing constant
    if (i >= num_terms) {
        return;
    }
    for (int c = 0; c < classes; c++) {
        float *cell = &term_class_matrix[classes * i + c];
        *cell = logf((*cell + k) / (terms_per_class[c] + k * num_terms));
    }
}
// One thread per test document i: accumulate the log-probability of the
// document under each class (sum of per-term log-likelihoods from
// term_class_matrix over the document's term list), then pick the class
// maximizing log-likelihood + log prior and store it in predictions[i].
// doc_index[i] is where document i's terms start inside terms_in_doc.
__global__ void test(float *term_class_matrix, float * doc_prob, int * doc_index, int * terms_in_doc, int classes, int num_docs, int total_len_terms, int *predictions, float *prior) {
// Flatten the 3D grid + 1D block into a single linear thread index.
unsigned int i = blockIdx.x * gridDim.y * gridDim.z *
blockDim.x + blockIdx.y * gridDim.z *
blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
int start_term = doc_index[i];
int end_term = doc_index[i];
if(i < num_docs - 1) {
end_term = doc_index[i+1];
} else if (i == num_docs - 1) {
end_term = total_len_terms - 1;
} else {
// Threads beyond the number of documents do nothing.
return ;
}
// Sum each term's per-class log-likelihood into this document's row.
for (int x = start_term; x < end_term; x++) {
for (int y = 0; y < classes; y++) {
doc_prob[classes * i + y] += term_class_matrix[classes * terms_in_doc[x] + y];
}
}
// Argmax over classes; logf(0.0) == -inf serves as the initial best score.
int max_index = 0;
float max = logf(0.0);
for (int y = 0; y < classes; y++) {
if (doc_prob[classes * i + y] + logf(prior[y]) > max) {
max_index = y;
max = doc_prob[classes * i + y] + logf(prior[y]);
}
}
predictions[i] = max_index;
}
/* Abort the process (with the error code as exit status) when a CUDA API
 * call reports anything other than cudaSuccess (== 0). */
void errorCheck(cudaError_t err) {
    if (!err) {
        return;
    }
    fprintf(stderr, "CUDA error: %d\n", err);
    exit(err);
}
// Compute a launch configuration covering N logical threads: 1D blocks of up
// to BLOCKSIZE threads, spread over a (possibly 3D) grid when the block count
// exceeds one dimension's limit. Fails with cudaErrorInvalidConfiguration if
// the required grid exceeds the device's per-dimension maxima.
// NOTE(review): the grid may overshoot N, so kernels using this must
// bounds-check their flattened index.
static cudaError_t numBlocksThreads(unsigned int N, dim3 *numBlocks, dim3 *threadsPerBlock) {
unsigned int BLOCKSIZE = 128;
int Nx, Ny, Nz;
int device;
cudaError_t err;
// Small problems: a single block with exactly N threads.
if(N < BLOCKSIZE) {
numBlocks->x = 1;
numBlocks->y = 1;
numBlocks->z = 1;
threadsPerBlock->x = N;
threadsPerBlock->y = 1;
threadsPerBlock->z = 1;
return cudaSuccess;
}
threadsPerBlock->x = BLOCKSIZE;
threadsPerBlock->y = 1;
threadsPerBlock->z = 1;
// Query the device's maximum grid dimensions.
err = cudaGetDevice(&device);
if(err)
return err;
err = cudaDeviceGetAttribute(&Nx, cudaDevAttrMaxBlockDimX, device);
if(err)
return err;
err = cudaDeviceGetAttribute(&Ny, cudaDevAttrMaxBlockDimY, device);
if(err)
return err;
err = cudaDeviceGetAttribute(&Nz, cudaDevAttrMaxBlockDimZ, device);
if(err)
return err;
// n = total blocks needed (ceiling division); factor it into x*y*z so that
// each dimension stays within the device limits.
unsigned int n = (N-1) / BLOCKSIZE + 1;
unsigned int x = (n-1) / (Ny*Nz) + 1;
unsigned int y = (n-1) / (x*Nz) + 1;
unsigned int z = (n-1) / (x*y) + 1;
if(x > Nx || y > Ny || z > Nz) {
return cudaErrorInvalidConfiguration;
}
numBlocks->x = x;
numBlocks->y = y;
numBlocks->z = z;
return cudaSuccess;
}
/* Function to convert vector of ints into array of ints */
/* Copy a vector of ints into a freshly malloc'd C array (caller frees).
 * Exits the process if the allocation fails. */
int * vecToArr(std::vector<int> v)
{
    int *arr = static_cast<int *>(malloc(v.size() * sizeof(int)));
    if(arr == NULL)
    {
        std::cerr << "Error converting vector to array" << std::endl;
        exit(-1);
    }
    for (size_t i = 0; i < v.size(); ++i)
        arr[i] = v[i];
    return arr;
}
/* Copy a vector of floats into a freshly malloc'd C array (caller frees).
 * Exits the process if the allocation fails. */
float * vecToArrfloat(std::vector<float> v)
{
    float *arr = static_cast<float *>(malloc(v.size() * sizeof(float)));
    if(arr == NULL)
    {
        std::cerr << "Error converting vector to array" << std::endl;
        exit(-1);
    }
    for (size_t i = 0; i < v.size(); ++i)
        arr[i] = v[i];
    return arr;
}
int main(int argc, char **argv)
{
if(argc != 4)
{
std::cerr << "Usage: " << argv[0] << " [train_file] [test_file] [output_file]" << std::endl;
exit(-1);
}
/* Use vector to store terms */
std::vector<std::string> term_vec;
int term_index = 0;
/* Map of term to document list, to make sure no duplicate documents are added to list */
std::map<std::string, std::vector<int> > term_doc_map;
/*
Vector of terms.
Each index represents the term.
The value at that index represents the index in doc_term that holds list of documents for the term
Note: Will be converted to array later (to be used in kernel function)
*/
std::vector<int> term_index_vec;
/*
Vector of documents.
Each value represents the doc_number that the term has appeared in
Note: Will be converted to array later (to be used in kernel function)
*/
std::vector<int> doc_term_vec;
std::vector<int> doc_class;
/* Vector to hold all the classes */
std::vector<std::string> classes_vec;
std::vector<float> prior_vec;
/* Loop through each document */
std::ifstream file(argv[1]);
std::string line;
int lineno = 0;
while (std::getline(file, line))
{
/*
Split string
doc_split[0] = doc_class
doc_split[1 -> end] = terms in doc
*/
std::istringstream iss(line);
std::vector<std::string> doc_split((std::istream_iterator<std::string>(iss)),
std::istream_iterator<std::string>());
//doc_split.push_back(std::to_string(lineno));
/* Append class to classes_vec, only if it has not been seen before */
std::vector<std::string>::iterator class_it = std::find(classes_vec.begin(), classes_vec.end(), doc_split[0]);
if(class_it == classes_vec.end()) {
classes_vec.push_back(doc_split[0]);
prior_vec.push_back(0.0);
}
int class_index = find(classes_vec.begin(), classes_vec.end(), doc_split[0]) - classes_vec.begin();
doc_class.push_back(class_index);
prior_vec[class_index] += 1.0;
/* Loop through each term in the document */
for(int i = 1; i < doc_split.size(); i++)
{
std::string term = doc_split[i];
/* Add term to vector list, if not done so already */
std::vector<std::string>::iterator term_it = std::find(term_vec.begin(), term_vec.end(), term);
if(term_it == term_vec.end())
term_vec.push_back(term);
/* Add the document to the list of documents for this term, if not done so already */
std::vector<int> doc_list = term_doc_map[term];
std::vector<int>::iterator doc_it = std::find(doc_list.begin(), doc_list.end(), lineno);
if(doc_it == doc_list.end()) {
doc_list.push_back(lineno);
term_doc_map[term] = doc_list;
}
}
lineno++;
}
for (int i = 0; i < classes_vec.size(); i++) {
prior_vec[i] /= doc_class.size();
}
/* Go through each term and populate the term_index_vec and doc_term_vec */
for(int idx = 0; idx < term_vec.size(); idx++)
{
/* t is the term itself, idx is its index (in term_index_vec as well) */
std::string t = term_vec[idx];
/* d is the list of docs associated with t */
std::vector<int> d = term_doc_map[t];
/* The starting index for the list of docs (related to t) is the size of the doc_term_vec before we insert the new docs */
term_index_vec.push_back(doc_term_vec.size());
/* Insert the related documents in the doc_term_vec */
doc_term_vec.insert(doc_term_vec.end(), d.begin(), d.end());
}
std::ifstream test_file(argv[2]);
/*
Vector of Test documents.
Each index represents a test document.
The value at that index represents the index in test_term_doc_vec that holds list of terms for that documents
Note: Will be converted to array later (to be used in kernel function)
*/
std::vector<int> test_doc_index_vec;
/*
Vector of valid test document terms.
Each value represents the term_number that is valid and appears in the document
Note: Will be converted to array later (to be used in kernel function)
*/
std::vector<int> test_term_doc_vec;
while (std::getline(test_file, line)) {
std::istringstream iss(line);
std::vector<std::string> doc_split((std::istream_iterator<std::string>(iss)), std::istream_iterator<std::string>());
std::vector<int> test_doc_terms;
for(int i = 0; i < doc_split.size(); i++) {
std::string term = doc_split[i];
std::vector<std::string>::iterator term_it = std::find(term_vec.begin(), term_vec.end(), term);
if (term_it != term_vec.end()) {
test_doc_terms.push_back(term_it - term_vec.begin());
} else {
continue;
}
}
test_doc_index_vec.push_back(test_term_doc_vec.size());
test_term_doc_vec.insert(test_term_doc_vec.end(), test_doc_terms.begin(), test_doc_terms.end());
}
/* Convert the vectors to arrays for GPU processing */
int *term_index_arr = vecToArr(term_index_vec);
int *doc_term_arr = vecToArr(doc_term_vec);
int *doc_class_arr = vecToArr(doc_class);
/* Create a TxC matrix (i.e. # of Terms x # of Classes) which will hold the frequencies of each term */
float *term_class_matrix = (float *)calloc( (term_vec.size()) * (classes_vec.size()), sizeof(float) );
/* Create a C length array holding the total terms in each class*/
int *total_terms_class_arr = (int *)calloc( classes_vec.size(), sizeof(int));
size_t nSpatial;
size_t mSpatial;
dim3 spatialThreadsPerBlock, spatialBlocks;
float *d_term_class;
int *d_term_index;
int *d_doc_term;
int *d_doc_class;
int *d_total_terms_class;
// Test Device Arrays
int *test_doc_index_arr = vecToArr(test_doc_index_vec);
int *test_term_doc_arr = vecToArr(test_term_doc_vec);
float *prior_arr = vecToArrfloat(prior_vec);
int *predictions = (int *) calloc(test_doc_index_vec.size(), sizeof(int));
float *test_doc_prob = (float *)calloc( (test_doc_index_vec.size()) * (classes_vec.size()), sizeof(float) );
float *d_test_doc_prob;
int *d_test_doc_index;
int *d_test_term_doc;
int *d_predictions;
float *d_prior;
cudaDeviceReset();
cudaProfilerStart();
/* Allocation of Device Memory */
// Document Term Vector
nSpatial = doc_term_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
errorCheck(cudaMalloc(&d_doc_term, mSpatial));
errorCheck(cudaMemcpy(d_doc_term, doc_term_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
// Training Document Classes
nSpatial = doc_class.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
errorCheck(cudaMalloc(&d_doc_class, mSpatial));
errorCheck(cudaMemcpy(d_doc_class, doc_class_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
// Probability Matrix
nSpatial = term_vec.size() * classes_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
errorCheck(cudaMalloc(&d_term_class, mSpatial));
errorCheck(cudaMemcpy(d_term_class, term_class_matrix, nSpatial*sizeof(float), cudaMemcpyHostToDevice));
// Allocation of Arrays based on class size
nSpatial = classes_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
// Total terms valid in each class
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
errorCheck(cudaMalloc(&d_total_terms_class, mSpatial));
errorCheck(cudaMemcpy(d_total_terms_class, total_terms_class_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
// Prior Probability for each class
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
errorCheck(cudaMalloc(&d_prior, mSpatial));
errorCheck(cudaMemcpy(d_prior, prior_arr, nSpatial*sizeof(float), cudaMemcpyHostToDevice));
// Test Documents' Probability for each class
nSpatial = test_doc_index_vec.size() * classes_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
errorCheck(cudaMalloc(&d_test_doc_prob, mSpatial));
errorCheck(cudaMemcpy(d_test_doc_prob, test_doc_prob, nSpatial*sizeof(float), cudaMemcpyHostToDevice));
// Allocation based on number of Test Documents
nSpatial = test_doc_index_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
// Array where each element represents where in the array the document's terms start in test_term_doc_arr
errorCheck(cudaMalloc(&d_test_doc_index, mSpatial));
errorCheck(cudaMemcpy(d_test_doc_index, test_doc_index_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
// Holds the prediction for each test document
errorCheck(cudaMalloc(&d_predictions, mSpatial));
errorCheck(cudaMemcpy(d_predictions, predictions, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
//
nSpatial = test_term_doc_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
errorCheck(cudaMalloc(&d_test_term_doc, mSpatial));
errorCheck(cudaMemcpy(d_test_term_doc, test_term_doc_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
nSpatial = term_index_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(int);
errorCheck(cudaMalloc(&d_term_index, mSpatial));
errorCheck(cudaMemcpy(d_term_index, term_index_arr, nSpatial*sizeof(int), cudaMemcpyHostToDevice));
std::cerr << "Started training... ";
TimeVar train_start = timeNow();
calcFreq<<<spatialBlocks, spatialThreadsPerBlock>>>(d_term_index, d_doc_term, d_doc_class, d_term_class, term_vec.size(), doc_term_vec.size(), classes_vec.size());
nSpatial = classes_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
calcTotalTermsPerClass<<<spatialBlocks, spatialThreadsPerBlock>>>(d_term_class, d_total_terms_class, term_vec.size(), classes_vec.size());
cudaDeviceSynchronize();
TimeVar train_stop = timeNow();
std::cerr << "Done (" << duration(train_start, train_stop) << " ms)" << std::endl;
nSpatial = term_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
std::cerr << "Started Learning... ";
TimeVar learn_start = timeNow();
learn<<<spatialBlocks, spatialThreadsPerBlock>>>(d_term_class, doc_class.size(), classes_vec.size(), d_total_terms_class, term_vec.size());
cudaDeviceSynchronize();
TimeVar learn_stop = timeNow();
std::cerr << "Done (" << std::chrono::duration_cast<std::chrono::microseconds>(learn_stop - learn_start).count() << " us)" <<std::endl;
// Test
nSpatial = test_doc_index_vec.size();
errorCheck(numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock));
std::cerr << "Started Testing... ";
TimeVar test_start = timeNow();
test<<<spatialBlocks, spatialThreadsPerBlock>>>(d_term_class, d_test_doc_prob, d_test_doc_index, d_test_term_doc, classes_vec.size(), test_doc_index_vec.size(), test_term_doc_vec.size(), d_predictions, d_prior);
cudaDeviceSynchronize();
TimeVar test_stop = timeNow();
std::cerr << "Done (" << duration(test_start, test_stop) << " ms)" << std::endl;
errorCheck(cudaMemcpy(predictions, d_predictions, nSpatial*sizeof(int), cudaMemcpyDeviceToHost));
std::ofstream results(argv[3]);
if(results.is_open()) {
for (int i = 0; i < test_doc_index_vec.size(); i++) {
results << classes_vec[predictions[i]] << '\n';
}
}
cudaProfilerStop();
cudaDeviceReset();
}
|
4,122 | __global__ void add(int *a, int *b, int *c) {
// Single-thread scalar add: writes *a + *b into *c.
// Intended for a <<<1,1>>> launch; all pointers must be device memory.
*c = *a + *b;
}
4,123 | #include <thrust/version.h>
#include <iostream>
/* Version check for thrust
If not found, try nvcc version.cu -o version -I /home/you/libraries/
when libraries is where you store you thrust downloaded files
*/
int main(void)
{
    // Report the Thrust release shipped with this toolkit as "vMAJOR.MINOR".
    std::cout << "Thrust v" << THRUST_MAJOR_VERSION << "."
              << THRUST_MINOR_VERSION << std::endl;
    return 0;
}
|
4,124 | #include "includes.h"
#define tileSize 32
//function for data initialization
void initialization( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the input data
void printInput( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the output data
void printOutput( double *P_C, double *P_G, int arow, int bcol);
//GPU kernels
// Element-wise scaling: C[i] = A[i] * s for all numElements entries.
// Uses a grid-stride loop, so any grid/block configuration covers the array.
// FIX: removed a stray duplicate "__global__" qualifier that immediately
// preceded this definition and made it a syntax error.
__global__ void vectorScaling(const double *A, double s, double *C, int numElements)
{
    int gridIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = gridIndex; i < numElements; i += stride)
    {
        C[i] = A[i] * s;
    }
} |
4,125 | // Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate_gpu.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// For every "unknown" query point, brute-force search all m "known" points of
// the same batch and record the 3 nearest neighbours.
// Launch layout: blockIdx.y = batch index; blockIdx.x*blockDim.x+threadIdx.x = point index.
// NOTE: dist2 receives SQUARED distances (no sqrt is taken).
__global__ void three_nn_kernel(int b, int n, int m,
const float *__restrict__ unknown,
const float *__restrict__ known,
float *__restrict__ dist2,
int *__restrict__ idx) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= n) return;
// Advance each pointer to this batch / this query point's slice.
unknown += bs_idx * n * 3 + pt_idx * 3;
known += bs_idx * m * 3;
dist2 += bs_idx * n * 3 + pt_idx * 3;
idx += bs_idx * n * 3 + pt_idx * 3;
float ux = unknown[0];
float uy = unknown[1];
float uz = unknown[2];
// Running top-3 smallest squared distances (best1 <= best2 <= best3) and
// their indices; 1e40 acts as a +infinity sentinel.
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
// New overall minimum: shift the previous two winners down one slot.
best3 = best2;
besti3 = besti2;
best2 = best1;
besti2 = besti1;
best1 = d;
besti1 = k;
} else if (d < best2) {
best3 = best2;
besti3 = besti2;
best2 = d;
besti2 = k;
} else if (d < best3) {
best3 = d;
besti3 = k;
}
}
dist2[0] = best1;
dist2[1] = best2;
dist2[2] = best3;
idx[0] = besti1;
idx[1] = besti2;
idx[2] = besti3;
}
// Host wrapper: launches three_nn_kernel on `stream` with a 2D grid —
// x dimension ceil-divides the n query points into 256-thread blocks
// (via DIVUP), y dimension enumerates the b batches.  Aborts the process
// on any launch error.
void three_nn_kernel_launcher(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx,
cudaStream_t stream) {
// unknown: (B, N, 3)
// known: (B, M, 3)
// output:
// dist2: (B, N, 3)
// idx: (B, N, 3)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK),
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_nn_kernel<<<blocks, threads, 0, stream>>>(b, n, m, unknown, known,
dist2, idx);
// cudaGetLastError catches launch-configuration failures; execution
// errors inside the kernel surface at the next synchronizing call.
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
4,126 | #include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
__constant__ int factor = 0;
// c[i] = factor * (a[i] + b[i]).  `factor` is the file-scope __constant__
// (initialized to 0; NOTE(review): result is all zeros unless the host
// updates it via cudaMemcpyToSymbol — confirm intended usage).
// WARNING(review): no bounds guard — the grid must cover exactly the array
// length or out-of-bounds accesses occur.
__global__
void vectorAdd(int *a, int *b, int *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = factor*(a[i] + b[i]);
}
// Element-wise 2D add via pointer-to-pointer rows: c[i][j] = a[i][j] + b[i][j].
// Requires a 2D launch; (i, j) index (row, column).
// WARNING(review): no bounds guard and no dimension parameters — the grid
// must match the matrix extents exactly.  The row-pointer arrays themselves
// must live in device memory.
__global__
void matrixAdd(int **a,int **b, int**c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
// Echo the given C string to stdout, prefixed with "ptr = ".
void func(const char* ptr) {
    std::cout << "ptr = ";
    std::cout << ptr << std::endl;
}
#define N 1024*1024
#define FULL_DATA_SIZE N*20
// Per element: average three neighbouring samples (wrapping within a
// 256-entry window) of a and of b, then store the mean of those averages.
__global__ void kernel(int *a, int *b, int *c) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;  // guard the grid tail
    const int n1 = (idx + 1) % 256;
    const int n2 = (idx + 2) % 256;
    const float avgA = (a[idx] + a[n1] + a[n2]) / 3.0f;
    const float avgB = (b[idx] + b[n1] + b[n2]) / 3.0f;
    c[idx] = (avgA + avgB) / 2;
}
// Demo: enumerate device properties, then stream-pipelined chunked vector
// processing — two streams alternate H2D copies, kernel runs, and D2H copies
// over FULL_DATA_SIZE elements in 2*N-sized slices.
int main(int argc, char** argv) {
    // start time
    auto startTime = std::chrono::high_resolution_clock::now();
    printf("Hello World\n");
    // get the number of devices
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    PRINT(numDevices);
    cudaDeviceProp prop;
    for (auto i=0 ; i<numDevices; i++) {
        cudaGetDeviceProperties(&prop, i);
        PRINT(prop.name);
        PRINT(prop.totalGlobalMem);
        PRINT(prop.sharedMemPerBlock);
        PRINT(prop.regsPerBlock);
        PRINT(prop.warpSize);
        PRINT(prop.memPitch);
        PRINT(prop.maxThreadsPerBlock);
        PRINT(prop.maxThreadsDim[0]);
        PRINT(prop.maxThreadsDim[1]);
        PRINT(prop.maxThreadsDim[2]);
        PRINT(prop.maxGridSize[0]);
        PRINT(prop.maxGridSize[1]);
        PRINT(prop.maxGridSize[2]);
        PRINT(prop.totalConstMem);
        PRINT(prop.major);
        PRINT(prop.minor);
        PRINT(prop.clockRate);
        PRINT(prop.textureAlignment);
        PRINT(prop.deviceOverlap);
        PRINT(prop.multiProcessorCount);
        PRINT(prop.kernelExecTimeoutEnabled);
        PRINT(prop.integrated);
        PRINT(prop.canMapHostMemory);
        PRINT(prop.computeMode);
        PRINT(prop.maxTexture1D);
        PRINT(prop.maxTexture2D[0]);
        PRINT(prop.maxTexture2D[1]);
        PRINT(prop.maxTexture3D[0]);
        PRINT(prop.maxTexture3D[1]);
        PRINT(prop.maxTexture3D[2]);
        // PRINT(prop.maxTexture2DArray[0]);
        // PRINT(prop.maxTexture2DArray[1]);
        // PRINT(prop.maxTexture2DArray[2]);
        PRINT(prop.concurrentKernels);
    }
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaStream_t stream0, stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);
    int *h_a, *h_b, *h_c;
    int *d_a0, *d_b0, *d_c0;  // stream0's device buffers
    int *d_a1, *d_b1, *d_c1;  // stream1's device buffers
    cudaMalloc((void**)&d_a0, N * sizeof(int));
    cudaMalloc((void**)&d_b0, N * sizeof(int));
    cudaMalloc((void**)&d_c0, N * sizeof(int));
    cudaMalloc((void**)&d_a1, N * sizeof(int));
    cudaMalloc((void**)&d_b1, N * sizeof(int));
    cudaMalloc((void**)&d_c1, N * sizeof(int));
    // Pinned host memory is required for truly asynchronous cudaMemcpyAsync.
    cudaHostAlloc((void**)&h_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    for (auto i =0; i<FULL_DATA_SIZE; i++) {
        h_a[i] = i;
        h_b[i] = i*i;
    }
    for (auto i=0; i<FULL_DATA_SIZE; i+=2*N) {
        // copy a for both streams
        cudaMemcpyAsync(d_a0, h_a + i, N * sizeof(int),
                        cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(d_a1, h_a + i + N,
                        N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        // copy b for both streams
        cudaMemcpyAsync(d_b0, h_b + i, N * sizeof(int),
                        cudaMemcpyHostToDevice, stream0);
        // BUG FIX: previously this re-copied h_a into d_a1, leaving d_b1
        // uninitialized for stream1's kernel.
        cudaMemcpyAsync(d_b1, h_b + i + N,
                        N * sizeof(int), cudaMemcpyHostToDevice, stream1);
        // execute kernels for both streams
        kernel<<<N/256, 256, 0, stream0>>>(d_a0, d_b0, d_c0);
        kernel<<<N/256, 256, 0, stream1>>>(d_a1, d_b1, d_c1);
        // copy c back for both streams
        cudaMemcpyAsync(h_c + i, d_c0, N * sizeof(int),
                        cudaMemcpyDeviceToHost, stream0);
        cudaMemcpyAsync(h_c + i + N, d_c1, N * sizeof(int),
                        cudaMemcpyDeviceToHost, stream1);
    }
    // CPU to wait until GPU has finished
    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time taken: %3.1f ms\n", elapsedTime);
    for (auto i=0; i<10; i++)
        printf("c[%d] = %d\n", i, h_c[i]);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFree(d_a0);
    cudaFree(d_b0);
    cudaFree(d_c0);
    cudaFree(d_a1);
    cudaFree(d_b1);
    cudaFree(d_c1);
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
    // FIX: events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // stop time
    auto stopTime = std::chrono::high_resolution_clock::now();
    PRINT((stopTime - startTime).count());
    printf("Goodbye World\n");
}
|
4,127 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define row 10000
#define col 10000
int matrixA[row][col], matrixB[row][col], matrixC[row][col], matrixD[row][col];
// Element-wise matrix add on statically-shaped row x col int matrices:
// matrixC = matrixA + matrixB.  Requires a 2D launch; the (i < row && j < col)
// guard makes any grid that covers the matrix safe at the tail.
__global__ void add_matrix(int matrixA[row][col], int matrixB[row][col], int matrixC[row][col])
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i < row && j < col)
{
matrixC[i][j] = matrixA[i][j] + matrixB[i][j];
}
}
int main()
{
int(*deviceA)[col];
int(*deviceB)[col];
int(*deviceC)[col];
int i, j;
for (i = 0; i < row; i++)
{
for (j = 0; j < col; j++)
{
matrixA[i][j] = rand() % 100;
matrixB[i][j] = rand() % 100;
}
}
/*for (i = 0; i < row; i++)
{
for (j = 0; j < col; j++)
{
printf("%d ", matrixA[i][j]);
}
printf("\n");
}*/
cudaEvent_t start_time, stop_time;
float elapsedTime;
//clock_t start_time = clock();
cudaMalloc((void **)&deviceA, row * col * sizeof(int));
cudaMalloc((void **)&deviceB, row * col * sizeof(int));
cudaMalloc((void **)&deviceC, row * col * sizeof(int));
cudaMemcpy(deviceA, matrixA, row * col * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, matrixB, row * col * sizeof(int), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 numOfBlocks(ceil(row / 32.0), ceil(col / 32.0));
cudaEventCreate(&start_time);
cudaEventRecord(start_time, 0);
add_matrix <<<numOfBlocks, threadsPerBlock >>> (deviceA, deviceB, deviceC);
cudaEventCreate(&stop_time);
cudaEventRecord(stop_time, 0);
cudaEventSynchronize(stop_time);
cudaEventElapsedTime(&elapsedTime, start_time, stop_time);
cudaMemcpy(matrixC, deviceC, row*col * sizeof(int), cudaMemcpyDeviceToHost);
//clock_t stop_time = clock();
printf(" Parallely Elapsed Time: %f ms\n", elapsedTime);
clock_t start_time_nonparallely, stop_time_nonparallely;
start_time_nonparallely = clock();
for (i = 0; i < row; i++)
{
for (j = 0; j < col; j++)
{
matrixD[i][j] = matrixA[i][j] + matrixB[i][j];
}
}
stop_time_nonparallely = clock();
printf("Non-parallely Elapsed Time: %f ms\n", (float)((stop_time_nonparallely) - (start_time_nonparallely)));
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
} |
4,128 | #include <sys/time.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define NUM_STREAMS 4
//For time log by callback function
double timeStampB=0;
double timeStampC=0;
double timeStampD=0;
double timeKernal=0;
// Wall-clock timestamp in seconds (microsecond resolution via gettimeofday).
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + (double)tv.tv_usec / 1000000;
}
//The three callback functions are used to called when finishing memcpyasyc and kernal completion
void myCallBackB(cudaStream_t stream,cudaError_t status, void* userData ){
timeStampB = getTimeStamp();
}
void myCallBackC(cudaStream_t stream,cudaError_t status, void* userData ){
timeStampC = getTimeStamp();
//Through looking nvvp graph, the kernal executation is non-overlap and totally spread out. So I got each timeStampC-timeStampB for each kernal and take a sum
timeKernal += timeStampC-timeStampB;
}
void myCallBackD(cudaStream_t stream,cudaError_t status, void* userData ){
timeStampD=getTimeStamp();
}
// Fill matrix A (nx rows, ny cols, row-major) with (row + col) / 3.
void initDataA(float* data, int nx, int ny){
    for (int r = 0; r < nx; ++r) {
        for (int c = 0; c < ny; ++c) {
            data[r*ny + c] = (float) (r+c)/3.0;
        }
    }
}
// Fill matrix B (nx rows, ny cols, row-major) with 3.14 * (row + col).
void initDataB(float* data, int nx, int ny){
    for (int r = 0; r < nx; ++r) {
        for (int c = 0; c < ny; ++c) {
            data[r*ny + c] = (float)3.14*(r+c);
        }
    }
}
// Debug helper: dump an nx-by-ny row-major matrix to stdout, one row per line.
void debugPrint(float* data, int nx, int ny){
    for (int r = 0; r < nx; ++r) {
        for (int c = 0; c < ny; ++c) {
            printf("%f ", data[r*ny + c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Host-side reference: C = A + B, element-wise over all nx*ny entries.
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    const int total = nx * ny;
    for (int k = 0; k < total; ++k) {
        C[k] = A[k] + B[k];
    }
}
// Device-side in-place add: B[i] += A[i] for the first `len` elements.
// Accumulating into B avoids touching a third output array.
__global__ void f_addmat( float *A, float *B, int len){
int ix = threadIdx.x;
int iy = threadIdx.y*blockDim.x + blockIdx.x*blockDim.x*blockDim.y;
int idx = iy + ix ;
// Grid-stride loop: each thread advances by the whole grid's thread count,
// so the kernel is correct for any grid size (the launcher deliberately
// sizes the grid at ~1/4 of the data so each thread handles ~4 elements).
#pragma unroll
for(int i = idx; i < len; i+=gridDim.x*blockDim.x*blockDim.y){
B[i] += A[i];
}
}
int main( int argc, char *argv[] ) {
// get program arguments
if( argc != 3) {
printf("Error: wrong number of args\n") ;
exit(1) ;
}
int nx = atoi( argv[1] ) ; // should check validity
int ny = atoi( argv[2] ) ; // should check validity
int noElems = nx*ny ;
int bytes = noElems * sizeof(float) ;
// but you may want to pad the matrices…
// alloc memory host-side
float *h_hA = (float *) malloc( bytes ) ;
float *h_hB = (float *) malloc( bytes ) ;
float *h_hC = (float *) malloc( bytes ) ; // host result
//the following allocate array pinned on memory to boost memcpy
float *h_A, *h_B, *h_dC;
float *d_A, *d_B ;
cudaHostAlloc((void**)&h_A,bytes,cudaHostAllocWriteCombined|cudaHostAllocMapped);
cudaHostAlloc((void**)&h_B,bytes,cudaHostAllocWriteCombined|cudaHostAllocMapped);
cudaHostAlloc((void**)&h_dC,bytes,cudaHostAllocWriteCombined);
// init matrices with random data
initDataA(h_A, nx, ny);
initDataB(h_B, nx, ny);
initDataA(h_hA, nx, ny);
initDataB(h_hB, nx, ny);
// alloc memory dev-side
cudaMalloc( (void **) &d_A, bytes ) ;
cudaMalloc( (void **) &d_B, bytes ) ;
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
double timeStampA = getTimeStamp() ;
// invoke Kernel
dim3 block( 32, 32 ) ;
//I use 4 streams to pipeline memcpy h2d, kernal exec and memcpy d2h
//grid determines the number of blocks. "/4" means shrink into 1/4 of total size for kernal executing 4 elements addition
//"/NUM_STREAMS" means assign blocks to 4 streams uniformly
int grid = ((noElems+3)/4/NUM_STREAMS + block.x*block.y-1)/(block.x*block.y);
//align_idx is to align 4 data in 32-byte data access for each kernal executing
int align_idx = noElems/NUM_STREAMS-(noElems/NUM_STREAMS)%8;
//stream creation
cudaStream_t stream[NUM_STREAMS+1];
for (int i = 1; i < NUM_STREAMS+1; i++){
cudaStreamCreate(&(stream[i]));
}
int i;
for(i = 1; i < NUM_STREAMS; i++){
//async memcpy for A and B
cudaMemcpyAsync(&d_A[(i-1)*align_idx],&h_A[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyHostToDevice,stream[i]);
cudaMemcpyAsync(&d_B[(i-1)*align_idx],&h_B[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyHostToDevice,stream[i]);
//add callback to get timestamp update when each of memcpy per stream. I can get the last completion of data copying
cudaStreamAddCallback(stream[i],myCallBackB,(void*)&i,0);
//kernal invoked
f_addmat<<<grid, block, 0, stream[i]>>>( d_A+(i-1)*align_idx, d_B+(i-1)*align_idx,align_idx) ;
//add callback to get lastest stamp when finishing kernal per stream
cudaStreamAddCallback(stream[i],myCallBackC,(void*)&i,0);
//async memcpy back to host
cudaMemcpyAsync(&h_dC[(i-1)*align_idx],&d_B[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyDeviceToHost,stream[i]);
//add callback to get lastest timestamp when finishing data copying
cudaStreamAddCallback(stream[i],myCallBackD,(void*)&i,0);
}
//Here is to run the last stream. It is out of loop since the size of remaing data is different from aligned data size
grid =((noElems-(NUM_STREAMS-1)*align_idx+3)/4+ block.x*block.y-1)/(block.x*block.y);
cudaMemcpyAsync(&d_A[(NUM_STREAMS-1)*align_idx],&h_A[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyHostToDevice,stream[NUM_STREAMS]);
cudaMemcpyAsync(&d_B[(NUM_STREAMS-1)*align_idx],&h_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyHostToDevice,stream[NUM_STREAMS]);
cudaStreamAddCallback(stream[i],myCallBackB,(void*)&i,0);
f_addmat<<<grid, block, 0, stream[NUM_STREAMS]>>>( d_A+(NUM_STREAMS-1)*align_idx, d_B+(NUM_STREAMS-1)*align_idx,noElems-(NUM_STREAMS-1)*align_idx) ;
cudaStreamAddCallback(stream[i],myCallBackC,(void*)&i,0);
cudaMemcpyAsync(&h_dC[(NUM_STREAMS-1)*align_idx],&d_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyDeviceToHost,stream[NUM_STREAMS]);
cudaStreamAddCallback(stream[i],myCallBackD,(void*)&i,0);
//sync all streams and done
for(int i = 1; i < NUM_STREAMS+1; i++){
cudaStreamSynchronize(stream[i]);
}
// check result
h_addmat( h_hA, h_hB, h_hC, nx, ny ) ;
// print out results
if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){//results compare
printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeKernal, timeStampD-timeStampC);
}else{
//for debug print
//debugPrint(h_hC, nx, ny);
//debugPrint(h_dC, nx, ny);
printf("Error: Results not matched.\n");
}
// free GPU resources
cudaFreeHost( h_A ) ; cudaFreeHost( h_B ) ; cudaFreeHost( h_dC ) ;
cudaDeviceReset() ;
}
|
4,129 | #include <cuda_runtime.h>
// CPU reference SAXPY: y[i] = a*x[i] + y[i] for i in [0, n).
void saxpy_c(int n, float a, float* x, float* y) {
    for (int k = 0; k != n; ++k) {
        y[k] += a * x[k];
    }
}
// GPU SAXPY: y[i] = a*x[i] + y[i].  One thread per element; the i < n guard
// makes the grid tail safe.
__global__ void saxpy(int n, float a, float* x, float* y) {
int const i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a * x[i] + y[i];
}
#include <iostream>
// Run SAXPY over 1<<20 managed-memory floats and print the first 10 entries
// before and after.
int main() {
    cudaError err;
    int const N = 1<<20;
    // Unified (managed) memory is reachable from both host and device.
    float* x;
    err = cudaMallocManaged(&x, N * sizeof(float));
    if (err != cudaSuccess) {
        std::cout << "Cannot allocate x: " << err << "\n";
        return 1;
    }
    float* y;
    err = cudaMallocManaged(&y, N * sizeof(float));
    if (err != cudaSuccess) {
        std::cout << "Cannot allocate y: " << err << "\n";
        cudaFree(x);  // FIX: do not leak x on this error path
        return 1;
    }
    for (int i = 0; i < N; ++i) x[i] = static_cast<float>(i);
    for (int i = 0; i < N; ++i) y[i] = static_cast<float>(i);
    std::cout << "x: [";
    for (int i = 0; i < 10; ++i) std::cout << " " << x[i];
    std::cout << " ]\n";
    std::cout << "y: [";
    for (int i = 0; i < 10; ++i) std::cout << " " << y[i];
    std::cout << " ]\n";
    // 4096 blocks x 256 threads == 1<<20 threads, one per element.
    saxpy<<<4096, 256>>>(N, 2.f, x, y);
    // FIX: check for launch-configuration errors, which are NOT reported
    // by cudaDeviceSynchronize's return value alone.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "Error: " << err << "\n";
        return 1;
    }
    err = cudaDeviceSynchronize();  // surfaces asynchronous execution errors
    if (err != cudaSuccess) {
        std::cout << "Error: " << err << "\n";
        return 1;
    }
    std::cout << "x: [";
    for (int i = 0; i < 10; ++i) std::cout << " " << x[i];
    std::cout << " ]\n";
    std::cout << "y: [";
    for (int i = 0; i < 10; ++i) std::cout << " " << y[i];
    std::cout << " ]\n";
    // FIX: release the managed allocations (previously leaked).
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
4,130 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the rc machines
StevenFaulkner U9616-1844
Summer 2018
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of width */
atom * atom_list; /* list of all data points */
struct timezone Idunno;
struct timeval startTime, endTime;
// Abort the program with a diagnostic if a CUDA call failed; `op` labels
// the operation being checked.
void ErrorCheck( cudaError_t err, const char op[])
{
    if (err == cudaSuccess)
        return;
    printf("CUDA Error: %s, %s ", op, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
/*
Euclidean distance between points ind1 and ind2 of the atom_list
passed in as `list` (device memory).
*/
__device__ double
p2p_distance(atom *list, int ind1, int ind2)
{
double x1 = list[ind1].x_pos;
double x2 = list[ind2].x_pos;
double y1 = list[ind1].y_pos;
double y2 = list[ind2].y_pos;
double z1 = list[ind1].z_pos;
double z2 = list[ind2].z_pos;
return sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2));
}
/*
GPU SDH kernel: thread i accumulates the histogram contribution of every
pair (i, x) with x > i, so each unordered pair is counted exactly once.
Threads whose global id is >= size are harmless: their loop body never
executes.  atomicAdd on the unsigned long long counter requires a device
supporting that overload — NOTE(review): confirm the target architecture.
*/
__global__ void
PDH_baseline(bucket *histo_in, atom *list, double width, int size)
{
int i, j, pos;
double distance;
i = (blockIdx.x * blockDim.x) + threadIdx.x;
j = i+1;
for(int x = j; x < size; ++x)
{
distance = p2p_distance(list,i,x);
// bucket index = floor(distance / bucket width)
pos = (int) (distance/width);
atomicAdd( &histo_in[pos].d_cnt,1);
}
}
// Alternative 2D-launch SDH kernel: thread (x, y) handles the pair x < y.
// WARNING(review): histogram[pos].d_cnt++ is a non-atomic read-modify-write;
// concurrent threads hitting the same bucket race and lose counts.  The
// trailing __syncthreads() does not fix that.  The printf is debug output
// and will serialize execution.  (This kernel is not called from main.)
__global__ void
PDHGPU_Baseline(bucket *histogram,atom *list, double width)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if(x < y)
{
double dist = p2p_distance(list,x,y);
int pos = (int) (dist/width);
histogram[pos].d_cnt++;
printf("%d,%d : %d, %f \n", x,y,pos,dist);
}
__syncthreads();
}
/*
Stop the checkpoint started in `startTime`, print the elapsed wall time,
and return it in (fractional) seconds.
*/
double report_running_time() {
    gettimeofday(&endTime, &Idunno);
    long sec_diff = endTime.tv_sec - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    // Borrow a second when the microsecond part underflows.
    if (usec_diff < 0) {
        --sec_diff;
        usec_diff += 1000000;
    }
    printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
Print the counts of all `num_buckets` buckets (global), five per row,
followed by the grand total so the pair count can be sanity-checked.
*/
void output_histogram(bucket *histogram){
int i;
long long total_cnt = 0;
for(i=0; i< num_buckets; i++) {
if(i%5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", histogram[i].d_cnt);
total_cnt += histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if(i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
// Build the spatial distance histogram of PDH_acnt random 3D points on the
// GPU.  Usage: ./SDH {#of_samples} {bucket_width}
int main(int argc, char **argv)
{
    int i;
    // FIX: validate the arguments before dereferencing argv[1]/argv[2].
    if (argc != 3) {
        printf("Usage: %s {#of_samples} {bucket_width}\n", argv[0]);
        return 1;
    }
    PDH_acnt = atoi(argv[1]);
    PDH_res = atof(argv[2]);
    // sqrt(3) ~ 1.732: the longest possible distance inside the cube.
    num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
    size_t hist_size = sizeof(bucket)*num_buckets;
    size_t atom_size = sizeof(atom)*PDH_acnt;
    /* FIX: calloc instead of malloc — the kernel only INCREMENTS the
     * counters, so they must start at zero or the device histogram is
     * seeded with garbage. */
    histogram = (bucket *)calloc(num_buckets, sizeof(bucket));
    atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
    srand(1);  // fixed seed for reproducible data
    /* generate data following a uniform distribution */
    for(i = 0; i < PDH_acnt; i++) {
        atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
    }
    bucket *dev_Histo = NULL;
    atom *dev_atomL = NULL;
    ErrorCheck(cudaMalloc((void**) &dev_Histo,hist_size), "Allocate Memory for Histogram");
    ErrorCheck(cudaMalloc((void**) &dev_atomL, atom_size), "Allocate Memory for Atom List");
    ErrorCheck(cudaMemcpy(dev_Histo,histogram,hist_size, cudaMemcpyHostToDevice), "Copying Histogram to Device");
    ErrorCheck(cudaMemcpy(dev_atomL, atom_list, atom_size, cudaMemcpyHostToDevice), "Copying Atom list to Device");
    /* FIX: integer ceil-divide.  ceil(PDH_acnt/32) truncated in integer
     * division BEFORE the ceil, silently dropping up to 31 tail points. */
    PDH_baseline <<< (PDH_acnt + 31) / 32, 32 >>> (dev_Histo, dev_atomL, PDH_res, PDH_acnt);
    ErrorCheck(cudaGetLastError(), "Kernel Launch");
    ErrorCheck(cudaMemcpy(histogram, dev_Histo, hist_size, cudaMemcpyDeviceToHost), " Move Histogram to host");
    /* print out the histogram */
    output_histogram(histogram);
    ErrorCheck(cudaFree(dev_Histo), "Free Device Histogram");
    ErrorCheck(cudaFree(dev_atomL), "Free Device Atom List");
    free(histogram);
    free(atom_list);
    ErrorCheck(cudaDeviceReset(), "Reset");
    return 0;
}
|
4,131 | #include <stdio.h>
// Fill a[0..N) with the repeating pattern 0, 1, 2, 0, 1, 2, ...
void init(double *a, int N)
{
    for (int k = 0; k < N; ++k)
        a[k] = k % 3;
}
// 2D coordinate used by the index-mapping helpers below.
struct position {
    int x;
    int y;
};

/// Map a 2D coordinate (x, y) to its flat row-major 1D index.
long get1dIndex( int width, int x, int y) {
    return y * width + x;
}

/// Inverse of the 2D-to-1D mapping: recover (x, y) from a flat index
/// and store them into *pos.
void get_Position( int width, int id, struct position *pos) {
    pos->x = id % width;   // column
    pos->y = id / width;   // row
}
__global__
void doubleElements(double *a, int N, int color )
{
    /*
     * Grid-stride loop so each thread covers more than one element
     * regardless of launch configuration.
     *
     * FIX: the original contained three byte-identical copies of this loop,
     * one per color channel (1, 2, 3).  They are consolidated here; any
     * other color value remains a no-op, exactly as before.
     */
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    if (color == 1 || color == 2 || color == 3)
    {
        for (int i = idx; i < N; i += stride)
        {
            int x = i/4;   // presumably a 4-wide 2D layout — TODO confirm
            int y = i%4;
            a[i] = a[i] + 2*x + y*i;
        }
    }
}
int main()
{
int N = 12;
double *red;
size_t size = N * sizeof(double);
cudaMallocManaged(&red, size);
init(red, N);
double *green;
cudaMallocManaged(&green, size);
init(green, N);
double *blue;
cudaMallocManaged(&blue, size);
init(blue, N);
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
doubleElements<<<number_of_blocks, threads_per_block>>>(red, N, 1);
doubleElements<<<number_of_blocks, threads_per_block>>>(blue, N, 2);
doubleElements<<<number_of_blocks, threads_per_block>>>(green, N, 3);
cudaDeviceSynchronize();
printf("\nresult: ");
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", red[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", green[i]);
}
for ( int i = 0; i < N ; i ++ )
{
printf("%lf ", blue[i]);
}
cudaFree(red);
cudaFree(green);
cudaFree(blue);
}
|
4,132 | #include "includes.h"
// Replace every occurrence of `previousLabel` with `newLabel` inside frame
// `idx` of the stacked component-label image (colsComponents columns,
// frameRows rows per frame).  2D launch: threads map to (row, col).
// WARNING(review): no bounds guard on i/j — the grid must match the frame
// extents exactly or out-of-range elements are touched.
__global__ void relabel2Kernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
// Flatten (row, col) and offset into the idx-th frame.
i = i * colsComponents + j;
i = i + (colsComponents * frameRows * idx);
if (components[i] == previousLabel) {
components[i] = newLabel;
}
} |
4,133 | #include <stdio.h>
#include <math.h>
// Dense matrix-vector product: b[tId] += sum_k A[tId][k] * x[k], one thread
// per output row; A is N x N in row-major order.  Assumes b was zeroed by
// the caller (main uses cudaMemset).
__global__ void kernelb(int *A, int *x, int *b, int N){
int tId = threadIdx.x + blockIdx.x * blockDim.x;
if(tId< N){
for(int k=0; k < N; k++){
b[tId] += A[(int)(tId*N+k)]*x[k];
}
}
}
int main(int argc, char const *argv[])
{
int n = 1e4;
int block_size = 256;
int grid_size = (int) ceil((float) n/ block_size);
int *GPU_b;
int *GPU_x;
int *GPU_A;
int *CPU_x = (int *) malloc(1e4 * sizeof (int));
int *CPU_A = (int *) malloc(1e8 * sizeof (int));
for(int k = 0; k < 1e8; k++){
if(k < 1e4){
CPU_x[k] = 1;
}
CPU_A[k] = 1;
}
cudaMalloc(&GPU_x , 1e4 * sizeof(int));
cudaMalloc(&GPU_b , 1e4 * sizeof(int));
cudaMalloc(&GPU_A , 1e8 * sizeof(int));
cudaMemcpy(GPU_A, CPU_A, 1e8 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(GPU_x, CPU_x, 1e4 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(GPU_b, 0, 1e4 * sizeof(int));
kernelb<<<grid_size, block_size>>>(GPU_A, GPU_x, GPU_b, n);
cudaMemcpy(CPU_x, GPU_b, 1e4 * sizeof(int), cudaMemcpyDeviceToHost);
//for(int k = 0; k< 1e4; k++){
// printf("%d\n", CPU_x[k]);
//}
cudaFree(GPU_x);
cudaFree(GPU_b);
cudaFree(GPU_A);
free(CPU_x);
free(CPU_A);
return(0);
} |
4,134 | #include <stdio.h>
#include <assert.h>
// Report (but do not abort on) a CUDA runtime error; passes the status
// through so call sites can still inspect it.
inline cudaError_t checkCuda(cudaError_t result)
{
    if (cudaSuccess != result) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        //assert(result == cudaSuccess);
    }
    return result;
}
// Set a[idx] = value for every idx < N; one thread per element, with a
// tail guard so any covering grid is safe.
__global__
void initVectorGpu(float *a, float value, int N){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N){
a[idx] = value;
}
}
// result[i] = a[i] + b[i] using a grid-stride loop, so the kernel is
// correct for any launch configuration and any N.
__global__
void addVectorsGpu(float *result, float *a, float *b, int N){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = idx; i < N; i += stride){
result[i] = a[i] + b[i];
}
}
// Verify every element of array equals target; print the first mismatch and
// exit(1), otherwise print a success message.
void checkElementsAre(float target, float *array, int N)
{
    int k;
    for (k = 0; k < N; ++k)
    {
        if (array[k] != target)
        {
            printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", k, array[k], target);
            exit(1);
        }
    }
    printf("SUCCESS! All values added correctly.\n");
}
// Initialize two managed vectors on the GPU, add them into a third, and
// verify every element equals 7.
int main()
{
    const int N = 2<<20;
    size_t size = N * sizeof(float);
    // FIX: the original malloc()'d a/b/c and then immediately overwrote the
    // pointers with cudaMallocManaged, leaking the three host blocks.  Only
    // the managed allocations are kept.
    float *a;
    float *b;
    float *c;
    checkCuda(cudaMallocManaged(&a, size));
    checkCuda(cudaMallocManaged(&b, size));
    checkCuda(cudaMallocManaged(&c, size));
    size_t threadsPerBlock;
    size_t numberOfBlocks;
    threadsPerBlock = 256;
    // Ceil-divide so the grid covers the tail when N isn't a multiple.
    numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    printf("Threads : %ld Blocks : %ld", threadsPerBlock, numberOfBlocks);
    initVectorGpu<<<numberOfBlocks,threadsPerBlock>>>(a, 3, N);
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    initVectorGpu<<<numberOfBlocks,threadsPerBlock>>>(b, 4, N);
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    initVectorGpu<<<numberOfBlocks,threadsPerBlock>>>(c, 0, N);
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    addVectorsGpu<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    checkElementsAre(7, c, N);
    checkCuda( cudaFree(a) );
    checkCuda( cudaFree(b) );
    checkCuda( cudaFree(c) );
}
|
4,135 | #include <cuda_runtime.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "curand.h"
#include "curand_kernel.h"
#include <cmath>
#include <chrono>
#include <iostream>
#include <iomanip>
#include <stdio.h>
#define BLOCK_SIZE 32
// Fill the N x N Euclidean distance matrix for points (X, Y); one thread
// per (i, j) entry of a 2D launch.
__global__ void calc_dist(double* X, double* Y, double* Dist, int N) {
    int i = blockIdx.y*blockDim.y+threadIdx.y;
    int j = blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= N || j >= N) return;
    // The matrix is symmetric but we still compute the whole thing — an
    // easy optimization target, as the original (Portuguese) comment noted.
    // FIX: square via multiplication instead of pow(x, 2) — cheaper and
    // exact for squaring.
    double dx = X[i] - X[j];
    double dy = Y[i] - Y[j];
    Dist[i*N+j] = sqrt(dx*dx + dy*dy);
}
// Exchange the two integers pointed to by a and b.
__device__ void swap(int *a, int *b) {
    const int first = *a;
    *a = *b;
    *b = first;
}
// Each thread builds one random tour (a permutation of the N cities) and
// stores its total length — looked up in the precomputed distance matrix —
// into costs[thread].
__global__ void random_sol(int *solutions, double *costs, double *distances, int N) {
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    double solution_cost = 0; // Total cost of this solution
    if (i >= 10000) return; // nSols (hard-coded; must match the host-side nSols)
    // Fill the solution in order so we can permute it afterwards
    for (int k = 0; k < N; k++) {
        solutions[i * N + k] = k;
    }
    // Initialize the random generator (seed 0, one sequence per thread)
    curandState_t st;
    curand_init(0, i, 0, &st);
    int idx;
    // Perform the permutation (Fisher-Yates style) and accumulate the cost
    for (int k = 1; k < N; k++){
        // Pick a random index in roughly [k, N).
        // NOTE(review): curand_uniform returns values in (0, 1], so idx can
        // evaluate to N, one past the end of this row — confirm and clamp.
        idx = (int) ((N-k) * curand_uniform(&st) + k);
        // Swap the elements, keeping the result in the solutions matrix
        swap(&solutions[i * N + k], &solutions[i * N + idx]);
        solution_cost += distances[solutions[i * N + k-1] * N + solutions[i * N + k]]; // leg distance
    }
    solution_cost += distances[solutions[i * N] * N + solutions[i * N + N-1]]; // close the tour: last city back to the first
    costs[i] = solution_cost; // store into the costs vector
}
int main() {
    // Read the input from stdin: N, then N (x, y) coordinate pairs
    int N; std::cin >> N;
    thrust::host_vector<double> host_x(N);
    thrust::host_vector<double> host_y(N);
    double x, y;
    for (int i = 0; i < N; i++) {
        std::cin >> x; std::cin >> y;
        host_x[i] = x;
        host_y[i] = y;
    }
    // ---------------------------------------------------------------------
    // Precompute the full N x N distance matrix on the GPU
    thrust::device_vector<double> dev_x(host_x);
    thrust::device_vector<double> dev_y(host_y);
    thrust::device_vector<double> dev_points_distance(N * N);
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(ceil((double) N / threads.x), ceil((double) N / threads.y));
    calc_dist<<<grid,threads>>>(thrust::raw_pointer_cast(dev_x.data()),
                                thrust::raw_pointer_cast(dev_y.data()),
                                thrust::raw_pointer_cast(dev_points_distance.data()),
                                N);
    // ---------------------------------------------------------------------
    // Draw random tours and compute their costs
    // (nSols must stay in sync with the 10000 hard-coded inside random_sol)
    long nSols = 10000;
    int gpu_threads = 1024;
    thrust::device_vector<int> dev_solutions(nSols * N); // one tour per row
    thrust::device_vector<double> dev_costs(nSols); // total cost of each tour
    // Time the kernel with CUDA events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);
    random_sol<<<ceil((double) nSols/gpu_threads), gpu_threads>>>(thrust::raw_pointer_cast(dev_solutions.data()),
                                thrust::raw_pointer_cast(dev_costs.data()),
                                thrust::raw_pointer_cast(dev_points_distance.data()),
                                N);
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    // ---------------------------------------------------------------------
    // Find the cheapest tour (thrust::min_element synchronizes implicitly)
    thrust::device_vector<double>::iterator iter = thrust::min_element(dev_costs.begin(), dev_costs.end());
    int position = iter - dev_costs.begin();
    double min_val = *iter;
    // ---------------------------------------------------------------------
    // Print the elapsed time (only when compiled with -DTIME) and the best tour
    #ifdef TIME
    std::cout << msecTotal << std::endl;
    std::cout << "milisegundo(s)." << std::endl;
    #endif
    std::cout << std::fixed << std::setprecision(5);
    std::cout << min_val;
    std::cout << " 0" << std::endl;
    for (int i = position * N; i < position * N + N; i++) {
        std::cout << dev_solutions[i] << ' ';
    }
    std::cout << std::endl;
    return 0;
}
4,136 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 5
// Each thread writes its own thread index into global memory.
__global__ void gpu_global_memory(int *d_a)
{
    const unsigned int tid = threadIdx.x;
    d_a[tid] = tid;
}
// Demonstrates a per-thread local variable: each thread computes and prints
// its own product of the input with its thread index.
__global__ void gpu_local_memory(int d_in)
{
    const int t_local = d_in * threadIdx.x;
    printf("Val of local var in current thread is %d\n", t_local);
}
// Stub kernel: despite its name it performs no work. FIX: the original only
// declared two unused locals (i, idx), which produced compiler warnings;
// they have been removed. d_a is kept for interface compatibility.
__global__ void gpu_shared_memory(float *d_a)
{
    (void)d_a; // intentionally unused
}
int main(void)
{
    // Demonstrates global and local memory use on the GPU.
    int h_a[N]; int *d_a;
    // writing in Global Memory
    cudaMalloc(&d_a, N*sizeof(int));
    // Note: this initial copy of (uninitialized) h_a is not needed for
    // correctness — the kernel overwrites every slot — but is kept to
    // mirror the original demo flow.
    cudaMemcpy(d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
    gpu_global_memory<<<1,N>>>(d_a);
    cudaDeviceSynchronize();
    cudaMemcpy(h_a, d_a, N*sizeof(int), cudaMemcpyDeviceToHost);
    printf("Array in Global Memory is: \n");
    for(int i=0; i<N; i++)
        printf("At Index: %d --> %d \n", i, h_a[i]);
    // writing in Local Memory
    printf("Use of Local memory on GPU.\n");
    gpu_local_memory <<<1,N>>>(5);
    cudaDeviceSynchronize();
    // BUG FIX: release the device allocation (the original leaked d_a).
    cudaFree(d_a);
    return 0;
}
|
4,137 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
/**
*
* Function my_gettimeofday()
* Used to compute time of execution
*
**/
// Wall-clock time in seconds with microsecond resolution.
double my_gettimeofday(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + (now.tv_usec * 1.0e-6L);
}
/**
*
* Function read_param()
* "l h
* n"
*
**/
/**
 * Read the two header lines of the data file ("l h" then "n").
 * data is unused here; it is filled later by read_data().
 * Returns 0 on success, -1 on any open/parse failure.
 */
int read_param(char *name, unsigned long **data, int *n, int *l, int *h){
    FILE* fp = NULL;
    (void)data; /* not used by this function */
    fp = fopen(name, "r");
    if(fp == NULL){
        printf("fopen :\t ERROR\n");
        return -1;
    }
    /* BUG FIX: l, h and n are int*, so the matching conversion is %d —
     * passing int* for %u is undefined behavior. Also check the return
     * values so a malformed header is reported instead of ignored. */
    if(fscanf(fp, "%d %d", l, h) != 2 || fscanf(fp, "%d", n) != 1){
        printf("fscanf :\t ERROR\n");
        fclose(fp);
        return -1;
    }
    fclose(fp);
    return 0;
}
/**
*
* Function read_data()
* "l h
* n
* x_0 y_0
* ...
* x_n y_n"
*
**/
/**
 * Read n coordinate pairs "x_i y_i" into data[0] (x) and data[1] (y),
 * after skipping the two header lines ("l h" and "n").
 * Returns 0 on success, -1 on any open/parse failure.
 */
int read_data(char *name, unsigned long **data, int n){
    FILE* fp = NULL;
    int i = 0, a = 0, b = 0;
    fp = fopen(name, "r");
    if(fp == NULL){
        printf("fopen :\t ERROR\n");
        return -1;
    }
    /* Ghost reading: skip the "l h" and "n" header lines.
     * BUG FIX: a and b are int, so scan with %d, not %u (int* with %u is
     * undefined behavior); also check the fscanf return values. */
    if(fscanf(fp, "%d %d", &a, &b) != 2 || fscanf(fp, "%d", &a) != 1){
        printf("fscanf :\t ERROR\n");
        fclose(fp);
        return -1;
    }
    for(i = 0; i < n; i++){
        if(fscanf(fp, "%lu %lu", &data[0][i], &data[1][i]) != 2){
            printf("fscanf :\t ERROR\n");
            fclose(fp);
            return -1;
        }
    }
    fclose(fp);
    return 0;
}
|
// copied from gsl
// Linear-interpolation quantile of sorted_data (ascending, length n) at
// fraction f in [0, 1]. Returns 0.0 for an empty array.
__device__ __host__ inline double sample_quantile_from_sorted_data(
    const double sorted_data[], const int n, const double f){
    // FIX: guard the empty case before touching n - 1 (the original
    // computed index from n - 1 first and checked n == 0 afterwards).
    if (n == 0)
        return 0.0;
    const double index = f * (n - 1);
    const int lhs = (int)index;
    const double delta = index - lhs;
    if (lhs == n - 1)
        return sorted_data[lhs];  // f == 1 (or n == 1): no right neighbour
    // Interpolate between the two bracketing order statistics.
    return (1 - delta) * sorted_data[lhs] + delta * sorted_data[lhs + 1];
}
|
4,139 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
// Simple transformation kernel: sample both 2D textures at normalized
// coordinates derived from the output pixel index and the fixed log_n, and
// pack the results (cooling first, heating in the second half) into output.
__global__ void transformKernel(float* output, cudaTextureObject_t coolTexObj, cudaTextureObject_t heatTexObj, int nx, int ny, float log_n)
{
    // Output pixel handled by this thread.
    const int xid = blockIdx.x * blockDim.x + threadIdx.x;
    const int yid = blockIdx.y * blockDim.y + threadIdx.y;
    if (xid >= nx || yid >= ny) return;
    // Normalized texture coordinates.
    const float t = float(xid)/nx;
    const float n = (log_n + 6.0)/12.1;
    output[yid*nx + xid] = tex2D<float>(coolTexObj, t, n);
    output[nx*ny + yid*nx + xid] = tex2D<float>(heatTexObj, t, n);
    //printf("%3d %d %f %f %f %f\n", xid, yid, n, t, output[yid*nx + xid], output[nx*ny + yid*nx + xid]);
}
void Load_Cooling_Tables(float* cooling_table, float* heating_table);
double get_time(void);
// Host code: load the Cloudy cooling/heating tables, bind them to linear-
// filtered texture objects, and sample them at nx_out x ny_out points.
int main()
{
    float *cooling_table;
    float *heating_table;
    float *h_output;
    const int nx = 81;         // input table width
    const int ny = 121;        // input table height
    int nx_out = 1000;         // output sampling resolution
    int ny_out = 1;
    float log_n = -3.0;
    double start_t, stop_t;
    // allocate arrays to be copied to textures
    cooling_table = (float *) malloc(nx*ny*sizeof(float));
    heating_table = (float *) malloc(nx*ny*sizeof(float));
    // Load cooling table into the array
    Load_Cooling_Tables(cooling_table, heating_table);
    // allocate output array on host and zero it
    h_output = (float *) malloc(2*nx_out*ny_out*sizeof(float));
    for (int i=0; i<nx_out; i++) {
        for (int j=0; j<ny_out; j++) {
            h_output[i+nx_out*j] = 0.0;
        }
    }
    // Allocate array to store result of transformations in device memory
    float* output;
    cudaMalloc(&output, 2*nx_out*ny_out*sizeof(float));
    // set info for cuda kernels
    dim3 dimBlock(16, 16);
    dim3 dimGrid((nx_out + dimBlock.x - 1) / dimBlock.x, (ny_out + dimBlock.y - 1) / dimBlock.y, 1);
    // Allocate CUDA arrays (single 32-bit float channel) in device memory
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray* cuCoolArray;
    cudaArray* cuHeatArray;
    cudaMallocArray(&cuCoolArray, &channelDesc, nx, ny);
    cudaMallocArray(&cuHeatArray, &channelDesc, nx, ny);
    // Copy the cooling and heating tables to device memory.
    // FIX: cudaMemcpyToArray is deprecated; use cudaMemcpy2DToArray with an
    // explicit source pitch. The host buffers are tightly packed, so the
    // pitch equals the row width (nx floats).
    cudaMemcpy2DToArray(cuCoolArray, 0, 0, cooling_table, nx*sizeof(float),
                        nx*sizeof(float), ny, cudaMemcpyHostToDevice);
    cudaMemcpy2DToArray(cuHeatArray, 0, 0, heating_table, nx*sizeof(float),
                        nx*sizeof(float), ny, cudaMemcpyHostToDevice);
    // Specify textures
    struct cudaResourceDesc coolResDesc;
    memset(&coolResDesc, 0, sizeof(coolResDesc));
    coolResDesc.resType = cudaResourceTypeArray;
    coolResDesc.res.array.array = cuCoolArray;
    struct cudaResourceDesc heatResDesc;
    memset(&heatResDesc, 0, sizeof(heatResDesc));
    heatResDesc.resType = cudaResourceTypeArray;
    heatResDesc.res.array.array = cuHeatArray;
    // Specify texture object parameters (same for both tables)
    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp; // out-of-bounds fetches return border values
    texDesc.addressMode[1] = cudaAddressModeClamp; // out-of-bounds fetches return border values
    texDesc.filterMode = cudaFilterModeLinear;     // hardware linear interpolation
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;
    // Create texture objects
    cudaTextureObject_t coolTexObj = 0;
    cudaCreateTextureObject(&coolTexObj, &coolResDesc, &texDesc, NULL);
    cudaTextureObject_t heatTexObj = 0;
    cudaCreateTextureObject(&heatTexObj, &heatResDesc, &texDesc, NULL);
    // Invoke kernel
    start_t = get_time();
    transformKernel<<<dimGrid, dimBlock>>>(output, coolTexObj, heatTexObj, nx_out, ny_out, log_n);
    // FIX: kernel launches are asynchronous; synchronize before reading the
    // clock so the measured interval actually covers the kernel.
    cudaDeviceSynchronize();
    stop_t = get_time();
    //printf("%f ms\n", (stop_t-start_t)*1000);
    // Copy the results back to the host
    cudaMemcpy(h_output, output, 2*nx_out*ny_out*sizeof(float), cudaMemcpyDeviceToHost);
    for (int j=0; j<ny_out; j++) {
        for (int i=0; i<nx_out; i++) {
            printf("%6.3f %6.3f\n", h_output[j*nx_out + i], h_output[nx_out*ny_out + j*nx_out + i]);
        }
    }
    // Destroy texture object
    cudaDestroyTextureObject(coolTexObj);
    cudaDestroyTextureObject(heatTexObj);
    // Free device memory
    cudaFreeArray(cuCoolArray);
    cudaFreeArray(cuHeatArray);
    cudaFree(output);
    // Free host memory
    free(cooling_table);
    free(heating_table);
    free(h_output);
    return 0;
}
// Parse "./cloudy_coolingcurve.txt" (tab-separated columns n, T, L, H;
// lines starting with '#' are skipped) and copy the L and H columns into
// the caller-provided cooling_table / heating_table buffers.
// NOTE(review): nx/ny here (121 x 81) are transposed relative to main()'s
// nx=81, ny=121 — the products match, but confirm the intended layout.
// NOTE(review): strtok() needs <string.h>, which is not visibly included
// in this file — confirm it compiles without an implicit declaration.
void Load_Cooling_Tables(float* cooling_table, float* heating_table)
{
    double *n_arr;   // density column (read but unused afterwards)
    double *T_arr;   // temperature column (read but unused afterwards)
    double *L_arr;   // cooling values -> cooling_table
    double *H_arr;   // heating values -> heating_table
    int i;
    int nx = 121;
    int ny = 81;
    FILE *infile;
    char buffer[0x1000];
    char * pch;
    // allocate arrays for temperature data
    n_arr = (double *) malloc(nx*ny*sizeof(double));
    T_arr = (double *) malloc(nx*ny*sizeof(double));
    L_arr = (double *) malloc(nx*ny*sizeof(double));
    H_arr = (double *) malloc(nx*ny*sizeof(double));
    // Read in cloudy cooling/heating curve (function of density and temperature)
    i=0;
    infile = fopen("./cloudy_coolingcurve.txt", "r");
    if (infile == NULL) {
        printf("Unable to open Cloudy file.\n");
        exit(1);
    }
    while (fgets(buffer, sizeof(buffer), infile) != NULL)
    {
        if (buffer[0] == '#') {
            continue;  // skip comment lines
        }
        else {
            // First token of the line is the density value
            pch = strtok(buffer, "\t");
            n_arr[i] = atof(pch);
            // Remaining tokens: temperature, cooling, heating (loop exits
            // when strtok runs out of tokens on this line)
            while (pch != NULL)
            {
                pch = strtok(NULL, "\t");
                if (pch != NULL)
                    T_arr[i] = atof(pch);
                pch = strtok(NULL, "\t");
                if (pch != NULL)
                    L_arr[i] = atof(pch);
                pch = strtok(NULL, "\t");
                if (pch != NULL)
                    H_arr[i] = atof(pch);
            }
            i++;
        }
    }
    fclose(infile);
    // copy data from cooling array into the table
    for (i=0; i<nx*ny; i++)
    {
        cooling_table[i] = float(L_arr[i]);
        heating_table[i] = float(H_arr[i]);
    }
    // Free arrays used to read in table data
    free(n_arr);
    free(T_arr);
    free(L_arr);
    free(H_arr);
}
// Current wall-clock time in seconds (microsecond resolution).
double get_time(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + 1.0e-6*tv.tv_usec;
}
|
4,140 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <numeric>
#include <ctime>
// Functor producing a deterministic pseudorandom sequence of doubles, one
// per call, by hashing an incrementing counter.
struct HashGenerator {
    int current_;
    HashGenerator (int start) : current_(start) {}
    double operator() () {
        ++current_;
        return generateReal(current_);
    }
    // generates an int hash from an int value (multiply/xorshift mix);
    // the low 31 bits are kept so the result is a non-negative int
    int hash(int i)
    {
        uint64_t v = ((uint64_t) i) * 3935559000370003845 + 2691343689449507681;
        v ^= v >> 21;
        v ^= v << 37;
        v ^= v >> 4;
        v *= 4768777513237032717;
        v ^= v << 20;
        v ^= v >> 41;
        v ^= v << 5;
        return (int) (v & ((((uint64_t) 1) << 31) - 1));
    }
    // generates a pseudorandom double precision real from an integer
    double generateReal(int i) {
        return (double(hash(i)));
    }
};
// Functor returning start, start+1, start+2, ... on successive calls.
struct IncGenerator {
    double current_;
    IncGenerator (double start) : current_(start) {}
    double operator() () {
        double value = current_;
        current_ += 1.0;
        return value;
    }
};
int main(void)
{
    // Benchmark device sort_by_key over N pseudorandom keys, repeated
    // numRuns times, validating the key order on the host each run.
    int N = 10000000;
    // generate 10M key/value pairs serially on the host
    // (FIX: the original comment said 100M; N is 1e7)
    thrust::host_vector<double> h_vec(N);
    thrust::host_vector<double> v_vec(N);
    thrust::host_vector<double> h_vec_result(N);
    thrust::host_vector<double> v_vec_result(N);
    HashGenerator HG (0);
    IncGenerator IG (0);
    clock_t begin_generation = clock();
    std::generate(h_vec.begin(), h_vec.end(), HG);  // pseudorandom keys
    std::generate(v_vec.begin(), v_vec.end(), IG);  // values 0, 1, 2, ...
    clock_t end_generation = clock();
    double generation_time = double(end_generation - begin_generation) / CLOCKS_PER_SEC;
    std::cout << "Generation Time: " << generation_time << std::endl;
    int numRuns = 5;
    for(int i = 0; i < numRuns; i++) {
        clock_t begin_sort_copy = clock();
        // transfer data to the device
        thrust::device_vector<double> d_vec = h_vec;
        thrust::device_vector<double> dv_vec = v_vec;
        clock_t begin_sort = clock();
        // sort key/value pairs on the device
        thrust::sort_by_key(thrust::device, d_vec.begin(), d_vec.end(), dv_vec.begin());
        // FIX: cudaThreadSynchronize() is deprecated; use
        // cudaDeviceSynchronize() to wait for device work before timing.
        cudaDeviceSynchronize();
        clock_t end_sort = clock();
        // transfer data back to host
        thrust::copy(d_vec.begin(), d_vec.end(), h_vec_result.begin());
        thrust::copy(dv_vec.begin(), dv_vec.end(), v_vec_result.begin());
        cudaDeviceSynchronize();
        clock_t end_sort_copy = clock();
        double sort_copy_time = double(end_sort_copy - begin_sort_copy) / CLOCKS_PER_SEC;
        double sort_time = double(end_sort - begin_sort) / CLOCKS_PER_SEC;
        std::cout << "Sort + Copy Time: " << sort_copy_time << std::endl;
        std::cout << "Sort Only Time: " << sort_time << std::endl;
        // Validate: keys must come back in non-decreasing order.
        clock_t begin_check = clock();
        for(int j = 1; j < N; j++) {
            if(h_vec_result[j] < h_vec_result[j-1]){
                std::cout << "Error: " << h_vec_result[j-1] << " is before " << h_vec_result[j] << std::endl;
            }
        }
        clock_t end_check = clock();
        double check_time = double(end_check - begin_check) / CLOCKS_PER_SEC;
        std::cout << "Check Time: " << check_time << std::endl;
    }
    return 0;
}
4,141 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
extern "C"
{
    // Adds incrementSize to each of itemCount elements. The launch uses a
    // 2D grid of 1D blocks, flattened here into a single linear thread id.
    __global__ void IncrementAll(float* input, float* output, float incrementSize, int itemCount)
    {
        const int rowWidth = blockDim.x * gridDim.x; // threads per grid row
        const int threadId = blockIdx.y * rowWidth
            + blockIdx.x * blockDim.x
            + threadIdx.x;
        if (threadId >= itemCount)
        {
            return;
        }
        output[threadId] = input[threadId] + incrementSize;
    }
}
4,142 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
using namespace std;
#define N 3 //rowsize
#define M 4 // columnsize
const int blockNUM = 4;
const int threadNUM =3;
// CPU reference: r = matrix * v for a rowsize x columnsize row-major
// matrix, then print the result prefixed with "CPU:".
void mxv(const int rowsize,const int columnsize,
         const float*matrix,const float*v,float*r)
{
    for(int row=0;row<rowsize;row++)
    {
        float acc=0.0f;
        const float*matRow=matrix+row*columnsize;
        for(int col=0;col<columnsize;col++)
            acc+=matRow[col]*v[col];
        r[row]=acc;
    }
    cout <<"CPU:";
    for(int row=0;row<rowsize;row++)
        cout << r[row]<< " ";
    cout <<endl;
}
// Naive GPU matrix-vector product: one thread per output row.
// columnPitch is the allocated row stride (in elements) of d_matrix.
static void __global__ mxvNaive(int rowSize, int columnSize, int columnPitch,
                                const float *d_matrix, const float *d_vec, float *d_r)
{
    const int row = threadIdx.x+blockIdx.x*blockDim.x;
    if(row>=rowSize)
        return;
    float acc=0;
    for(int c=0;c<columnSize;c++)
        acc+=d_matrix[row*columnPitch+c]*d_vec[c];
    d_r[row]=acc;
}
int main()
{
    // Compare GPU and CPU matrix-vector products on an N x M matrix.
    float *matrix=(float*)malloc(N*M*sizeof(float));
    float *vec=(float*)malloc(M*sizeof(float));
    float *r =(float*)malloc(N*sizeof(float));
    float *dev_matrix,*dev_vec,*dev_r;
    cudaMalloc((void**)&dev_vec,M*sizeof(float));
    cudaMalloc((void**)&dev_matrix,M*N*sizeof(float));
    cudaMalloc((void**)&dev_r,N*sizeof(float));
    // matrix = 1..N*M in row-major order, vec = 1..M
    for(int i=1;i<=N*M;i++)
        matrix[i-1]=i;
    for(int i=0;i<M;i++)
        vec[i]=i+1;
    cudaMemcpy(dev_matrix,matrix,M*N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_vec,vec,M*sizeof(float),cudaMemcpyHostToDevice);
    mxvNaive<<<blockNUM,threadNUM>>> (N,M,M,dev_matrix,dev_vec,dev_r);
    cudaMemcpy(r,dev_r,N*sizeof(float),cudaMemcpyDeviceToHost);
    cout << "GPU:";
    for(int i=0;i<N;i++)
        cout <<r[i]<<" ";
    cout <<endl;
    // CPU reference (overwrites r and prints its own line)
    mxv(N,M,matrix,vec,r);
    // BUG FIX: release device and host allocations (the original leaked
    // all six buffers).
    cudaFree(dev_matrix);
    cudaFree(dev_vec);
    cudaFree(dev_r);
    free(matrix);
    free(vec);
    free(r);
    return 0;
}
|
4,143 | #include "includes.h"
extern "C" {
}
#define TB 128
#define DISP_MAX 256
// Zero y wherever the corresponding x value is exactly 255 (pure white).
__global__ void remove_white(float *x, float *y, int size)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) return;
    if (x[id] == 255) {
        y[id] = 0;
    }
}
4,144 | // Tests that "sm_XX" gets correctly converted to "compute_YY" when we invoke
// fatbinary.
//
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// CHECK:fatbinary
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_20 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM20 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_21 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM21 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_30 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM30 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_32 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM32 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_35 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM35 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_37 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM37 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_50 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM50 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_52 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM52 %s
// RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_53 %s 2>&1 \
// RUN: | FileCheck -check-prefix ARCH64 -check-prefix SM53 %s
// SM20:--image=profile=sm_20{{.*}}--image=profile=compute_20
// SM21:--image=profile=sm_21{{.*}}--image=profile=compute_20
// SM30:--image=profile=sm_30{{.*}}--image=profile=compute_30
// SM32:--image=profile=sm_32{{.*}}--image=profile=compute_32
// SM35:--image=profile=sm_35{{.*}}--image=profile=compute_35
// SM37:--image=profile=sm_37{{.*}}--image=profile=compute_37
// SM50:--image=profile=sm_50{{.*}}--image=profile=compute_50
// SM52:--image=profile=sm_52{{.*}}--image=profile=compute_52
// SM53:--image=profile=sm_53{{.*}}--image=profile=compute_53
|
4,145 | #include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
// Store the sum of the single integers at a and b into c.
__global__ void Asum(int *a, int *b, int *c){
    c[0] = a[0] + b[0];
}
|
4,146 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
  int nvalue;   // number of valid entries in values
  int* values;  // device pointer to the integer array kernels will read
};
typedef struct GpuConstantsPackage dataPack;
// This device constant is available to all functions in this CUDA unit
// (populated from the host with cudaMemcpyToSymbol in main()).
__device__ __constant__ dataPack dPk;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
// Managed by CreateGpuInt / DestroyGpuInt / UploadGpuInt / DownloadGpuInt.
struct GpuMirroredInt {
  int len;          // Length of the array (again, this is not a C++ course)
  int IsPinned;     // "Pinned" memory is best for Host <= => GPU transfers.
                    //   In fact, if non-pinned memory is transferred to the
                    //   GPU from the host, a temporary allocation of pinned
                    //   memory will be created and then destroyed.  Pinned
                    //   memory is not host-pageable, but the only performance
                    //   implication is that creating lots of pinned memory
                    //   may make it harder for the host OS to manage large
                    //   memory jobs.
  int* HostData;    // Pointer to allocated memory on the host
  int* DevcData;    // Pointer to allocated memory on the GPU.  Note that the
                    //   host can know what the address of memory on the GPU
                    //   is, but it cannot simply de-reference that pointer
                    //   in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
// Managed by CreateGpuFloat / DestroyGpuFloat / UploadGpuFloat /
// DownloadGpuFloat; the float counterpart of GpuMirroredInt.
struct GpuMirroredFloat {
  int len;          // Length of the array (again, this is not a C++ course)
  int IsPinned;     // "Pinned" memory is best for Host <= => GPU transfers.
                    //   In fact, if non-pinned memory is transferred to the
                    //   GPU from the host, a temporary allocation of pinned
                    //   memory will be created and then destroyed.  Pinned
                    //   memory is not host-pageable, but the only performance
                    //   implication is that creating lots of pinned memory
                    //   may make it harder for the host OS to manage large
                    //   memory jobs.
  float* HostData;  // Pointer to allocated memory on the host
  float* DevcData;  // Pointer to allocated memory on the GPU.  Note that the
                    //   host can know what the address of memory on the GPU
                    //   is, but it cannot simply de-reference that pointer
                    //   in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// kWarpPrefixSum: kernel for making a prefix sum of 32 numbers
//-----------------------------------------------------------------------------
// NOTE(review): despite the name, this kernel currently computes no prefix
// sum — it only prints the 32 values of dPk.values (4 rows of 8) from
// thread 0 of each block.
__global__ void kWarpPrefixSum()
{
  // Restrict printing to one thread to keep the output un-interleaved.
  if (threadIdx.x == 0) {
    printf("Values =\n");
    int i, j;
    for (i = 0; i < 4; i++) {
      printf(" ");
      for (j = 8*i; j < 8*(i+1); j++) {
        printf("%4d ", dPk.values[j]);
      }
      printf(" [ slots %2d - %2d ]\n", 8*i, 8*(i+1)-1);
    }
  }
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed ot the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
  // Build a zero-initialized host/device mirrored int array of length len.
  gpuInt G;
  G.len = len;
  G.IsPinned = pin;
  // Round the real allocation up to the next multiple of 128 elements so
  // device memory is always grabbed in 512-byte blocks (alignment, and
  // keeping host-to-device transfers in line).
  const int padded = ((len + 127) / 128) * 128;
  const size_t bytes = padded * sizeof(int);
  if (pin == 1) {
    cudaHostAlloc((void **)&G.HostData, bytes, cudaHostAllocMapped);
  }
  else {
    G.HostData = (int*)malloc(bytes);
  }
  cudaMalloc((void **)&G.DevcData, bytes);
  // Zero both mirrors.
  memset(G.HostData, 0, bytes);
  cudaMemset((void *)G.DevcData, 0, bytes);
  return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
  // Release the host mirror with the allocator that created it, then the
  // device buffer.
  if (G->IsPinned == 1) cudaFreeHost(G->HostData);
  else free(G->HostData);
  cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
  // Push the host mirror to the device.
  const size_t bytes = G->len * sizeof(int);
  cudaMemcpy(G->DevcData, G->HostData, bytes, cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the host to the device.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  // BUG FIX: the copy goes device -> host (DevcData into HostData), so the
  // direction flag must be cudaMemcpyDeviceToHost; the original passed
  // cudaMemcpyHostToDevice.
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed ot the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
  // Build a zero-initialized host/device mirrored float array of length len.
  gpuFloat G;
  G.len = len;
  G.IsPinned = pin;
  // Round the real allocation up to the next multiple of 128 elements so
  // device memory is always grabbed in 512-byte blocks (alignment, and
  // keeping host-to-device transfers in line).
  const int padded = ((len + 127) / 128) * 128;
  const size_t bytes = padded * sizeof(float);
  if (pin == 1) {
    cudaHostAlloc((void **)&G.HostData, bytes, cudaHostAllocMapped);
  }
  else {
    G.HostData = (float*)malloc(bytes);
  }
  cudaMalloc((void **)&G.DevcData, bytes);
  // Zero both mirrors.
  memset(G.HostData, 0, bytes);
  cudaMemset((void *)G.DevcData, 0, bytes);
  return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
  // Release the host mirror with the allocator that created it, then the
  // device buffer.
  if (G->IsPinned == 1) cudaFreeHost(G->HostData);
  else free(G->HostData);
  cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload an float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
  // Push the host mirror to the device.
  const size_t bytes = G->len * sizeof(float);
  cudaMemcpy(G->DevcData, G->HostData, bytes, cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download an float array from the host to the device.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  // BUG FIX: the copy goes device -> host (DevcData into HostData), so the
  // direction flag must be cudaMemcpyDeviceToHost; the original passed
  // cudaMemcpyHostToDevice.
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
  int i, np;
  gpuInt ivals;
  // Create a small array of integers and populate it
  ivals = CreateGpuInt(32, 1);
  // Initialize random number generator
  srand(29538);
  // Create random numbers in [0, 100)
  np = 32;
  for (i = 0; i < np; i++) {
    ivals.HostData[i] = (int)(100 * (double)rand() / (double)RAND_MAX);
  }
  // Stage critical constants (count plus the device pointer to the values)
  dataPack dpstage;
  dpstage.nvalue = np;
  dpstage.values = ivals.DevcData;
  // Upload all data to the device
  UploadGpuInt(&ivals);
  // Upload the constants to the constants cache
  cudaMemcpyToSymbol(dPk, &dpstage, sizeof(dataPack));
  // Launch the kernel as a single block of one warp (32 threads)
  kWarpPrefixSum<<<1, 32>>>();
  // Device synchronization (wait for the kernel's printf output)
  cudaDeviceSynchronize();
  return 0;
}
|
4,147 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
/**
* @brief Print the device's properties
*
*/
extern void dispDevice()
{
    // Query device 0 and print its name.
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, 0);
    printf("GPU: %s\n", properties.name);
}
// test kernel: only global thread 0 prints, proving the launch worked
__global__ void test()
{
    const int t_id = blockIdx.x * blockDim.x + threadIdx.x; // global thread ID
    if (t_id < 1)
        printf("t_id %d working?\n", t_id);
}
// call the test kernel with enough threads to cover 1000 work items
extern void testWrapper()
{
    const int blockSize = 1024;
    const int gridSize = (1000 + blockSize - 1) / blockSize; // ceil(1000/1024) == 1
    test<<<gridSize, blockSize>>>();
}
/**
* @brief Calculate the distance between two points
*
* @param currObs
* @param currMu
* @param nFeatures
* @return __device__ dist
*/
/**
 * @brief Squared Euclidean distance between two nFeatures-dim points
 *        (no sqrt — only relative comparisons are needed by the caller).
 *
 * @param currObs   first point
 * @param currMu    second point (a set mean)
 * @param nFeatures dimensionality
 * @return the squared distance
 */
__device__ float
dist(float *currObs, float *currMu, int nFeatures)
{
    float acc = 0;
    for (int k = 0; k < nFeatures; k++)
    {
        acc += (currObs[k] - currMu[k]) * (currObs[k] - currMu[k]);
    }
    return acc;
}
/**
* @brief Update set assignments
*
* @param d_x
* @param d_mu
* @param d_sums
* @param d_counts
* @param d_sets
* @param nSets
* @param nFeatures
* @param nObs
* @param rank
* @return __global__ updateSets
*/
__global__ void updateSets(float *d_x, float *d_mu, float *d_sums, int *d_counts, int *d_sets, int nSets, int nFeatures, int nObs, int rank)
{
    int t_id = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
    if (t_id >= nObs) // end the run for a thread out of range
        return;
    // NOTE(review): these scratch buffers are fixed at 5 entries, so the
    // kernel silently assumes nFeatures <= 5 — confirm with callers or
    // guard against larger feature counts.
    float currObs[5]; // the current observation
    float currMu[5]; // the current set mean
    int startIdx = (rank * nObs * nFeatures) + (t_id * nFeatures); // the ranks starting index
    // get the current observation
    for (int i = 0; i < nFeatures; i++)
    {
        currObs[i] = d_x[startIdx + i];
    }
    // if (t_id == 0) // DEBUGGING
    // {
    // printf("npp %d startIdx %d\n", nObs, startIdx);
    // printf("displaying t_id 0 pt on rank %d.\n", rank);
    // for (int i = 0; i < 5; i++)
    // {
    // printf("%f ", currObs[i]);
    // }
    // printf("\n");
    // }
    float bestDist = FLT_MAX; // maximum float
    int bestSet = 0; // assume best set is 0
    // check all sets for the nearest mean
    for (int aSet = 0; aSet < nSets; aSet++)
    {
        // get the sets mean -- could be done a better way, I'm sure
        for (int i = 0; i < nFeatures; i++)
        {
            currMu[i] = d_mu[(aSet * nFeatures) + i];
        }
        float distance = dist(currObs, currMu, nFeatures); // get the distance to aSet's mean
        if (distance < bestDist) // update distance if current distance is best
        {
            bestDist = distance;
            bestSet = aSet;
        }
    }
    d_sets[t_id] = bestSet; // assign set
    atomicAdd(&d_counts[bestSet], 1); // add 1 to set counts
    for (int i = 0; i < nFeatures; i++) // sum set observations
    {
        atomicAdd(&d_sums[(bestSet * nFeatures) + i], currObs[i]);
    }
}
/**
* @brief Update set assignments wrapper -- called by an MPI process
*
* @param x
* @param mu
* @param sums
* @param counts
* @param sets
* @param nSets
* @param nFeatures
* @param nObs
* @param rank
*/
extern void updateSetsWrapper(float *x, float *mu, float *sums, int *counts, int *sets, int nSets, int nFeatures, int nObs, int rank)
{
    float *d_x, *d_mu, *d_sums;
    int *d_sets, *d_counts;
    size_t obsBytes, muBytes, setsBytes, cntBytes;
    // NOTE(review): the extra factor of 4 sizes d_x to hold 4 ranks' worth
    // of observations (the kernel offsets by rank * nObs * nFeatures) —
    // presumably the MPI world size is 4; confirm and parameterize.
    obsBytes = sizeof(float) * nObs * nFeatures * 4; // get the sizes in bytes of device arrays
    muBytes = sizeof(float) * nFeatures * nSets;
    setsBytes = sizeof(int) * nObs;
    cntBytes = sizeof(int) * nSets;
    cudaMalloc(&d_x, obsBytes); // allocate memory on device
    cudaMalloc(&d_mu, muBytes);
    cudaMalloc(&d_sums, muBytes);
    cudaMalloc(&d_sets, setsBytes);
    cudaMalloc(&d_counts, cntBytes);
    // copy data from host to device
    cudaMemcpy(d_x, x, obsBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_sums, sums, muBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_counts, counts, cntBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mu, mu, muBytes, cudaMemcpyHostToDevice);
    // SET UP threads: one thread per observation
    int blockSize, gridSize;
    blockSize = 1024;
    gridSize = (int)ceil((float)nObs / blockSize);
    updateSets<<<gridSize, blockSize>>>(d_x, d_mu, d_sums, d_counts, d_sets, nSets, nFeatures, nObs, rank);
    // copy data from device to host (these blocking copies also
    // synchronize with the kernel launch above)
    cudaMemcpy(sums, d_sums, muBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(counts, d_counts, cntBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(mu, d_mu, muBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(sets, d_sets, setsBytes, cudaMemcpyDeviceToHost);
    //printf("counts[0]=%d\n", counts[0]);
    // free device memory
    cudaFree(d_x);
    cudaFree(d_mu);
    cudaFree(d_sums);
    cudaFree(d_sets);
    cudaFree(d_counts);
}
/**
* @brief Update the mean
*
* @param mu
* @param sums
* @param counts
* @param nFeatures
* @return __global__ computeMu
*/
/**
 * @brief Recompute each set's mean from its accumulated sums and counts.
 *        Launch with one thread per set.
 *
 * @param mu        out: set means, one row of nFeatures per set
 * @param sums      per-set feature sums
 * @param counts    per-set member counts
 * @param nFeatures features per set
 */
__global__ void computeMu(float *mu, float *sums, int *counts, int nFeatures)
{
    const int set = threadIdx.x;        // this thread's set
    const int offset = set * nFeatures; // first feature index of the set
    // Guard against empty sets: never divide by zero.
    const int divisor = (counts[set] != 0) ? counts[set] : 1;
    for (int f = 0; f < nFeatures; f++)
        mu[offset + f] = sums[offset + f] / divisor;
}
/**
* @brief wrapper for computeMu
*
* @param mu
* @param sums
* @param counts
* @param nFeatures
* @param nSets
*/
/**
 * @brief Host wrapper for computeMu: copies sums/counts/mu to the device,
 *        recomputes the means, and copies mu back.
 *
 * @param mu        in/out: set means (nSets x nFeatures)
 * @param sums      per-set feature sums
 * @param counts    per-set member counts
 * @param nFeatures features per set
 * @param nSets     number of sets
 */
extern void muWrapper(float *mu, float *sums, int *counts, int nFeatures, int nSets)
{
    float *d_mu, *d_sums;
    int *d_counts;
    size_t muBytes, cntBytes;
    muBytes = sizeof(float) * nFeatures * nSets;
    cntBytes = sizeof(int) * nSets;
    cudaMalloc(&d_mu, muBytes);
    cudaMalloc(&d_sums, muBytes);
    cudaMalloc(&d_counts, cntBytes);
    cudaMemcpy(d_mu, mu, muBytes, cudaMemcpyHostToDevice); // copy data from host to device
    cudaMemcpy(d_sums, sums, muBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_counts, counts, cntBytes, cudaMemcpyHostToDevice);
    // BUG FIX: the thread count was hard-coded to 3, so only the first three
    // set means were ever updated; launch one thread per set instead.
    computeMu<<<1, nSets>>>(d_mu, d_sums, d_counts, nFeatures);
    cudaMemcpy(mu, d_mu, muBytes, cudaMemcpyDeviceToHost); // copy data from device to host
    cudaFree(d_mu); // free device memory
    cudaFree(d_sums);
    cudaFree(d_counts);
}
/**
* @brief Copy an array
*
* @param sets
* @param prevSets
* @param nObs
* @return __global__ copySets
*/
/**
 * @brief Element-wise copy of sets into prevSets. One thread per element.
 *
 * @param sets     source array
 * @param prevSets destination array
 * @param nObs     number of elements
 */
__global__ void copySets(int *sets, int *prevSets, int nObs)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
    if (idx < nObs)
        prevSets[idx] = sets[idx];
}
/**
* @brief copySets wrapper
*
* @param sets
* @param prevSets
* @param nObs
*/
/**
 * @brief Host wrapper for copySets: copies the current assignments into
 *        prevSets by round-tripping through the device.
 *
 * @param sets     source array (host)
 * @param prevSets destination array (host)
 * @param nObs     number of elements
 */
extern void copyWrapper(int *sets, int *prevSets, int nObs)
{
    int *d_sets, *d_prevSets;
    size_t setsBytes = sizeof(int) * nObs;
    cudaMalloc(&d_sets, setsBytes);
    cudaMalloc(&d_prevSets, setsBytes);
    cudaMemcpy(d_sets, sets, setsBytes, cudaMemcpyHostToDevice);
    // PERF FIX: the old H2D copy of prevSets was redundant -- the kernel
    // overwrites every element of d_prevSets before it is read back.
    int blockSize = 1024;
    int gridSize = (int)ceil((float)nObs / blockSize);
    copySets<<<gridSize, blockSize>>>(d_sets, d_prevSets, nObs);
    cudaMemcpy(prevSets, d_prevSets, setsBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_sets);
    cudaFree(d_prevSets);
}
/**
* @brief Check for convergence
*
* @param sets
* @param prevSets
* @param converge
* @param nObs
* @return __global__ checkConvergence
*/
/**
 * @brief Per-observation convergence flag: true when the assignment did not
 *        change between iterations. One thread per observation.
 *
 * @param sets     current assignments
 * @param prevSets previous assignments
 * @param converge out: per-observation equality flags
 * @param nObs     number of observations
 */
__global__ void checkConvergence(int *sets, int *prevSets, bool *converge, int nObs)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
    if (idx < nObs)
        converge[idx] = (sets[idx] == prevSets[idx]);
}
/**
* @brief Check convergence wrapper
*
* @param sets
* @param prevSets
* @param converge
* @param nObs
*/
/**
 * @brief Host wrapper for checkConvergence: compares the current and
 *        previous assignments on the device and returns per-element flags.
 *
 * @param sets     current assignments (host)
 * @param prevSets previous assignments (host)
 * @param converge out: per-observation convergence flags (host)
 * @param nObs     number of observations
 */
extern void convergeWrapper(int *sets, int *prevSets, bool *converge, int nObs)
{
    const size_t setsBytes = sizeof(int) * nObs;
    const size_t convBytes = sizeof(bool) * nObs;
    int *d_sets, *d_prevSets;
    bool *d_converge;
    cudaMalloc(&d_sets, setsBytes);
    cudaMalloc(&d_prevSets, setsBytes);
    cudaMalloc(&d_converge, convBytes);
    cudaMemcpy(d_sets, sets, setsBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_prevSets, prevSets, setsBytes, cudaMemcpyHostToDevice);
    const int blockSize = 1024;
    const int gridSize = (int)ceil((float)nObs / blockSize);
    checkConvergence<<<gridSize, blockSize>>>(d_sets, d_prevSets, d_converge, nObs);
    cudaMemcpy(converge, d_converge, convBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_sets);
    cudaFree(d_prevSets);
    cudaFree(d_converge);
}
4,148 | #include "simd_kernels.hh"
#include "simd_ops.hh"
#include "../runtime/node.hh"
#include <iostream>
namespace cpu
{
namespace
{
// Sigmoid activation kernel stub: the SIMD implementation is currently
// disabled, so this is a no-op; `node` is cast to void to silence the
// unused-parameter warning until simd_sigmoid is re-enabled.
void kernel_sigmoid(rt::Node* node)
{
(void) node;
//simd_sigmoid(node->in1, node->out1, node->len1);
}
}
// Kernel dispatch table, 64 slots. Only 36 initializers are listed; the
// remaining entries are value-initialized to nullptr. Each slot's intended
// kernel is named in its trailing comment; only the sigmoid slot (index 2)
// is implemented so far.
// NOTE(review): the slot order presumably mirrors an op-id enum declared in
// ../runtime/node.hh -- confirm before reordering anything here.
kernel_f simd_kernels_list[64] =
{
nullptr, //kernel_mat_mat_mul,
nullptr, //kernel_mat_rvect_add,
kernel_sigmoid,
nullptr, //kernel_mse,
nullptr, //kernel_softmax,
nullptr, //kernel_log_softmax,
nullptr, //kernel_softmax_cross_entropy,
nullptr, //kernel_conv2d,
nullptr, //kernel_relu,
nullptr, //kernel_relu_leaky,
nullptr, //kernel_tanh,
nullptr, //kernel_mse_grad,
nullptr, //kernel_sigmoid_grad,
nullptr, //kernel_mat_mul_add,
nullptr, //kernel_tmat_mat_mul,
nullptr, //kernel_mat_tmat_mul,
nullptr, //kernel_mat_sum_rows,
nullptr, //kernel_mat_sum_cols,
nullptr, //kernel_softmax_cross_entropy_grad,
nullptr, //kernel_relu_grad,
nullptr, //kernel_conv2d_bias_add,
nullptr, //kernel_update,
nullptr, //kernel_sigmoid_cross_entropy,
nullptr, //kernel_sigmoid_cross_entropy_grad,
nullptr, //kernel_conv2d_input_grad,
nullptr, //kernel_conv2d_kernel_grad,
nullptr, //kernel_argmax_acc,
nullptr, //kernel_moment_update,
nullptr, //kernel_moment_update2,
nullptr, //kernel_adam_update,
nullptr, //kernel_leaky_relu_grad,
nullptr, //kernel_conv2d_bias_add_grad,
nullptr, //kernel_tanh_grad,
nullptr, //kernel_conv2d_transpose,
nullptr, //kernel_conv2d_transpose_input_grad,
nullptr, //kernel_conv2d_transpose_kernel_grad
};
}
|
4,149 | #undef NDEBUG
#include <assert.h>
// Runtime sanity check of ABI assumptions other code relies on.
int main()
{
// NDEBUG is #undef'd above, so these asserts are always active.
assert(sizeof(cudaError_t) == sizeof(int));    // CUDA error codes are int-sized enums
assert(sizeof(cudaStream_t) == sizeof(void*)); // streams are opaque pointers
// LP64 assumption -- holds on 64-bit Linux/macOS, fails on Windows (LLP64).
assert(sizeof(long) == sizeof(size_t));
return 0;
}
|
4,150 | #include "includes.h"
// helper for CUDA error handling
/**
 * @brief Restore full-resolution eigenvectors as linear combinations of the
 *        mean-subtracted images: one thread per output pixel of one
 *        component (blockIdx.x = component row, blockIdx.y/threadIdx.x = pixel).
 *
 * @param meanSubtractedImages imageNum x pixelNum, row-major
 * @param reducedEigenvectors  imageNum x imageNum; rows are consumed in
 *                             reverse order (imageNum - row - 1)
 * @param restoredEigenvectors out: componentNum x pixelNum
 */
__global__ void restoreEigenvectors( const double* meanSubtractedImages , const double* reducedEigenvectors , double* restoredEigenvectors , std::size_t imageNum , std::size_t pixelNum , std::size_t componentNum )
{
    std::size_t row = blockIdx.x;
    std::size_t col = blockIdx.y * blockDim.x + threadIdx.x;
    if(col >= pixelNum || row >= componentNum)
    {
        return;
    }
    // PERF FIX: accumulate in a register instead of read-modify-writing
    // global memory every iteration (old code issued 2*imageNum extra
    // global transactions per output element).
    double acc = 0.0;
    for(std::size_t i = 0; i < imageNum; ++i)
    {
        acc += reducedEigenvectors[(imageNum - row - 1) * imageNum + i] * meanSubtractedImages[i * pixelNum + col];
    }
    restoredEigenvectors[row * pixelNum + col] = acc;
}
4,151 | #include<stdio.h>
#include<math.h>
#define N 8
/**
 * @brief Blelloch exclusive scan of N elements in shared memory.
 *        Launch with one block of N threads.
 *
 * BUG FIX: the original placed __syncthreads() inside the divergent
 * `if(i >= s && i < N)` branch in both sweeps -- a barrier not reached by
 * all threads of the block is undefined behavior. Within one step the read
 * set {2sk+s-1} and write set {2s(k+1)-1} are disjoint, so no intra-step
 * barrier is needed at all; only the end-of-step barrier remains, outside
 * the branch. A barrier is also added after the root is zeroed so the
 * down-sweep never reads a stale value.
 *
 * @param d_in in/out: N ints, replaced by their exclusive prefix sums
 */
__global__ void exclusive_scan(int *d_in) {
    __shared__ int temp_in[N];
    int id = threadIdx.x;
    temp_in[id] = d_in[id];
    __syncthreads();
    // Phase 1: up-sweep (reduce).
    unsigned int s = 1;
    for(; s <= N-1; s <<= 1) {
        int i = 2 * s * (threadIdx.x + 1) - 1;
        if(i >= s && i < N)
            temp_in[i] += temp_in[i-s]; // read/write sets disjoint within a step
        __syncthreads();                // all threads reach the barrier
    }
    // Zero the root before the down-sweep.
    if(threadIdx.x == 0) {
        temp_in[N-1] = 0;
    }
    __syncthreads(); // publish the cleared root
    // Phase 2: down-sweep.
    for(s = s/2; s >= 1; s >>= 1) {
        int i = 2*s*(threadIdx.x+1)-1;
        if(i >= s && i < N){
            int r = temp_in[i];
            temp_in[i] += temp_in[i-s];
            temp_in[i-s] = r;
        }
        __syncthreads();
    }
    d_in[id] = temp_in[id];
}
/**
 * @brief Hillis-Steele inclusive scan of N elements in shared memory.
 *        Launch with one block of N threads.
 *
 * BUG FIX: the original read temp_in[i-s] and wrote temp_in[i] within the
 * same unsynchronised step, so a neighbouring thread could overwrite a
 * value before it was read (data race). Each step now separates the read
 * from the write with a barrier reached by every thread.
 *
 * @param d_in in/out: N ints, replaced by their inclusive prefix sums
 */
__global__ void inclusive_scan(int *d_in) {
    __shared__ int temp_in[N];
    int i = threadIdx.x;
    temp_in[i] = d_in[i];
    __syncthreads();
    for(unsigned int s = 1; s <= N-1; s <<= 1) {
        int addend = 0;
        if(i >= s)
            addend = temp_in[i-s]; // snapshot the neighbour first
        __syncthreads();           // everyone has read before anyone writes
        if(i >= s)
            temp_in[i] += addend;
        __syncthreads();           // step complete before the next distance
    }
    d_in[i] = temp_in[i];
}
/**
 * @brief Driver: scans a fixed 8-element vector on the device, times the
 *        kernel with CUDA events, and prints the result.
 */
int main()
{
    int h_in[N] = {3, 1, 7, 0, 4, 1, 6, 3};
    int h_out[N];
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int *d_in;
    cudaMalloc((void**) &d_in, N*sizeof(int));
    cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
    // Time the kernel with events recorded around the launch.
    cudaEventRecord(start);
    //inclusive_scan<<<1, N>>>(d_in);
    exclusive_scan<<<1, N>>>(d_in);
    cudaEventRecord(stop);
    cudaMemcpy(&h_out, d_in, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // BUG FIX: release the events and device buffer (events were leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    for(int i=0; i<N; i++)
        printf("out[%d] = %d\n", i, h_out[i]);
    printf("Time used: %f milliseconds\n", milliseconds);
    // BUG FIX: return 0 on success; the old `return -1` reported failure
    // to the shell on every successful run.
    return 0;
}
|
4,152 | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
// For comparisons
//#include "seqScan.c"
/* ------------------------------------------------------------------------
Unrolled in-place(shared memory) Scan without syncs (16 threads, 32 elts)
--------------------------------------------------------------------- */
/* One 32-element Sklansky-style inclusive scan performed by 16 threads
 * (each thread owns two elements); the block total (element 31) is written
 * to maxs[i].
 *
 * NOTE(review): this deliberately omits every __syncthreads(), relying on
 * implicit warp-synchronous execution of the 16 participating threads.
 * That assumption is not guaranteed on Volta+ (independent thread
 * scheduling) -- confirm the target architecture or add __syncwarp()
 * between stages before reusing this code. */
__device__ void skl_scan(int i, float* input, float *output, uint8_t *sbase, float *maxs) {
float *s_data = (float*)sbase;
int tid = threadIdx.x;
int tids = tid << 1;                 // index of this thread's even element
int eltOffs = blockIdx.x * 32 + tid; // global offset of the first load
// Load 32 elements with two strided loads per thread.
s_data[tid] = input[eltOffs];
s_data[tid + 16] = input[eltOffs + 16];
// NO SYNC HERE
// Five unrolled scan stages (distances 1, 2, 4, 8, 16); the bit masks
// select, per stage, the partial-sum element folded into the upper half.
s_data[tids | 1] += s_data[tids];
s_data[(tids | 3) - (tid & 1)] += s_data[tids & 0xFFFFFFFC | 1];
s_data[(tids | 7) - (tid & 3)] += s_data[tids & 0xFFFFFFF8 | 3];
s_data[(tids | 15) - (tid & 7)] += s_data[tids & 0xFFFFFFF0 | 7];
s_data[(tids | 31) - (tid & 15)] += s_data[tids & 0xFFFFFFE0 | 15];
// NO Interleaved SYNCS here.
output[eltOffs] = s_data[tid];
output[eltOffs + 16] = s_data[tid + 16];
if(tid == 0)
maxs[i] = s_data[31]; // block total consumed by the combine phase
}
// Replace with handcoded scan that does not do conditionals.
// This already exists in the scan work I did with Erik.
/*
__device__ int sklansky(int i, float* input0, float *output0, uint8_t *sbase,float *maxs) {
uint32_t t2 = ((blockIdx.x*32)+((threadIdx.x&4294967294)|(threadIdx.x&1)));
uint32_t t9 = ((threadIdx.x&4294967292)|(threadIdx.x&3));
uint32_t t14 = ((threadIdx.x&4294967288)|(threadIdx.x&7));
uint32_t t19 = ((threadIdx.x&4294967280)|(threadIdx.x&15));
((float*)sbase)[threadIdx.x] = (((threadIdx.x&1)<1) ? input0[t2] : (input0[((blockIdx.x*32)+((threadIdx.x&4294967294)|0))]+input0[t2]));
//__syncthreads();
((float*)(sbase+128))[threadIdx.x] = (((threadIdx.x&3)<2) ? ((float*)sbase)[t9] : (((float*)sbase)[((threadIdx.x&4294967292)|1)]+((float*)sbase)[t9]));
//__syncthreads();
((float*)sbase)[threadIdx.x] = (((threadIdx.x&7)<4) ? ((float*)(sbase+128))[t14] : (((float*)(sbase+128))[((threadIdx.x&4294967288)|3)]+((float*)(sbase+128))[t14]));
//__syncthreads();
((float*)(sbase+128))[threadIdx.x] = (((threadIdx.x&15)<8) ? ((float*)sbase)[t19] : (((float*)sbase)[((threadIdx.x&4294967280)|7)]+((float*)sbase)[t19]));
//__syncthreads();
((float*)sbase)[threadIdx.x] = ((threadIdx.x<16) ? ((float*)(sbase+128))[threadIdx.x] : (((float*)(sbase+128))[15]+((float*)(sbase+128))[threadIdx.x]));
//__syncthreads();
output0[((blockIdx.x*32)+threadIdx.x)] = ((float*)sbase)[threadIdx.x];
if (threadIdx.x == 0)
maxs[i] = ((float*)sbase)[31];
return 0;
}
*/
/* ------------------------------------------------------------------------
The Scan kernel (Thousand(s?) of elements! NO SYNCS AT ALL)
--------------------------------------------------------------------- */
/* Large scan built from 32 chunked skl_scan calls plus a scan of the chunk
 * totals, then two distribution passes that add each preceding chunk total
 * back into the outputs. Hard-wired to a 16-thread launch over 32*32
 * elements (see main); shared memory must hold 32 floats of scan scratch
 * followed by 32 floats of chunk maxima.
 *
 * NOTE(review): like skl_scan, this contains no __syncthreads() at all and
 * depends on warp-synchronous execution of the 16 threads -- not safe on
 * Volta+ without __syncwarp(). */
__global__ void kernel(float* input0,
float* output0,
float* maxout){
extern __shared__ __attribute__ ((aligned(16))) uint8_t sbase[];
// float *maxs = (float*)(sbase+(sizeof(float)*64));
// maxs lives directly after the 32-float scan scratch area.
float *maxs = (float*)(sbase+(sizeof(float)*32));
// Scan each of the 32 chunks of 32 elements; chunk totals go to maxs.
for (int i = 0; i < 32; i ++) {
//sklansky(i,input0+i*32,output0+i*32,sbase,maxs);
skl_scan(i,input0+i*32,output0+i*32,sbase,maxs);
}
float v; // discard this value
//sklansky(0,maxs,maxs,sbase,&v);
// Scan the chunk totals in place so maxs[k] holds the prefix sum of totals.
skl_scan(0,maxs,maxs,sbase,&v);
// distribute (now in two phases)
// 15 thread pass: chunks 1..15 get the total of everything before them.
if (threadIdx.x > 0) {
for (int j = 0; j < 32; j ++) {
output0[(blockIdx.x*32)+(threadIdx.x*32+j)] += maxs[threadIdx.x-1];
}
}
// 16 thread pass: chunks 16..31.
for (int j = 0; j < 32; j ++) {
output0[((blockIdx.x+16)*32)+(threadIdx.x*32+j)] += maxs[threadIdx.x+15];
}
// Export the scanned chunk totals (two elements per thread).
maxout[threadIdx.x] = maxs[threadIdx.x];
maxout[threadIdx.x+16] = maxs[threadIdx.x+16];
}
#define N 32*32
/**
 * @brief Driver: scans N = 32*32 ones with the sync-free scan kernel
 *        (1 block, 16 threads, 64 floats of dynamic shared memory) and
 *        prints the result and the chunk totals.
 */
int main(void) {
    float v[N];
    float r[N];
    float m[32];
    float *dv;
    float *dr;
    float *dm;
    // All-ones input; r pre-filled with a sentinel so unwritten slots show.
    for (int i = 0; i < N; i ++) {
        v[i] = 1.0;
        r[i] = 7.0;
    }
    cudaMalloc((void**)&dv,N*sizeof(float));
    cudaMalloc((void**)&dr,N*sizeof(float));
    cudaMalloc((void**)&dm,32*sizeof(float));
    cudaMemcpy(dv,v,N*sizeof(float),cudaMemcpyHostToDevice);
    //kernel<<<1,32,32*3*(sizeof(float))>>>(dv,dr,dm);
    kernel<<<1,16,32*2*(sizeof(float))>>>(dv,dr,dm);
    cudaMemcpy(r,dr,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(m,dm,32*sizeof(float),cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i ++) {
        printf("%f ",r[i]);
    }
    printf("\n ------ \n");
    for (int i = 0; i < 32; i ++) {
        printf("%f ",m[i]);
    }
    // BUG FIX: the device buffers were never released.
    cudaFree(dv);
    cudaFree(dr);
    cudaFree(dm);
    return 0;
}
/*
__global__ void kernel(float* input0,
float* output0,
float* maxout){
extern __shared__ __attribute__ ((aligned(16))) uint8_t sbase[];
float *maxs = (float*)(sbase+(sizeof(float)*64));
for (int i = 0; i < 32; i ++) {
sklansky(i,input0+i*32,output0+i*32,sbase,maxs);
}
float v; // discard this value
sklansky(0,maxs,maxs,sbase,&v);
// distribute
if (threadIdx.x > 0) {
for (int j = 0; j < 32; j ++) {
output0[(blockIdx.x*32)+(threadIdx.x*32+j)] += maxs[threadIdx.x-1];
}
}
maxout[threadIdx.x] = maxs[threadIdx.x];
}
*/
|
4,153 | #include <cuda_runtime.h>
#include <stdio.h>
// Debug kernel: every thread prints its thread/block coordinates plus the
// block and grid dimensions (device-side printf; output order is arbitrary).
__global__ void checkIndex(void) {
printf("threadIdx: (%d, %d, %d) || blockIdx: (%d, %d, %d) || blockDim:(%d, %d, %d) \n"
"gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, gridDim.z);
}
/**
 * @brief Demo: prints the launch configuration from the host, then lets
 *        every device thread report its own coordinates via checkIndex.
 */
int main(int argc, char **argv) {
    const int nElem = 6; // number of data elements to cover
    // One-dimensional launch: 3 threads per block, ceil-divide so the grid
    // covers all nElem elements.
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x);
    // Host-side view of the configuration.
    printf("grid.x %d grid.y %d grid.z %d \n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d \n", block.x, block.y, block.z);
    // Device-side view: each thread prints its indices.
    checkIndex <<< grid, block >>> ();
    // Reset the device before exiting.
    cudaDeviceReset();
    return 0;
}
|
4,154 | #include <cuda_runtime.h>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 16
/* Write the augmented matrix to "out.txt": first the size, then the square
 * part row by row (augmented column skipped), then the solution column.
 * AB is a_size x (a_size+1), row-major. */
void save_to_file(double *AB, const int a_size) {
    FILE *f = fopen("out.txt", "w+");
    if (f == NULL) {
        /* BUG FIX: fopen can fail (permissions, full disk); the old code
         * passed NULL straight to fprintf. */
        perror("save_to_file: fopen out.txt");
        return;
    }
    fprintf(f, "%d\n", a_size);
    for(int i = 0; i < a_size*(a_size + 1); i++) {
        if((i + 1) % (a_size+1) == 0) { /* augmented column: end the row */
            fprintf(f, "\n");
            continue;
        }
        fprintf(f, "%lf ", AB[i]);
    }
    /* Solution vector = the augmented column of each row. */
    for(int i = 0; i < a_size; i++)
        fprintf(f, "%lf ", AB[a_size + i*(a_size + 1)]);
    fprintf(f, "\n");
    fclose(f);
}
/* Allocate an a x b matrix of doubles (caller frees). */
double *malloc_matrix(const int a, const int b) {
    /* BUG FIX: was sizeof(double *) -- the element type is double, not a
     * pointer (same size on LP64, wrong on any ABI where they differ). */
    return (double*)malloc(sizeof(double)*a*b);
}
/* Load an augmented matrix from a text file: first the size, then the
 * square part (row by row, augmented column slots skipped), then the
 * right-hand-side column. Exits on open failure.
 *
 * @param a_size out: matrix dimension read from the file
 * @param name   path of the input file
 * @return newly allocated a_size x (a_size+1) row-major matrix */
double *load_from_file(int *a_size, char *name) {
    FILE *f = fopen(name, "r");
    if (f == NULL) {
        /* BUG FIX: the old code passed NULL straight to fscanf and crashed;
         * fail loudly instead. */
        fprintf(stderr, "load_from_file: cannot open %s\n", name);
        exit(1);
    }
    int size;
    fscanf(f, "%d", &size);
    double *matrix_ab = malloc_matrix(size, size + 1);
    for(int i = 0; i < size*(size + 1); i++) {
        if((i+1) % (size + 1) == 0) /* leave the augmented slot for later */
            continue;
        fscanf(f, "%lf", &matrix_ab[i]);
    }
    for(int i = 0; i < size; i++) {
        fscanf(f, "%lf", &matrix_ab[size + i*(size + 1)]);
    }
    fclose(f);
    *a_size = size;
    return matrix_ab;
}
/* Print an a x b row-major matrix, tab-separated, one row per line. */
void print_matrix(double *matrix, const int a, const int b) {
    for(int row = 0; row < a; row++) {
        for(int col = 0; col < b; col++)
            printf("%lf\t", matrix[row*b + col]);
        printf("\n");
    }
}
/* Print the augmented matrix in the same format save_to_file writes:
 * the size, the square part row by row (augmented column omitted), then
 * the augmented column as a single line. */
void print_output(double *AB, const int a_size) {
    const int stride = a_size + 1; /* row width including the augmented slot */
    printf("%d\n", a_size);
    for(int row = 0; row < a_size; row++) {
        for(int col = 0; col < a_size; col++)
            printf("%lf\t", AB[row*stride + col]);
        printf("\n");
    }
    for(int row = 0; row < a_size; row++)
        printf("%lf\t", AB[a_size + row*stride]);
    printf("\n");
}
/* Read the matrix size from stdin; returns 0 when input is missing or
 * malformed. */
int load_size() {
    int size = 0; /* BUG FIX: was uninitialized when scanf failed */
    if (scanf("%d", &size) != 1)
        size = 0;
    return size;
}
/* Fill an augmented size x (size+1) matrix from stdin: first the square
 * part row by row (augmented slots skipped), then the right-hand side. */
void load_ab_matrix(double *a, const int size) {
    const int stride = size + 1; /* row width of the augmented matrix */
    for(int row = 0; row < size; row++)
        for(int col = 0; col < size; col++)
            scanf("%lf", &a[row*stride + col]);
    for(int row = 0; row < size; row++)
        scanf("%lf", &a[size + row*stride]);
}
/* Read a whole problem from stdin: dimension, then the augmented matrix.
 * Returns a newly allocated size x (size+1) matrix (caller frees). */
double *load_input(int *size) {
    *size = load_size();                          /* dimension first */
    double *ab = malloc_matrix(*size, *size + 1); /* augmented [A|B] storage */
    load_ab_matrix(ab, *size);                    /* then the coefficients */
    return ab;
}
/* If the pivot AB[column][column] is (near) zero, add a lower row that has
 * a usable pivot onto the pivot row. One thread per affected element; every
 * thread repeats the same serial pivot search. */
__global__ void replace_zero_gpu(double *AB, int rows, int columns, int column) {
    if(fabs(AB[column*columns + column]) <= 1e-4) {
        int row = column;
        for(; row < rows; row++) {
            if(fabs(AB[row*columns + column]) > 1e-4)
                break;
        }
        /* BUG FIX: if no row has a usable pivot (singular matrix) the old
         * code indexed AB with row == rows and read past the end. */
        if(row == rows)
            return;
        int threadId = blockDim.x*blockIdx.x + threadIdx.x;
        if(threadId + column >= columns)
            return;
        int zero = column*columns + column + threadId;  /* pivot-row element */
        int chosen = row*columns + column + threadId;   /* donor-row element */
        AB[zero] += AB[chosen];
    }
}
// Forward-elimination step for one pivot: one thread per element of the
// sub-matrix strictly below and right-of-or-at the pivot column. Each
// element gets f * (pivot-row element) subtracted, where f is the target
// row's elimination multiplier.
__global__ void column_elimination_gpu(double *AB, int rows, int columns, int column) {
int threadId = blockDim.x*blockIdx.x + threadIdx.x;
// (rows-1-column) rows below the pivot x (columns-column) columns from it.
if(threadId >= (rows - 1 - column)*(columns - column))
return;
int el_row = column + threadId/(columns - column) + 1; // target row (below pivot)
int el_col = column + threadId%(columns - column);     // target column
int el = el_col + el_row*columns;        // element being updated
int upper_el = el_col + column*columns;  // same column, pivot row
int main_el = column + column*columns;   // the pivot element itself
int main2_el = column + el_row*columns;  // pivot-column entry of target row
double f = AB[main2_el]/AB[main_el];     // elimination multiplier for the row
AB[el] -= f*AB[upper_el];
}
// Multiply the entries of column `row` (rows 0..row-1) by the value stored
// in the augmented slot of row `row` (AB[columns*(row+1)-1]). Launched with
// <<<1, row>>>, so threadIdx.x < row and no bounds check is performed.
// NOTE(review): presumably part of back-substitution -- these products are
// later folded into the augmented column by reverse_row_elimination; confirm.
__global__ void multiple_column(double *AB, int rows, int columns, int row) {
int threadId = threadIdx.x;
AB[(threadId * columns) + row] *= AB[columns*(row + 1) - 1];
}
// Back-substitution for one row: tree-reduces the entries right of the
// diagonal into a single value, folds it into the augmented column, then
// solves the row's unknown (AB[x_el] /= AB[diag_el]) and normalizes the
// pivot to 1. Launched with <<<1, cols>>>.
// NOTE(review): threads exit via `return` before the in-loop
// __syncthreads(), so the barrier executes under divergent control flow --
// undefined behavior per the CUDA programming guide; restructure so every
// thread reaches the barrier.
// NOTE(review): when columns-2-row <= 1 the loop body never runs and all
// launched threads execute the tail concurrently; this only appears safe
// because they write identical values -- verify.
__global__ void reverse_row_elimination(double *AB, int rows, int columns, int row) {
int threadId = threadIdx.x;
int cols = columns - 2 - row;           // entries right of the diagonal
int start_index = row*columns + row + 1; // first such entry
int j = cols%2;                          // carries the odd leftover element
for(int i = cols/2; i > 0; i/=2) {
if(threadId >= i)
return;
AB[start_index + threadId] += (AB[start_index + threadId + i + j]);
AB[start_index + threadId + i + j] = 0;
if(j == 1)
i++;
j = i%2;
__syncthreads();
}
int x_el = (row + 1)*columns - 1; // augmented (RHS) slot of this row
int diag_el = row*columns + row;  // diagonal (pivot) element
if(diag_el + 1 != x_el) {
AB[x_el] -= AB[diag_el + 1];      // fold the reduced sum into the RHS
AB[diag_el + 1] = 0.0;
}
AB[x_el] /= AB[diag_el];          // solve for this row's unknown
AB[diag_el] = 1.0;                // normalize the pivot
}
// Parallel tree reduction of the first `columns` entries of AB into AB[0];
// `rows` and `row` are unused.
// NOTE(review): appears unused -- start_gaussian_elimination_gpu never
// launches it.
// NOTE(review): same divergent-__syncthreads() hazard as
// reverse_row_elimination: threads `return` before reaching the barrier.
__global__ void sum_row(double *AB, int rows, int columns, int row) {
int threadId = threadIdx.x;
int j = columns%2; // odd leftover element carried into the next step
for(int i = columns/2; i > 0; i/=2) {
if(threadId >= i)
return;
AB[threadId] += AB[threadId + i + j];
__syncthreads();
if(j == 1)
i++;
j = i%2;
}
}
/* Run Gauss-Jordan elimination on the device: forward elimination per
 * pivot column, then back substitution from the bottom row up. AB is the
 * rows x cols augmented matrix, updated in place. */
void start_gaussian_elimination_gpu(double *AB, int rows, int cols) {
    double *AB_gpu;
    cudaMalloc(&AB_gpu, sizeof(double)*rows*cols);
    cudaMemcpy(AB_gpu, (void*)AB, sizeof(double)*rows*cols, cudaMemcpyHostToDevice);
    int block_size;
    /* Forward elimination: fix each pivot, then zero the column below it. */
    for(int column = 0; column < cols - 1; column++) {
        block_size = (cols - column - 1)/THREADS_PER_BLOCK + 1;
        replace_zero_gpu<<<block_size, THREADS_PER_BLOCK>>>(AB_gpu, rows, cols, column);
        /* API FIX: cudaThreadSynchronize() is deprecated; use the
         * equivalent cudaDeviceSynchronize(). */
        cudaDeviceSynchronize();
        block_size = ((rows - column)*(cols - column) - 1)/THREADS_PER_BLOCK + 1;
        column_elimination_gpu<<<block_size, THREADS_PER_BLOCK>>>(AB_gpu, rows, cols, column);
        cudaDeviceSynchronize();
    }
    /* Back substitution, bottom row upwards. */
    for(int row = rows - 1; row >= 0; row--) {
        reverse_row_elimination<<<1, cols>>>(AB_gpu, rows, cols, row);
        /* BUG FIX: launching <<<1, 0>>> (row == 0) is an invalid launch
         * configuration; skip the column scaling for the top row. */
        if(row > 0)
            multiple_column<<<1, row>>>(AB_gpu, rows, cols, row);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(AB, (void*)AB_gpu, sizeof(double)*rows*cols, cudaMemcpyDeviceToHost);
    cudaFree(AB_gpu);
}
/* Entry point: load the system from the file named on the command line,
 * solve it on the GPU, print before/after, and write out.txt. */
int main(int argc, char ** argv) {
    /* BUG FIX: argv[1] was dereferenced without checking argc. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
        return 1;
    }
    int size;
    double *AB = load_from_file(&size, argv[1]);
    print_output(AB, size);
    start_gaussian_elimination_gpu(AB, size, size + 1);
    printf("\n\n");
    print_output(AB, size);
    save_to_file(AB, size);
    return 0;
}
|
4,155 | #include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; optionally terminate the
// process with the error code (used via the gpuErrchk macro above).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return; // nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Component-wise float3 addition.
__inline__ __device__
float3 operator+(float3 a, float3 b)
{
    float3 sum;
    sum.x = a.x + b.x;
    sum.y = a.y + b.y;
    sum.z = a.z + b.z;
    return sum;
}
// Scalar-times-vector product for float3.
__inline__ __device__
float3 operator*(float s, float3 v)
{
    float3 scaled;
    scaled.x = s * v.x;
    scaled.y = s * v.y;
    scaled.z = s * v.z;
    return scaled;
}
// Linear interpolation between a and b at parameter t (t=0 -> a, t=1 -> b).
__inline__ __device__
float3 lerp(float3 a, float3 b, float t)
{
    // BUG FIX: `1.0 - t` promoted the blend weight to double in otherwise
    // all-float math; use a float literal.
    return (1.0f-t) * a + t * b;
}
// Flat global thread index for a 1D grid of 1D blocks.
inline __device__
uint getGlobalIdx1d1d()
{
    const uint blockOffset = blockIdx.x * blockDim.x;
    return blockOffset + threadIdx.x;
}
|
4,156 | #include "includes.h"
// Scatter-add along dimension `dim`: for every "flat" position across the
// non-indexed dimensions, add src's slice i to res's slice index[i]
// (indices in `index` are 1-based -- note the -1 below). Each thread owns
// one flat position and loops over all idx_size index entries; atomicAdd
// makes duplicate target indices safe.
// NOTE(review): `index` is a float array holding integral values (Torch
// convention); confirm values fit exactly in float before trusting the cast.
__global__ void THCudaTensor_kernel_indexAdd( float *res, float *src, long* res_stride, float *index, long res_nDim, int dim, long idx_size, long src_size, long size_dim )
{
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = src_size / idx_size; // elements per indexed slice
if (thread_idx < flat_size)
{
long coeff = 0;
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx; // remaining flat offset to decompose
int targetIdx = 0;         // offset into src (excluding the dim term)
int resIdx = 0;            // offset into res (excluding the dim term)
// Decompose the flat offset into per-dimension coordinates, skipping
// dimension `dim` (its contribution is added after the loop).
for (int d=0; d<res_nDim; d++)
{
if (d < dim)
{
// Dimensions before `dim`: src strides differ from res strides by
// the dim-size factor, hence the division/idx_size rescaling.
long stride_d = res_stride[d] / size_dim;
coeff = leftover / stride_d;
leftover -= coeff * stride_d;
targetIdx += coeff * stride_d * idx_size;
resIdx += coeff * res_stride[d];
}
else if (d > dim)
{
// Dimensions after `dim`: strides coincide between src and res.
coeff = leftover / res_stride[d];
leftover -= coeff * res_stride[d];
targetIdx += coeff * res_stride[d];
resIdx += coeff * res_stride[d];
}
}
// index[i] is 1-based; atomicAdd tolerates repeated target indices.
atomicAdd(&res[resIdx + ((long)(index[i])-1)*res_stride[dim]], src[targetIdx + i*res_stride[dim]]);
}
}
}
4,157 | #include <cuda_runtime.h>
#include <sys/time.h>
#include "iostream"
#include "iomanip"
#include "cmath"
#include <stdio.h>
using namespace std;
#define pi 3.14159265358979323846
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Device-side predictor-corrector (Heun-style trapezoidal) step of the
// during-fault system:
//   delta' = omega - omega0
//   omega' = (pi*60/5) * (0.8 - 0.65*sin(delta))
// NOTE(review): 0.8 and 0.65 are presumably per-unit mechanical power and
// faulted electrical power coefficients -- confirm against the study case.
// @param deltapresent  out: angle after the step
// @param omegapresent  out: speed after the step
// @param deltaprevious in: angle before the step
// @param omegaprevious in: speed before the step
// @param omega0        synchronous speed (rad/s)
// @param c_h           integration step size
__device__ void Differentiald(double *deltapresent,double *omegapresent,double deltaprevious,double omegaprevious,double omega0,double c_h)
{
double temp,temp1,ddeltapresent,domegapresent,ddeltaprevious,domegaprevious;
ddeltaprevious =omegaprevious-omega0;              // delta' at the start
temp=deltaprevious+(c_h)*(ddeltaprevious);         // Euler predictor for delta
domegaprevious =((pi*60)/5)* (0.8-0.65*sin(temp)); // omega' at predicted delta
temp1 = omegaprevious+(c_h*(domegaprevious));      // Euler predictor for omega
ddeltapresent = temp1-omegaprevious;               // delta' at predicted point
*deltapresent = deltaprevious + (c_h/2)*(ddeltaprevious+ddeltapresent); // trapezoidal corrector
domegapresent =((pi*60)/5)* (0.8-(0.65*sin(*deltapresent)));
//domegapresent = 32-(173.68*sin(*deltapresent-(10*pi)/180));
*omegapresent = omegaprevious+(c_h/2)*(domegaprevious+domegapresent);   // trapezoidal corrector
}
// Device-side predictor-corrector step of the post-fault system; identical
// structure to Differentiald but with the post-fault electrical power
// coefficient 1.4625 in place of 0.65.
__device__ void Differentialpostfaultd(double *deltapresent,double *omegapresent,double deltaprevious,double omegaprevious,double omega0,double c_h)
{
double temp,temp1,ddeltapresent,domegapresent,ddeltaprevious,domegaprevious;
ddeltaprevious =omegaprevious-omega0;                // delta' at the start
temp=deltaprevious+(c_h)*(ddeltaprevious);           // Euler predictor for delta
domegaprevious =((pi*60)/5)* (0.8-1.4625*sin(temp)); // omega' at predicted delta
temp1 = omegaprevious+(c_h*(domegaprevious));        // Euler predictor for omega
ddeltapresent = temp1-omegaprevious;
*deltapresent = deltaprevious + (c_h/2)*(ddeltaprevious+ddeltapresent); // trapezoidal corrector
domegapresent =((pi*60)/5)* (0.8-(1.4625*sin(*deltapresent)));
*omegapresent = omegaprevious+(c_h/2)*(domegaprevious+domegapresent);   // trapezoidal corrector
}
// Parareal fine propagator: one thread per coarse interval. Thread idx
// integrates [g_a[idx], g_a[idx+1]] with fine step f_h, using the faulted
// system while the interval starts before t = 0.8 s and the post-fault
// system otherwise, then stores the fine endpoint and the fine-minus-coarse
// correction.
// NOTE(review): last parameter is declared `int c_h` while the host passes
// a double coarse step; it is unused here, but the truncating conversion
// looks unintended -- confirm.
// NOTE(review): the switch uses g_a[idx] < 0.8 (interval start) whereas the
// host coarse sweep uses h_a[i+1] <= 0.8 (interval end) -- the boundary
// interval is treated differently; confirm which is intended.
// @param g_delta,g_omega         coarse trajectory (read)
// @param g_a                     interval boundary times, length num_steps+1
// @param g_fine_tempd,g_fine_tempo unused scratch (kept for signature compatibility)
// @param g_del_fine,g_omega_fine out: fine solution at interval ends (idx+1)
// @param g_diff_delta,g_diff_omega out: corrections vs the coarse prediction
__global__ void gpuparareal(double *g_delta,double *g_omega,double *g_a,const double omega0,const double f_h,double *g_fine_tempd,double *g_fine_tempo,double *g_del_fine,double *g_omega_fine,double *g_diff_delta,double *g_diff_omega,int num_steps,int c_h)
{
const int idx = threadIdx.x + (blockIdx.x*blockDim.x);
if(idx>=num_steps)
{
return;
}
double tempd,tempo,tint,tfin,fine_step,fine_tempd,fine_tempo;
tint = g_a[idx];       // interval start time
tfin = g_a[idx+1];     // interval end time
tempd = g_delta[idx];  // initial angle from the coarse solution
tempo = g_omega[idx];  // initial speed from the coarse solution
//printf("g_a[%d] = %lf \n",idx,g_a[idx]);
// __syncthreads();
bool flag = (g_a[idx]<0.8); // fault still active at interval start?
if(flag)
{
// Step the during-fault model across the interval with the fine step.
int umax = round((tfin-tint)/f_h);
for (int u=0;u<umax;u++)
{
fine_step = tint+f_h;
Differentiald(&fine_tempd,&fine_tempo,tempd,tempo,omega0,f_h);
tempd=fine_tempd;
tempo=fine_tempo;
tint=fine_step;
}
// printf("idx = %d The value of fine is %lf for time %lf\n",idx,(tempd*180/pi),tfin);
}
if(!flag)
{
// Same march with the post-fault model.
int umax = round((tfin-tint)/f_h);
for (int u=0;u<umax;u++)
{
fine_step = tint+f_h;
Differentialpostfaultd(&fine_tempd,&fine_tempo,tempd,tempo,omega0,f_h);
tempd=fine_tempd;
tempo=fine_tempo;
tint=fine_step;
}
}
g_del_fine[idx+1]=tempd;                   // fine endpoint (angle)
g_omega_fine[idx+1]=tempo;                 // fine endpoint (speed)
g_diff_delta[idx] = tempd - g_delta[idx+1]; // parareal correction terms
g_diff_omega[idx] = tempo - g_omega[idx+1];
// printf("idx = %d The value of fine is %lf for time %lf\n",idx,(tempd*180/pi),tfin);
//printf("The value of fine is %lf for time %lf\n",(tempd*180/pi),tfin);
}
// Parareal correction: add each interval's fine-minus-coarse difference to
// the new coarse prediction. One thread per interval; writes slot idx+1.
__global__ void gpucorrection(double *d_diff_delta,double *d_diff_omega,double *d_pred_delta,double *d_pred_omega,double *d_corec_delta,double *d_corec_omega, int num_steps)
{
    const int step = threadIdx.x + (blockIdx.x*blockDim.x);
    if (step < num_steps)
    {
        d_corec_delta[step+1] = d_pred_delta[step] + d_diff_delta[step];
        d_corec_omega[step+1] = d_pred_omega[step] + d_diff_omega[step];
    }
}
// Host-side mirror of Differentiald: one predictor-corrector (Heun-style
// trapezoidal) step of the during-fault system
//   delta' = omega - omega0, omega' = (pi*60/5)*(0.8 - 0.65*sin(delta)).
// Used by the sequential coarse sweep in main.
void Differential(double *deltapresent,double *omegapresent,double deltaprevious,double omegaprevious,double omega0,double c_h)
{
double temp,temp1,ddeltapresent,domegapresent,ddeltaprevious,domegaprevious;
ddeltaprevious =omegaprevious-omega0;              // delta' at the start
temp=deltaprevious+(c_h)*(ddeltaprevious);         // Euler predictor for delta
domegaprevious =((pi*60)/5)* (0.8-0.65*sin(temp)); // omega' at predicted delta
temp1 = omegaprevious+(c_h*(domegaprevious));      // Euler predictor for omega
ddeltapresent = temp1-omegaprevious;
*deltapresent = deltaprevious + (c_h/2)*(ddeltaprevious+ddeltapresent); // trapezoidal corrector
domegapresent =((pi*60)/5)* (0.8-(0.65*sin(*deltapresent)));
//domegapresent = 32-(173.68*sin(*deltapresent-(10*pi)/180));
*omegapresent = omegaprevious+(c_h/2)*(domegaprevious+domegapresent);   // trapezoidal corrector
}
// Host-side mirror of Differentialpostfaultd: one predictor-corrector step
// of the post-fault system (electrical power coefficient 1.4625).
void Differentialpostfault(double *deltapresent,double *omegapresent,double deltaprevious,double omegaprevious,double omega0,double c_h)
{
double temp,temp1,ddeltapresent,domegapresent,ddeltaprevious,domegaprevious;
ddeltaprevious =omegaprevious-omega0;                // delta' at the start
temp=deltaprevious+(c_h)*(ddeltaprevious);           // Euler predictor for delta
domegaprevious =((pi*60)/5)* (0.8-1.4625*sin(temp)); // omega' at predicted delta
temp1 = omegaprevious+(c_h*(domegaprevious));        // Euler predictor for omega
ddeltapresent = temp1-omegaprevious;
*deltapresent = deltaprevious + (c_h/2)*(ddeltaprevious+ddeltapresent); // trapezoidal corrector
domegapresent =((pi*60)/5)* (0.8-(1.4625*sin(*deltapresent)));
*omegapresent = omegaprevious+(c_h/2)*(domegaprevious+domegapresent);   // trapezoidal corrector
}
int main()
{
cudaEvent_t kernel_start;
cudaEvent_t kernel_stop;
struct timeval start,end;
double tint,tfin,omega0;
float fElapsedTime,faverage,fsum[10];
double fMemoryCopyTime[10];
double fSequential_time[10],tet[10];
//host variables
double *h_omega,*h_delta,*h_a,c_h,f_h,dint,*h_del_fine,*h_omega_fine,*h_diff_delta,*h_diff_omega;
//device variables
double *d_omega,*d_delta,*d_a,*d_del_fine,*d_omega_fine,*d_fine_tempd,*d_fine_tempo,*d_diff_delta,*d_diff_omega,*d_pred_delta,*d_corec_delta,*d_pred_omega,*d_corec_omega;
double *h_pred_delta,*h_corec_delta,*h_pred_omega,*h_corec_omega,*h_fine_tempd,*h_fine_tempo,et[110];
cout<<"The initial time value is : "<<endl;
cin>>tint;
cout<<"The final time value is: "<<endl;
cin>>tfin;
cout<<"The coarse grid time step value is: "<<endl;
cin>>c_h;
cout<<"The fine grid step size value is: "<<endl;
cin>>f_h;
cout<<"Enter the intial value of delta in degrees: "<<endl;
cin>>dint;
int num_steps = ((tfin-tint)/c_h)+1;
cout<<"the number of steps for coarse : "<<num_steps<<endl;
size_t num_steps_bytes_coarse = num_steps*sizeof(double);
int fine_size = ((tfin-tint)/f_h)+1;
cout<<"The number of steps for fine : "<<fine_size<<endl;
size_t num_steps_bytes_fine = fine_size*sizeof(double);
h_omega = new double[num_steps];
h_delta = new double[num_steps];
h_a = new double[num_steps];
h_del_fine = new double[num_steps];
h_omega_fine = new double[num_steps];
h_fine_tempd= new double[fine_size];
h_fine_tempo=new double[fine_size];
h_pred_delta= new double[num_steps];
h_pred_omega= new double[num_steps];
h_corec_delta=new double[num_steps];
h_corec_omega = new double[num_steps];
h_diff_delta = new double[num_steps];
h_diff_omega = new double [num_steps];
omega0=2*pi*60;
h_omega[0]=omega0;
h_delta[0]=(dint*pi)/180;
cout<<"The value in radians is: "<<h_delta[0]<<endl;
h_a[0] =0;
h_a[0]=tint;
num_steps =num_steps - 1;
fine_size =fine_size - 1;
cout<<num_steps<<endl;
for(int k=0;k<2;k++)
{
fMemoryCopyTime[k]=0;
fSequential_time[k]=0;
// gettimeofday(&start,NULL);
if(k==0)
{
gettimeofday(&start,NULL);
for (int i=0;i<num_steps;i++)
{
h_a[i+1]=h_a[i]+c_h; //a[i] contains all the time step required for coarse grid calculation
if(h_a[i+1]<=0.8)
{
//cout << "a= " <<h_a[i+1]<<__LINE__<<endl;
// h_a[i+1]=h_a[i]+c_h; //a[i] contains all the time step required for coarse grid calculation
Differential(&h_delta[i+1],&h_omega[i+1],h_delta[i],h_omega[i],omega0,c_h);
//cout<< "The coarse grid values are "<< (h_delta[i+1]*180)/pi<<" for time"<<h_a[i+1]<<"for array element "<<i<<"for k value "<<k<<endl;
//cout<<"break 2"<<endl;
}
if(h_a[i+1]>0.8)
{
//cout << "a= " <<h_a[i]<<__LINE__<<endl;
//h_a[i+1]=h_a[i]+c_h;
Differentialpostfault(&h_delta[i+1],&h_omega[i+1],h_delta[i],h_omega[i],omega0,c_h);
//cout<< "The coarse grid values are "<< (h_delta[i+1]*180)/pi<<" for time"<<h_a[i+1]<<"for array element "<<i<<"for k value "<<k<<endl;
}
}
gettimeofday(&end,NULL);
fSequential_time[k] = ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
cout<<" The Sequential Execution time is : "<<fSequential_time[k]<<" ms"<<endl;
}
else
{
gettimeofday(&start,NULL);
for(int i=1;i<num_steps;i++)
{
h_delta[i]=h_corec_delta[i];
h_omega[i]=h_corec_omega[i];
h_a[i+1]=h_a[i]+c_h; //a[i] contains all the time step required for coarse grid calculation
if(h_a[i+1]<=0.8)
{
// h_a[i+1]=h_a[i]+c_h; //a[i] contains all the time step required for coarse grid calculation
Differential(&h_delta[i+1],&h_omega[i+1],h_delta[i],h_omega[i],omega0,c_h);
//cout<< "The coarse grid values are "<< (h_delta[i+1]*180)/pi<<" for time"<<h_a[i+1]<<"for array element "<<i<<"for k value "<<k<<endl;
}
if(h_a[i+1]>0.8)
{
//h_a[i+1]=h_a[i]+c_h;
Differentialpostfault(&h_delta[i+1],&h_omega[i+1],h_delta[i],h_omega[i],omega0,c_h);
//cout<< "The coarse grid values are "<< (h_delta[i+1]*180)/pi<<" for time"<<h_a[i+1]<<"for array element "<<i<<"for k value "<<k<<endl;
}
}
gettimeofday(&end,NULL);
fSequential_time[k] = ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
cout<<" The Sequential Execution time is : "<<fSequential_time[k]<<" ms"<<endl;
}
//cout<<" The Sequential Execution time is : "<<fSequential_time<<" ms"<<endl;
// cudaEvent_t kernel_start;
// cudaEvent_t kernel_stop;
cudaError_t cudaSetDevice(int device);
cudaSetDevice(0);
CHECK(cudaEventCreate(&kernel_start));
CHECK(cudaEventCreate(&kernel_stop));
//Allocating memory on GPU for device variables
CHECK(cudaMalloc((double**)&d_delta,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_omega,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_fine_tempd,num_steps_bytes_fine+8));
CHECK(cudaMalloc((double**)&d_fine_tempo,num_steps_bytes_fine+8));
CHECK(cudaMalloc((double**)&d_del_fine,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_omega_fine,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_a,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_diff_delta,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_diff_omega,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_pred_delta,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_pred_omega,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_corec_delta,num_steps_bytes_coarse+8));
CHECK(cudaMalloc((double**)&d_corec_omega,num_steps_bytes_coarse+8));
//copying the data to device from host
gettimeofday(&start,NULL);
CHECK(cudaMemcpy(d_delta,h_delta,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_omega,h_omega,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_a,h_a,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
gettimeofday(&end,NULL);
fMemoryCopyTime[k] = ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
//Kernel call
int ilen = 256;
dim3 block (ilen,1,1);
dim3 grid ((num_steps+block.x-1)/block.x,1,1);
cout << "1D Grid Dimension" << endl;
cout << "\tNumber of Blocks along X dimension: " << grid.x << endl;
cout << "1D Block Dimension" << endl;
cout << "\tNumber of threads along X dimension: " << block.x << endl;
//kernel function
et[0]=0;
for(int i=0;i<110;i++)
{
CHECK(cudaEventRecord(kernel_start));
gpuparareal<<<grid,block>>>(d_delta,d_omega,d_a,omega0,f_h,d_fine_tempd,d_fine_tempo,d_del_fine,d_omega_fine,d_diff_delta,d_diff_omega,num_steps,c_h);
CHECK(cudaEventRecord(kernel_stop));
CHECK(cudaEventSynchronize(kernel_stop));
CHECK(cudaEventElapsedTime(&fElapsedTime,kernel_start,kernel_stop));
et[i]=fElapsedTime;
}
// cout << "Kernel with Compiler Implementation = " << fElapsedTime << " msecs" << endl;
// gettimeofday(&start,NULL);
CHECK(cudaMemcpy(h_fine_tempd,d_fine_tempd,num_steps_bytes_fine+8,cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_fine_tempo,d_fine_tempo,num_steps_bytes_fine+8,cudaMemcpyDeviceToHost));
gettimeofday(&start,NULL);
CHECK(cudaMemcpy(h_diff_delta,d_diff_delta,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_diff_omega,d_diff_omega,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_del_fine,d_del_fine,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_omega_fine,d_omega_fine,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
gettimeofday(&end,NULL);
fMemoryCopyTime[k]+= ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
cout<< "Memory transfer time = " << fMemoryCopyTime[k] <<" ms"<<endl;
h_pred_delta[0]=h_del_fine[1];
h_pred_omega[0]=h_omega_fine[1];
cout<<"Fine values are: "<<"\tdelta"<< (h_del_fine[1]*180/pi)<<"\t omega"<<h_omega_fine[1]<<endl;
gettimeofday(&start,NULL);
for (int i=1;i<num_steps;i++)
{
h_a[i+1]=h_a[i]+c_h; //a[i] contains all the time step required for coarse grid calculation
if(h_a[i+1]<=0.8)
{
//h_a[i+1]=h_a[i]+c_h;
Differential(&h_pred_delta[i],&h_pred_omega[i],h_pred_delta[i-1],h_pred_omega[i-1],omega0,c_h);
//cout<<"The predicted value is "<<(h_pred_delta[i]*180)/pi<<endl;
}
if(h_a[i+1]>0.8)
{
//h_a[i+1]=h_a[i]+c_h;
Differentialpostfault(&h_pred_delta[i],&h_pred_omega[i],h_pred_delta[i-1],h_pred_omega[i-1],omega0,c_h);
//cout<<"The predicted value is "<<(h_pred_delta[i]*180)/pi<<endl;
}
}
gettimeofday(&end,NULL);
fSequential_time[k] += ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
gettimeofday(&start,NULL);
CHECK(cudaMemcpy(d_diff_delta,h_diff_delta,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_diff_omega,h_diff_omega,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pred_delta,h_pred_delta,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_pred_omega,h_pred_omega,num_steps_bytes_coarse+8,cudaMemcpyHostToDevice));
gettimeofday(&end,NULL);
double time = 0;
time = ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
// int ilen = 256;
// dim3 block (ilen,1,1);
// dim3 grid ((num_steps+block.x-1)/block.x,1,1);
CHECK(cudaEventRecord(kernel_start));
gpucorrection<<<grid,block>>>(d_diff_delta,d_diff_omega,d_pred_delta,d_pred_omega,d_corec_delta,d_corec_omega,num_steps);
CHECK(cudaEventRecord(kernel_stop));
CHECK(cudaEventSynchronize(kernel_stop));
CHECK(cudaEventElapsedTime(&fElapsedTime,kernel_start,kernel_stop));
cout<<"Elapsed time is for correction is " <<fElapsedTime<<endl;
gettimeofday(&start,NULL);
CHECK(cudaMemcpy(h_corec_delta,d_corec_delta,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_corec_omega,d_corec_omega,num_steps_bytes_coarse+8,cudaMemcpyDeviceToHost));
gettimeofday(&end,NULL);
time+= ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
cout<<"Correction memory copy time is: "<<time<<" ms"<<endl;
/*for (int i=0;i<num_steps;i++)
{
corec_delt[i+1] = h_diff_delta[i]+pred_delt[i];
corec_omega[i+1] = h_diff_omega[i]+pred_omega[i];
//cout<< "The corrected grid values are "<< (corec_delt[i+1]*180)/pi<<" for time"<<h_a[i+1]<<"for array element "<<i<<endl;
}*/
fSequential_time[k] += ((end.tv_sec*1e6+end.tv_usec)-(start.tv_sec*1e6+start.tv_usec))/1000;
faverage=0;
for(int i=10;i<110;i++)
{
faverage+=et[i];
}
fsum[k]=faverage/100;
cout<<"The gpu execution time is "<<fsum[k]<<"\t"<<"sequential time is "<<fSequential_time[k]<<" ms"<<endl;
tet[k]=fsum[k]+fSequential_time[k]+fMemoryCopyTime[k];
cout<<"the elapsed time is "<<tet[k]<<" ms"<<endl;
CHECK(cudaEventDestroy(kernel_start));
CHECK(cudaEventDestroy(kernel_stop));
CHECK(cudaFree(d_omega));
CHECK(cudaFree(d_delta));
CHECK(cudaFree(d_fine_tempd));
CHECK(cudaFree(d_fine_tempo));
CHECK(cudaFree(d_del_fine));
CHECK(cudaFree(d_omega_fine));
CHECK(cudaFree(d_a));
CHECK(cudaFree(d_diff_delta));
CHECK(cudaFree(d_diff_omega));
CHECK(cudaFree(d_pred_delta));
CHECK(cudaFree(d_pred_omega));
CHECK(cudaFree(d_corec_delta));
CHECK(cudaFree(d_corec_omega));
CHECK(cudaDeviceReset());
}
/* delete[] h_omega;
delete[] h_delta;
delete[] h_a;
delete[] h_del_fine;
delete[] h_omega_fine;
delete[] h_fine_tempd;
delete[] h_fine_tempo;
delete[] h_pred_delta;
delete[] h_pred_omega;
delete[] h_corec_delta;
delete[] h_corec_omega;
delete[] h_diff_delta;
delete[] h_diff_omega;*/
}
|
4,158 | #include "includes.h"
/* Gaussian (RBF) kernel map over an nx-by-ns matrix stored flat (i = si*nx + xi).
 * Overwrites each k[i] with exp(-g * (x2[xi] + s2[si] - 2*k[i])); k[i] is
 * presumably pre-loaded with the cross inner products <x_xi, s_si> by the
 * caller — TODO confirm. Grid-stride loop, so any launch geometry works. */
__global__ void _kgauss64map(int nx, int ns, double *x2, double *s2, double *k, double g) {
    const int total = nx * ns;
    const int step = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < total; idx += step) {
        const int xi = idx % nx;   // row within the column
        const int si = idx / nx;   // column
        k[idx] = exp(-g * (x2[xi] + s2[si] - 2 * k[idx]));
    }
}
4,159 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
/* Run-time configuration with defaults, overridable from argv in main(). */
struct Startup{
int seed = time(nullptr); /* RNG seed (defaults to current time) */
int threadsPerBlock = 256; /* CUDA block size for both kernels */
int datasetSize = 10000; /* NOTE(review): declared but never read in this file */
int range = 100; /* random samples are drawn from [0, range) */
} startup;
/* A host-side buffer of float samples. `values` is heap-allocated (malloc)
 * and owned by the caller; `size` is the element count. */
struct DataSet{
float* values;
int size;
};
/* Size in bytes of a DataSet's value buffer. */
inline int sizeOfDataSet(DataSet data)
{
    return data.size * sizeof(float);
}
/* Build a DataSet of `size` random floats in [0, startup.range).
 * The caller owns (and must free) the returned .values buffer. */
DataSet generateRandomDataSet(int size){
    DataSet data;
    data.size = size;
    data.values = (float*)malloc(sizeof(float) * size);
    for (int idx = 0; idx < size; ++idx)
        data.values[idx] = (float)(rand() % startup.range);
    return data;
}
/* Compare two DataSets for exact equality, printing a diagnosis.
 * Returns true iff sizes match and every element is bitwise equal.
 * Fix vs. original: the size check ran AFTER the element loop, so the loop
 * could read past the end of the smaller buffer when sizes differed; the
 * size is now checked first. */
bool CompareDataSet(DataSet d1, DataSet d2){
    if (d1.size != d2.size) {printf("Datasets are not equal size\n"); return false;};
    for (int i = 0; i < d1.size; i++)
        if (d1.values[i] != d2.values[i]){
            printf("Dataset is different");
            return false;
        }
    printf("D1 and D2 are equal!");
    return true;
}
/* Simple-moving-average kernel, global-memory version: one thread per output
 * element averages sample_size consecutive inputs starting at its own index.
 * Requires input_size >= result_size + sample_size - 1 (caller guarantees). */
__global__ void DeviceCalculateSM_Global(float* input, int input_size, float* result, int result_size, int sample_size){
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= result_size)
        return;
    float acc = 0;
    for (int k = 0; k < sample_size; ++k)
        acc += input[tid + k];
    result[tid] = acc / sample_size;
}
/* Simple-moving-average kernel, shared-memory version.
 * Each block cooperatively stages its input window (blockDim.x outputs plus
 * sample_size trailing elements) into dynamic shared memory, then each thread
 * averages sample_size consecutive cached values.
 * Requires dynamic shared memory of sizeof(float)*(blockDim.x+sample_size),
 * which the launcher passes as the third launch parameter. */
__global__ void DeviceCalculateSM_Shared(float* input, int input_size, float* result, int result_size, int sample_size){
int id_x = blockDim.x * blockIdx.x + threadIdx.x;
if (id_x < input_size){
extern __shared__ float cache[];
int cachedSize = sample_size + blockDim.x;
/* strided cooperative load: thread t fills cache[t], cache[t+blockDim.x], ... */
for (int i = 0; i < cachedSize/blockDim.x+1; i++){
int cacheId = threadIdx.x+ i*blockDim.x;
if (cacheId < cachedSize && cacheId+blockDim.x *blockIdx.x < input_size)
cache[cacheId] = input[cacheId+blockDim.x *blockIdx.x];
}
/* NOTE(review): this __syncthreads() sits inside the divergent
 * `id_x < input_size` branch; it is only safe when every thread of the
 * block takes the branch — confirm the launch geometry guarantees that
 * for the last block. */
__syncthreads();
float sum = 0;
for (int i = 0; i < sample_size; i++){
if(i + threadIdx.x < cachedSize && i + id_x < input_size)
sum = sum + cache[i+threadIdx.x];
}
sum = sum/sample_size;
/*store in global memory*/
if (id_x < result_size)
result[id_x] = sum;
}
}
/* Compute the simple moving average of `input` with window `sample_size` on
 * the GPU; `usesharedmemory` selects the shared-memory kernel over the
 * global-memory one. Returns a freshly malloc'd DataSet the caller frees.
 * Fixes vs. original:
 *  - the validation used && (the compound condition could never trigger);
 *    it now rejects sample_size < 1, empty input, and window > input;
 *  - device_input/device_result were never allocated or uploaded, so the
 *    kernels ran on uninitialized pointers; they are now allocated, the
 *    input is copied up, and the result is copied back into host_result;
 *  - events and device buffers were leaked; they are now released. */
DataSet CalculateSM(DataSet input, int sample_size, bool usesharedmemory){
    if(sample_size < 1 || input.size < 1 || sample_size > input.size)
    {
        printf("Error! Invalid Sample Size");
        exit(-1);
    }
    int result_size = input.size-sample_size+1;
    DataSet host_result = {(float*)malloc(sizeof(float)*(result_size)), result_size};
    float* device_input, *device_result;
    /* allocate device buffers and upload the input (missing in the original) */
    cudaMalloc((void**)&device_input, sizeOfDataSet(input));
    cudaMalloc((void**)&device_result, sizeof(float)*result_size);
    cudaMemcpy(device_input, input.values, sizeOfDataSet(input), cudaMemcpyHostToDevice);
    int threads_needed = host_result.size;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (usesharedmemory){
        int shared_memory_allocation_size = sizeof(float)*(startup.threadsPerBlock+sample_size);
        cudaEventRecord(start);
        DeviceCalculateSM_Shared<<<threads_needed/ startup.threadsPerBlock + 1, startup.threadsPerBlock, shared_memory_allocation_size>>> (device_input, input.size, device_result, host_result.size, sample_size);
        cudaEventRecord(stop);
    }else{
        cudaEventRecord(start);
        DeviceCalculateSM_Global<<<threads_needed/ startup.threadsPerBlock + 1, startup.threadsPerBlock>>> (device_input, input.size, device_result, host_result.size, sample_size);
        cudaEventRecord(stop);
    }
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    if (usesharedmemory) printf("Shared Memory: "); else printf("Global Memory: ");
    printf("Kernel executed in %f milliseconds\n", milliseconds);
    /* download the result and release device resources (missing in the original) */
    cudaMemcpy(host_result.values, device_result, sizeof(float)*result_size, cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(device_input);
    cudaFree(device_result);
    return host_result;
}
/* Print every value of a DataSet as one comma-separated line. */
void printDataSet(DataSet data){
    for (int idx = 0; idx < data.size; ++idx)
        printf("%.6g, ", data.values[idx]);
    printf("\n");
}
/* Demo driver: parse overrides from argv, generate 100 random samples,
 * run the moving average (window 2) with both kernels, and compare.
 * NOTE(review): strcmp is used but neither <cstring> nor <string.h> is
 * included above — presumably pulled in transitively by cuda.h; confirm.
 * NOTE(review): "Block threads" contains a space, so it can never match a
 * single argv token — verify the intended flag spelling. */
int main(int argc, char** argv){
for (int i = 0; i < argc; i++){
if (strcmp(argv[i], "Range")==0 && i+1 < argc) startup.range = atoi(argv[i+1]);
if (strcmp(argv[i], "Seed")==0 && i+1 < argc) startup.seed = atoi(argv[i+1]);
if (strcmp(argv[i], "Block threads")==0 && i+1 < argc) startup.threadsPerBlock = atoi(argv[i+1]);
}
srand(startup.seed);
DataSet data = generateRandomDataSet(100);
printDataSet( data );
/* run both kernel variants on the same input */
DataSet shared = CalculateSM(data, 2, true);
DataSet global = CalculateSM(data, 2, false);
printDataSet( shared );
printf("\n");
printDataSet( global );
printf("\n");
printf("Each should be %d elements in size\n", global.size);
CompareDataSet(global, shared);
}
|
4,160 | #if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "utility.h"
#include "csv.hpp"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
using namespace std;
std::vector<string> sv;
std::vector<string> sourceIP;
std::vector<string> destinationIP;
std::vector<string> timestamp;
std::vector<string> IPstring_src;
std::vector<string> IPstring_dst;
/* Split `str` on every occurrence of `del` and return the pieces in order
 * (empty pieces included; a trailing delimiter yields no trailing empty).
 * Fix vs. original: find_first_of() returns std::string::npos, which was
 * stored in an `int` (implementation-defined truncation); when the
 * delimiter was absent, `first` was reset to 0 and the loop never
 * terminated. Positions are now kept in size_type and npos is clamped to
 * the string length. */
std::vector<std::string> split_string_2(std::string str, char del) {
    std::vector<std::string> result;
    std::string::size_type first = 0;
    while (first < str.size()) {
        std::string::size_type last = str.find_first_of(del, first);
        if (last == std::string::npos) {
            last = str.size();   // no more delimiters: take the rest
        }
        result.push_back(str.substr(first, last - first));
        first = last + 1;
    }
    return result;
}
/* argv[1]: session CSV file; argv[2]: N (parsed but unused below).
 * Pass 1 (CPU): per record, strip quotes from the source IP (column 4),
 * convert the dotted quad to a 32-bit value via a binary string, AND it
 * with a /24 mask and re-render it as dotted-quad text.
 * Pass 2 (GPU): same mask applied to all IPs at once with thrust::bit_and.
 * Both passes are wall-clock timed and the first 10 rows are echoed. */
int main( int argc, char* argv[] ) {
int counter = 0;
struct timespec startTime, endTime, sleepTime;
int N = atoi(argv[2]);
int netmask;
std::map <int,int> found_flag;
const string session_file = std::string(argv[1]);
vector<vector<string>> session_data;
try {
Csv objCsv(session_file);
if (!objCsv.getCsv(session_data)) {
cout << "read ERROR" << endl;
return 1;
}
}
catch (...) {
cout << "EXCEPTION (session)" << endl;
return 1;
}
thrust::host_vector<unsigned long> V1(session_data.size());
thrust::host_vector<unsigned long> V2(session_data.size());
thrust::host_vector<unsigned long> V3(session_data.size());
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
/* NOTE(review): the "c = c =" double assignment below is harmless but
 * was presumably meant to be a single assignment. */
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
/* build a 32-char binary string from the four dotted-quad octets */
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP(sessionIPstring);
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
// cout << srcIP << "," << bit_sessionIP.to_ulong() << endl;
/* mask 1 */
int netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
trans2 <<= 32-netmask;
// trans2 >>= netmask;
bit_sessionIP &= trans2;
/* re-render the masked address as dotted-quad text, 8 bits at a time */
std::string rev_string = bit_sessionIP.to_string();
string bs1 = rev_string.substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = rev_string.substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = rev_string.substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = rev_string.substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string revIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
if(row2 < 10)
{
cout << srcIP << "," << bit_sessionIP_cuda.to_ulong() << "," << bit_sessionIP_cuda << "," << trans2 << "," << revIP << endl;
}
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
/* ---- GPU pass: collect all IPs, then mask them in one transform ---- */
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
V1[row2] = bit_sessionIP_cuda.to_ulong();
}
netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
// trans2 <<= netmask;
trans2 <<= 32-netmask;
thrust::fill(V2.begin(), V2.end(), trans2.to_ulong());
thrust::device_vector<unsigned long> DV1 = V1;
thrust::device_vector<unsigned long> DV2 = V2;
thrust::device_vector<unsigned long> DV3 = V3;
/* element-wise DV3 = DV1 & DV2 on the device */
thrust::transform(DV1.begin(), DV1.end(), DV2.begin(), DV3.begin(), thrust::bit_and<unsigned long>());
for(int i = 0; i < 10; i++)
{
std::bitset<32> bs1(DV1[i]);
std::bitset<32> cuda_sessionIP(DV3[i]);
std::string rev_string_2 = cuda_sessionIP.to_string();
string rev1 = rev_string_2.substr(0,8);
int bi1 = bitset<8>(rev1).to_ulong();
string rev2 = rev_string_2.substr(8,8);
int bi2 = bitset<8>(rev2).to_ulong();
string rev3 = rev_string_2.substr(16,8);
int bi3 = bitset<8>(rev3).to_ulong();
string rev4 = rev_string_2.substr(24,8);
int bi4 = bitset<8>(rev4).to_ulong();
string revIP_2 = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
std::cout << DV1[i] << "," << bs1 << "," << DV2[i] << "," << trans2 << "," << DV3[i] << "," << revIP_2 << std::endl;
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
}
|
4,161 | #include <algorithm>
#include <iostream>
using namespace std;
/* Price a European call by backward induction on a binomial tree.
 * cache is a (timeSteps+1) x (timeSteps+1) row-major scratch matrix; the
 * final price ends up in cache[0]. Must run as a SINGLE block covering
 * i = 0..timeSteps — the induction relies on __syncthreads(), which cannot
 * synchronize across blocks.
 * Fixes vs. original: threads returned early for i > timeSteps and dropped
 * out of the while-loop at different iterations, so __syncthreads() executed
 * under divergent control flow (undefined behaviour); there was also no
 * barrier between writing the terminal payoffs and reading them. Every
 * thread now walks the full level loop and only in-range threads write. */
__global__
void calcEuropeanOption(int timeSteps,
                        double startPrice,
                        double strikePrice,
                        double riskFree,
                        double delta,
                        double u,
                        double p_u,
                        double * cache) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int colDim = timeSteps + 1;
    // terminal payoffs at the last tree level
    if (i <= timeSteps)
        cache[timeSteps * colDim + i] = max(startPrice * pow(u, 2 * i - timeSteps) - strikePrice, 0.0);
    // backward induction; every thread reaches every barrier
    for (int level = timeSteps - 1; level >= 0; --level) {
        __syncthreads();
        if (i <= level)
            cache[level * colDim + i] = (p_u * cache[(level + 1) * colDim + i + 1] +
                (1 - p_u) * cache[(level + 1) * colDim + i]) * exp(-riskFree * delta);
    }
}
/* Price a 100-step European call and print the result.
 * Fixes vs. original: finalPrice was an UNINITIALIZED pointer used as the
 * cudaMemcpy destination (undefined behaviour) — it is now a stack double;
 * the unused host-side cache buffer (leaked) was removed. */
int main() {
    double startPrice = 100;
    double strikePrice = 100;
    double timeToExpiry = 1.5;
    double vol = 0.12;          // annualized volatility
    double riskFree = 0.005;    // risk-free rate
    int timeSteps = 100;
    double delta = timeToExpiry / timeSteps;           // time per step
    double u = exp(vol * sqrt(delta));                 // up-move factor
    double p_u = (exp(riskFree * delta) - 1/u) / (u - 1/u);  // risk-neutral up probability
    int N = timeSteps + 1;
    double * d_cache;
    cudaMalloc(&d_cache, N * N * sizeof(double));
    calcEuropeanOption<<<(timeSteps + 255)/256, 256>>>(timeSteps, startPrice, strikePrice,
                                                       riskFree, delta, u, p_u, d_cache);
    // blocking copy also synchronizes with the kernel; price lives in cache[0]
    double finalPrice = 0.0;
    cudaMemcpy(&finalPrice, d_cache, sizeof(double), cudaMemcpyDeviceToHost);
    cout << "Price: " << finalPrice << endl;
    cudaFree(d_cache);
    return 0;
}
|
4,162 | #include <stdio.h>
#include <time.h>
#include "RSA_kernel.cu"
#define BUZZ_SIZE 10002
/* RSA parameters and shared work buffers (globals; single-threaded use).
 * p, q: the fixed primes; n = p*q; t = (p-1)(q-1); e[]/d[]: candidate
 * public/private exponent pairs. m[]: message char codes; mm[]: codes
 * shifted to 1..26 for the GPU path; temp[]: ciphertext residues kept for
 * CPU decryption; en[]: printable ciphertext (residue+96, -1 terminated);
 * res[]: GPU results. NOTE: i and j double as loop indices all over the
 * file, and prime() clobbers j — order of calls matters. */
int p, q, n, t, flag, e[100], d[100], temp[BUZZ_SIZE], j, m[BUZZ_SIZE],
en[BUZZ_SIZE], mm[BUZZ_SIZE], res[BUZZ_SIZE], i;
char msg[BUZZ_SIZE];
int prime(long int);
void generate_input(int);
void ce();
long int cd(long int);
void encrypt();
void decrypt();
void encrypt_gpu();
void decrypt_gpu();
int numChars; /* message length without the trailing newline */
int threadsPerBlock = 1024;
int blocksPerGrid;
time_t tt;
double time_encrypt_cpu, time_decrypt_cpu; /* seconds */
float time_encrypt_gpu = 0.0; /* seconds (converted from ms after timing) */
float time_decrypt_gpu = 0.0;
/* Driver: generate a random 10000-char lowercase message, derive RSA key
 * pairs from the fixed primes p=157, q=373, then encrypt/decrypt on both
 * CPU and GPU and report the GPU speed-ups. */
int main() {
p = 157;
q = 373;
srand((unsigned) time(&tt));/* Intializes random number generator */
generate_input(10000);
FILE *f = fopen("input.txt", "r");
if (f == NULL) {
perror("Error opening file");
return (1);
}
if (fgets(msg, BUZZ_SIZE, f) != NULL) {
//printf("String read: %s\n", msg);
printf("Reading input file...done(");
}
fclose(f);
numChars = strlen(msg) - 1; /* drop the trailing newline */
msg[numChars] = '\0';
printf("numChars: %d)\n\n", numChars);
blocksPerGrid = (numChars + threadsPerBlock - 1) / threadsPerBlock;
/*
printf("\nENTER MESSAGE\n");
fflush(stdin);
scanf("%s", msg);
numChars = strlen(msg);
blocksPerGrid =(numChars + threadsPerBlock - 1) / threadsPerBlock;
*/
for (i = 0; msg[i] != '\0'; i++) {
m[i] = msg[i];
mm[i] = msg[i] - 96; /* 'a'..'z' -> 1..26 */
}
n = p * q;
t = (p - 1) * (q - 1); /* Euler totient of n */
ce();
/*
printf("\nPOSSIBLE VALUES OF e AND d ARE\n");
for (i = 0; i < j - 1; i++)
printf("\n%ld\t%ld", e[i], d[i]);
*/
encrypt();
decrypt();
encrypt_gpu();
decrypt_gpu();
printf("GPU encryption speed up: %f\n",
time_encrypt_cpu / time_encrypt_gpu);
printf("GPU decryption speed up: %f\n\n",
time_decrypt_cpu / time_decrypt_gpu);
return 0;
}
/* Write `size` random lowercase letters plus a newline to input.txt.
 * Prints nothing further if the file cannot be opened. */
void generate_input(int size) {
    printf("\nGenerating input file... ");
    FILE *fp = fopen("input.txt", "wb");
    if (fp == NULL)
        return;
    for (int idx = 0; idx < size; ++idx) {
        int r = rand() % 26;
        fprintf(fp, "%c", r + 97);   /* 97 == 'a' */
    }
    fprintf(fp, "\n");
    fclose(fp);
    printf("done\n");
}
/* Trial-division primality test: returns 1 if pr is prime, else 0.
 * WARNING: writes the GLOBAL j (sets it to floor(sqrt(pr))); the local
 * `int i` shadows the global i, so i is untouched. Callers (ce()) rely on
 * this exact side-effect profile. */
int prime(long int pr) {
int i;
j = sqrt(pr);
for (i = 2; i <= j; i++) {
if (pr % i == 0)
return 0;
}
return 1;
}
/* Fill e[]/d[] with up to 99 (public, private) exponent pairs:
 * e is a prime < t that does not divide t and differs from p and q;
 * d = cd(e) is its modular inverse mod t. Iterates with the GLOBAL i. */
void ce() {
int k;
k = 0;
for (i = 2; i < t; i++) {
if (t % i == 0)
continue;
flag = prime(i); /* note: prime() clobbers global j */
if (flag == 1 && i != p && i != q) {
e[k] = i;
flag = cd(e[k]);
if (flag > 0) {
d[k] = flag;
k++;
}
if (k == 99)
break;
}
}
}
/* Modular inverse of x mod t (global t): finds the smallest k = 1 + m*t
 * divisible by x and returns k/x, i.e. d with d*x ≡ 1 (mod t).
 * Loops forever if gcd(x, t) != 1 — ce() only passes suitable primes. */
long int cd(long int x) {
long int k = 1;
while (1) {
k = k + t;
if (k % x == 0)
return (k / x);
}
}
/* CPU RSA encryption: per character computes c = m^e mod n by `e`
 * sequential multiply-and-reduce steps. Fills temp[] (raw residues,
 * consumed by decrypt()) and en[] (residue+96, -1 terminated), writes
 * encrypted_cpu.txt, and records time_encrypt_cpu in seconds. */
void encrypt() {
double start_encrypt, end_encrypt;
start_encrypt = clock();
printf("CPU starts encrypting...\n");
int pt, ct, key = e[0], k, len;
printf("\ne=%d\n",key);
i = 0;
len = numChars;
while (i != len) {
pt = m[i];
pt = pt - 96; /* 'a'..'z' -> 1..26 */
k = 1;
/* naive modular exponentiation: `key` multiplications mod n */
for (j = 0; j < key; j++) {
k = k * pt;
k = k % n;
}
temp[i] = k;
ct = k + 96;
en[i] = ct;
i++;
}
end_encrypt = clock();
time_encrypt_cpu = (double) (end_encrypt - start_encrypt) / CLOCKS_PER_SEC;
printf("Encryption time taken by CPU: %f s\n", time_encrypt_cpu);
/*
en[i] = -1;
printf("\nCPU ENCRYPTED MESSAGE IS\n");
for (i = 0; en[i] != -1; i++)
printf("%d ", en[i]);
*/
printf("Saving CPU encrypted file... ");
en[i] = -1; /* sentinel consumed by decrypt() */
FILE *fp = fopen("encrypted_cpu.txt", "wb");
if (fp != NULL) {
for (int k = 0; en[k] != -1; k++) {
fprintf(fp, "%d", en[k]);
}
fclose(fp);
printf("done\n\n");
}
}
/* GPU RSA encryption: uploads mm[] (codes 1..26), key e[0] and modulus n,
 * runs the rsa kernel (from RSA_kernel.cu) over numChars characters,
 * copies the residues back into res[], writes encrypted_gpu.txt, and
 * records time_encrypt_gpu in seconds. */
void encrypt_gpu() {
cudaEvent_t start_encrypt, stop_encrypt;
int key = e[0];
//printf("\nkey=%d, n=%d\n",key,n);
cudaSetDevice(1); /* NOTE(review): hard-coded second GPU — fails on single-GPU hosts; confirm intended */
int *dev_num, *dev_key, *dev_den;
int *dev_res;
cudaMalloc((void **) &dev_num, numChars * sizeof(int));
cudaMalloc((void **) &dev_key, sizeof(int));
cudaMalloc((void **) &dev_den, sizeof(int));
cudaMalloc((void **) &dev_res, numChars * sizeof(int));
cudaMemcpy(dev_num, mm, numChars * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_key, &key, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_den, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_res, res, numChars * sizeof(int), cudaMemcpyHostToDevice);
cudaEventCreate(&start_encrypt);
cudaEventCreate(&stop_encrypt);
cudaEventRecord(start_encrypt);
printf("GPU starts encrypting...\n");
rsa<<<blocksPerGrid, threadsPerBlock>>>(dev_num,dev_key,dev_den,dev_res);
cudaEventRecord(stop_encrypt);
cudaEventSynchronize(stop_encrypt);
cudaThreadSynchronize(); /* deprecated API; cudaDeviceSynchronize() is the modern equivalent */
cudaEventElapsedTime(&time_encrypt_gpu, start_encrypt, stop_encrypt);
cudaMemcpy(res, dev_res, numChars * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_num);
cudaFree(dev_key);
cudaFree(dev_den);
cudaFree(dev_res);
time_encrypt_gpu /= 1000; /* ms -> s */
printf("Encryption time taken by GPU: %f s\n", time_encrypt_gpu);
/*
printf("\nGPU ENCRYPTED MESSAGE IS\n");
for (i = 0; i < numChars; i++)
printf("%d ", res[i]+96);
printf("\n");
*/
printf("Saving GPU encrypted file... ");
FILE *fp = fopen("encrypted_gpu.txt", "wb");
if (fp != NULL) {
for (i = 0; i < numChars; i++) {
fprintf(fp, "%d", res[i] + 96);
}
fclose(fp);
printf("done\n\n");
}
}
/* GPU RSA decryption: re-uses the same rsa kernel with the private key
 * d[0] on the residues left in res[] by encrypt_gpu(), copies the plain
 * codes back, writes decrypted_gpu.txt, and records time_decrypt_gpu in
 * seconds. Must be called after encrypt_gpu(). */
void decrypt_gpu() {
cudaEvent_t start_decrypt, stop_decrypt;
int key = d[0];
//printf("\nkey=%d, n=%d\n",key,n);
cudaSetDevice(1); /* NOTE(review): hard-coded second GPU — confirm intended */
int *dev_num, *dev_key, *dev_den;
int *dev_res;
cudaMalloc((void **) &dev_num, numChars * sizeof(int));
cudaMalloc((void **) &dev_key, sizeof(int));
cudaMalloc((void **) &dev_den, sizeof(int));
cudaMalloc((void **) &dev_res, numChars * sizeof(int));
cudaMemcpy(dev_num, res, numChars * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_key, &key, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_den, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_res, res, numChars * sizeof(int), cudaMemcpyHostToDevice);
cudaEventCreate(&start_decrypt);
cudaEventCreate(&stop_decrypt);
cudaEventRecord(start_decrypt);
printf("GPU starts decrypting...\n");
rsa<<<blocksPerGrid, threadsPerBlock>>>(dev_num,dev_key,dev_den,dev_res);
cudaEventRecord(stop_decrypt);
cudaEventSynchronize(stop_decrypt);
cudaThreadSynchronize(); /* deprecated API; cudaDeviceSynchronize() is the modern equivalent */
cudaEventElapsedTime(&time_decrypt_gpu, start_decrypt, stop_decrypt);
cudaMemcpy(res, dev_res, numChars * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_num);
cudaFree(dev_key);
cudaFree(dev_den);
cudaFree(dev_res);
time_decrypt_gpu /= 1000; /* ms -> s */
printf("Decryption time taken by GPU: %f s\n", time_decrypt_gpu);
/*
printf("\nGPU DECRYPTED MESSAGE IS\n");
for (i = 0; i < numChars; i++)
printf("%d ", res[i]+96);
printf("\n");
*/
printf("Saving GPU decrypted file... ");
FILE *fp = fopen("decrypted_gpu.txt", "wb");
if (fp != NULL) {
for (i = 0; i < numChars; i++) {
fprintf(fp, "%c", res[i] + 96);
}
fprintf(fp, "\n");
fclose(fp);
printf("done\n\n");
}
}
/* CPU RSA decryption: per character computes m = c^d mod n on the raw
 * residues saved in temp[] by encrypt() (walking en[] until its -1
 * sentinel). Rebuilds m[] as ASCII, writes decrypted_cpu.txt, and records
 * time_decrypt_cpu in seconds. Must be called after encrypt(). */
void decrypt() {
double start_decrypt, end_decrypt;
start_decrypt = clock();
printf("CPU starts decrypting...\n");
long int pt, ct, key = d[0], k;
printf("\nd=%d\n",key); /* NOTE(review): %d with a long int — should be %ld */
i = 0;
while (en[i] != -1) {
ct = temp[i];
k = 1;
/* naive modular exponentiation: `key` multiplications mod n */
for (j = 0; j < key; j++) {
k = k * ct;
k = k % n;
}
pt = k + 96; /* 1..26 -> 'a'..'z' */
m[i] = pt;
i++;
}
end_decrypt = clock();
time_decrypt_cpu = (double) (end_decrypt - start_decrypt) / CLOCKS_PER_SEC;
printf("Decryption time taken by CPU: %f s\n", time_decrypt_cpu);
/*
m[i] = -1;
printf("\nCPU DECRYPTED MESSAGE IS\n");
for (i = 0; m[i] != -1; i++)
printf("%d ", m[i]);
printf("\n");
*/
printf("Saving CPU decrypted file... ");
m[i] = -1; /* sentinel for the writer loop below */
FILE *fp = fopen("decrypted_cpu.txt", "wb");
if (fp != NULL) {
for (int k = 0; m[k] != -1; k++) {
fprintf(fp, "%c", m[k]);
}
fprintf(fp, "\n");
fclose(fp);
printf("done\n\n");
}
}
|
4,163 | #include <stdio.h>
/* Demonstrates thread-private (local) storage: both the parameter `in` and
 * the local `f` are private to each thread; nothing observable happens. */
__global__ void use_local_memory_GPU(float in)
{
    float f = in;
    (void)f;   // silence unused-variable warnings; value is intentionally discarded
}
/* Demonstrates global memory: each thread writes 2*its-own-index into array.
 * Expects a single block with one thread per element. */
__global__ void use_global_memory_GPU(float *array)
{
    const int idx = threadIdx.x;
    array[idx] = 2.0f * (float) idx;
}
/* Demonstrates shared memory: the block stages `array` into sh_arr, then
 * each thread computes the running average of elements 0..index and clamps
 * its own element down to that average. Expects exactly one block of 128
 * threads (sh_arr is statically sized). The per-thread prefix sum is
 * O(blockDim^2) overall — fine for a demo. */
__global__ void use_shared_memory_GPU(float *array)
{
int i, index = threadIdx.x;
float average, sum = 0.0f;
__shared__ float sh_arr[128];
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
printf("Thread id = %d\t Average = %f\n",index,average);
if (array[index] > average) { array[index] = average; }
/* NOTE(review): this final store is dead — sh_arr is never read again
 * and shared memory does not outlive the block. */
sh_arr[index] = 3.14;
}
/* Demo driver for the three memory-space kernels.
 * Fixes vs. original: the final copy-back passed cudaMemcpyHostToDevice
 * even though it moves d_arr -> h_arr (it is now cudaMemcpyDeviceToHost),
 * and d_arr was never freed. */
int main(int argc, char **argv)
{
    use_local_memory_GPU<<<1, 128>>>(2.0f);
    float h_arr[128];   // convention: h_ variables live on host
    float *d_arr;       // convention: d_ variables live on device (GPU global mem)
    cudaMalloc((void **) &d_arr, sizeof(float) * 128);
    cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, cudaMemcpyHostToDevice);
    use_global_memory_GPU<<<1, 128>>>(d_arr); // modifies the contents of array at d_arr
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
    use_shared_memory_GPU<<<1, 128>>>(d_arr);
    // fixed: direction was cudaMemcpyHostToDevice in the original
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(d_arr);
    return 0;
}
|
4,164 | #include <iostream>
#include "mandel.cuh"
#define INTER_LIMIT 255
/* Escape-time count for z_{n+1} = z_n^2 + c starting at z_0 = 0: returns
 * how many iterations ran before |z| exceeded 2, capped at INTER_LIMIT. */
__device__ int get_inter (thrust::complex<float> c) {
    thrust::complex<float> z(0.0, 0.0);
    int n = 0;
    while (n < INTER_LIMIT && thrust::abs(z) <= 2) {
        z = thrust::pow(z, 2) + c;
        ++n;
    }
    return n;
}
/* One thread per pixel of the w x h escape-time image. `offset` selects
 * which blocks*threads-sized chunk this launch covers, so the host can
 * tile an image larger than one grid. */
__global__ void fill_matrix (int *res, const int w, const int h, thrust::complex<float> c0, const float del_y, const float del_x, const int threads, const int blocks, const int offset) {
    const unsigned int k = threadIdx.x + blockIdx.x*threads + blocks*threads*offset;
    if (k >= w*h)
        return;
    // pixel (k%w, k/w) maps to c0 shifted by per-pixel steps
    thrust::complex<float> shift(del_x * (k%w), del_y * (k/w));
    res[k] = get_inter(c0 + shift);
}
/* Abort with a message when a managed allocation fails (helper for prepare()). */
static void prepare_alloc(void **ptr, size_t bytes) {
    if (cudaSuccess != cudaMallocManaged(ptr, bytes)) {
        std::cerr << "Could not allocate memory";
        exit(EXIT_FAILURE);
    }
}
/* Abort with a message when a host->device copy fails (helper for prepare()). */
static void prepare_upload(void *dst, const void *src, size_t bytes) {
    if (cudaSuccess != cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice)) {
        std::cerr << "Could not copy memory";
        exit(EXIT_FAILURE);
    }
}
/* Render the w x h escape-time image into res_matrix. c0 is the base point,
 * del_x/del_y the per-pixel steps, `threads` the block size; the image is
 * processed in chunks of 1024*threads pixels per kernel launch.
 * Fixes vs. original: the six repetitive error-checked alloc/copy blocks are
 * factored into helpers, and all device allocations are now freed (every
 * one of them was leaked). */
__host__ void prepare (int *res_matrix, const int w, const int h, thrust::complex<float> c0, const float del_y, const float del_x, const int threads) {
    int *d_res_matrix;
    int *d_w;
    int *d_h;
    thrust::complex<float> *d_c0;
    float *d_del_y;
    float *d_del_x;
    cudaSetDevice(0);
    prepare_alloc((void **) &d_res_matrix, sizeof(int)*w*h);
    prepare_alloc((void **) &d_w, sizeof(int));
    prepare_alloc((void **) &d_h, sizeof(int));
    prepare_alloc((void **) &d_c0, sizeof(thrust::complex<float>));
    prepare_alloc((void **) &d_del_y, sizeof(float));
    prepare_alloc((void **) &d_del_x, sizeof(float));
    prepare_upload(d_w, &w, sizeof(int));
    prepare_upload(d_h, &h, sizeof(int));
    prepare_upload(d_c0, &c0, sizeof(thrust::complex<float>));
    prepare_upload(d_del_y, &del_y, sizeof(float));
    prepare_upload(d_del_x, &del_x, sizeof(float));
    int block = 1024;
    int max = ((w*h) / (threads*block)) + 1;
    for (int i = 0; i < max; ++i) {
        fill_matrix<<<block, threads>>> (d_res_matrix, *d_w, *d_h, *d_c0, *d_del_y, *d_del_x, threads, block, i);
        cudaDeviceSynchronize();
    }
    if (cudaSuccess != cudaMemcpy(res_matrix, d_res_matrix, sizeof(int)*w*h, cudaMemcpyDeviceToHost)) {
        std::cerr << "Could not copy memory";
        exit(EXIT_FAILURE);
    }
    /* release device allocations (leaked in the original) */
    cudaFree(d_res_matrix);
    cudaFree(d_w);
    cudaFree(d_h);
    cudaFree(d_c0);
    cudaFree(d_del_y);
    cudaFree(d_del_x);
    return;
}
|
4,165 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <iostream>
#include <float.h>
/* Map a flat point id to its batch index using the per-batch prefix
 * offsets in accu_list; ids beyond the last offset fall into the final batch. */
__device__ int get_batch_id(int* accu_list, int batch_size, int id) {
    int b = 0;
    while (b < batch_size - 1) {
        if (id >= accu_list[b] && id < accu_list[b+1])
            return b;
        ++b;
    }
    return batch_size - 1;
}
/* Count points per bird's-eye-view grid cell: each thread takes one input
 * point (x, y, z triplets in input_coors), quantizes its (x, y) by
 * `resolution`, and atomically increments the matching cell of its batch's
 * output_w x output_l occupancy image.
 * NOTE(review): grid coordinates are not clamped — points with negative
 * coordinates or beyond output_w/output_l index out of bounds; confirm the
 * caller guarantees coors lie in [0, w*res) x [0, l*res). */
__global__ void bev_occupy_gpu_kernel(int batch_size, int input_point_num,
int output_w, int output_l, float resolution,
const float* input_coors,
const int* input_num_list,
int* input_accu_list,
int* output_occupy) {
const int output_img_size = output_w * output_l;
int point_id = threadIdx.x + blockIdx.x * blockDim.x;
if (point_id < input_point_num) {
int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution);
int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution);
int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
int output_idx = batch_id * output_img_size + center_grid_coor_x * output_l + center_grid_coor_y;
atomicAdd(&output_occupy[output_idx], 1);
}
}
/* Host launcher for bev_occupy_gpu_kernel: picks an occupancy-optimal block
 * size, covers all input points with a 1D grid, and launches.
 * Fix vs. original: launch-configuration errors were silently dropped; the
 * launch status is now checked with cudaGetLastError and reported. */
void bev_occupy_gpu_launcher(int batch_size, int input_point_num,
                            int output_w, int output_l, float resolution,
                            const float* input_coors,
                            const int* input_num_list,
                            int* input_accu_list,
                            int* output_occupy) {
    if (batch_size*input_point_num <=0) {
        printf("BevOccupyOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int blockSize;   // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;    // The actual grid size needed, based on input size
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, bev_occupy_gpu_kernel, 0, input_point_num);
    gridSize = (input_point_num + blockSize - 1) / blockSize;
    bev_occupy_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num,
                                                   output_w, output_l, resolution,
                                                   input_coors,
                                                   input_num_list,
                                                   input_accu_list,
                                                   output_occupy);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("BevOccupyOp ERROR: kernel launch failed: %s\n", cudaGetErrorString(err));
}
|
4,166 | /* \file TestShortCircuit.cu
\author Gregory Diamos <gregory.diamos@gatech.edu>
\date Tuesday November 9, 2010
\brief A CUDA assembly test for short-circuiting control flow.
*/
const unsigned int threads = 512;
__device__ bool out[threads];
/* Each of the 512 threads decodes its id into nine booleans and evaluates a
 * fixed short-circuiting boolean expression, recording the result in out[]. */
__global__ void short_circuit()
{
    const unsigned int id = threadIdx.x;
    bool bit[9];
    for (int n = 0; n < 9; ++n)
        bit[n] = (id >> n) & 0x1;
    const bool lhs = (bit[0] && (bit[1] || bit[2])) || (bit[3] || (bit[4] && bit[5]));
    const bool rhs = bit[6] || (bit[7] && bit[8]);
    out[id] = lhs && rhs;
}
int main(int argc, char** argv)
{
    // Launch one block covering every bit pattern exercised by the kernel.
    short_circuit<<<1, threads>>>();
    // FIX: kernel launches are asynchronous — block until the kernel has
    // actually executed before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
|
4,167 | /* nvcc -O2 test_cuda.cu -o test_cuda */
/*
benchmark sma: size=1048576 sample=5 equal=0
sma_cpu=8ms sma_gpu=64ms
benchmark sma: size=1048576 sample=5 equal=0
sma_cpu=8ms sma_gpu=6ms
benchmark sma: size=33554432 sample=5 equal=0
sma_cpu=115ms sma_gpu=49ms
benchmark sma: size=1073741824 sample=5 equal=0
sma_cpu=1575ms sma_gpu=862ms
benchmark sma: size=1048576 sample=30 equal=0
sma_cpu=6ms sma_gpu=8ms
benchmark sma: size=33554432 sample=30 equal=0
sma_cpu=54ms sma_gpu=33ms
benchmark sma: size=1073741824 sample=30 equal=0
sma_cpu=1567ms sma_gpu=880ms
*/
#include <chrono>
#include <iostream>
#include <thread>
#include <cuda_runtime.h>
using namespace std;
using namespace std::chrono;
using namespace std::chrono_literals;
__global__ void sma_gpu_kernel(const int *input, int *output, int size, int sample)
{
    // One thread per element: integer average of the window of up to `sample`
    // elements ending at this thread's index.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;
    // First index of the window, clamped to the start of the array.
    const int first = (tid - sample + 1 > 0) ? (tid - sample + 1) : 0;
    int sum = 0;
    int count = 0;
    for (int i = first; i <= tid; ++i) {
        sum += input[i];
        ++count;
    }
    output[tid] = sum / count;
}
void sma_gpu(const int *input, int *output, int size, int sample)
{
    // Host wrapper: copy input to the device, run the SMA kernel, copy the
    // result back. The blocking cudaMemcpy at the end also synchronizes.
    int *devInput = nullptr;
    int *devOutput = nullptr;
    // FIX: the original ignored every CUDA return code — allocation failure
    // would cascade into kernel faults with no diagnostic.
    if (cudaMalloc((void **)&devInput, sizeof(int) * size) != cudaSuccess ||
        cudaMalloc((void **)&devOutput, sizeof(int) * size) != cudaSuccess) {
        cout << "sma_gpu: cudaMalloc failed" << endl;
        cudaFree(devInput);
        cudaFree(devOutput);
        return;
    }
    cudaMemcpy(devInput, input, sizeof(int) * size, cudaMemcpyHostToDevice);
    const int blockSize = 256;
    const int numBlocks = (size + blockSize - 1) / blockSize;
    sma_gpu_kernel<<<numBlocks, blockSize>>>(devInput, devOutput, size, sample);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        cout << "sma_gpu: kernel launch failed: " << cudaGetErrorString(err) << endl;
    }
    cudaMemcpy(output, devOutput, sizeof(int) * size, cudaMemcpyDeviceToHost);
    cudaFree(devInput);
    cudaFree(devOutput);
}
void sma_cpu(const int *input, int *output, int size, int sample)
{
    // Sliding-window simple moving average with integer division.
    // The window grows until it holds `sample` elements, then slides.
    int window_sum = 0;
    for (int i = 0; i < size; ++i) {
        window_sum += input[i];
        if (i < sample) {
            // Still filling the window: average over the i+1 elements seen so far.
            output[i] = window_sum / (i + 1);
        } else {
            // Window full: drop the element that slid out, average over `sample`.
            window_sum -= input[i - sample];
            output[i] = window_sum / sample;
        }
    }
}
void print(int *output, int size)
{
    // Dump the array space-separated on one line, newline-terminated.
    for (int i = 0; i < size; ++i)
        cout << output[i] << " ";
    cout << endl;
}
void benchmark_sma(int size, int sample) {
int *input = (int *) malloc(sizeof(int) * size);
for (int i = 0; i < size; i++) {
input[i] = (int) (rand() % size);
}
/* begin */
this_thread::sleep_for(200ms);
int *output1 = (int *) malloc(sizeof(int) * size);
memset(output1, 0, sizeof(int) * size);
auto t0 = high_resolution_clock::now();
sma_cpu(input, output1, size, sample);
//print(output1, size);
auto d1 = duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - t0);
this_thread::sleep_for(200ms);
int *output2 = (int *) malloc(sizeof(int) * size);
memset(output2, 0, sizeof(int) * size);
t0 = high_resolution_clock::now();
sma_gpu(input, output2, size, sample);
//print(output2, size);
auto d2 = duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - t0);
int b = memcmp(output1, output2, sizeof(int) * size);
cout << "benchmark sma: size=" << size << " sample=" << sample << " equal=" << b << endl;
cout << "sma_cpu=" << d1.count() << "ms sma_gpu=" << d2.count() << "ms" << endl;
free(output1);
free(output2);
}
int main()
{
    /* hot run: warms up the CUDA context so later timings are meaningful */
    benchmark_sma(1024*1024, 5);
    /* timed runs: two window sizes across three input sizes */
    const int samples[2] = {5, 30};
    const int sizes[3] = {1024*1024, 32*1024*1024, 1024*1024*1024};
    for (int s = 0; s < 2; ++s)
        for (int z = 0; z < 3; ++z)
            benchmark_sma(sizes[z], samples[s]);
    return 0;
}
|
4,168 | // vec_add.cu: Parallel vector add using CUDA
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
// Kernel function, runs on GPU
// Kernel function, runs on GPU: one element per block.
// NOTE(review): indexes by blockIdx only and has no length guard — safe only
// when launched as <<<N, 1>>> with N equal to the vector length (as main does).
__global__ void add_vectors(float *a, float *b, float *c) {
    const int idx = blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
int main(void) {
    int count, i;
    // Report how many CUDA devices are visible.
    cudaGetDeviceCount(&count);
    printf("There are %d GPU devices in your system\n", count);
    int N = 10; // Vector length
    // Create vectors a, b and c in the host (CPU)
    float *a = (float *)malloc(N*sizeof(float));
    float *b = (float *)malloc(N*sizeof(float));
    float *c = (float *)malloc(N*sizeof(float));
    // Initialize a and b
    for (i=0; i<N; i++) {
        a[i] = i - 0.5;
        b[i] = i*i - 3;
    }
    // Create a_dev, b_dev, c_dev on GPU
    float *a_dev, *b_dev, *c_dev;
    cudaMalloc((void **)&a_dev, N*sizeof(float));
    cudaMalloc((void **)&b_dev, N*sizeof(float));
    cudaMalloc((void **)&c_dev, N*sizeof(float));
    // Copy input vectors a and b from host to GPU (c is output-only, no copy needed)
    cudaMemcpy(a_dev, a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev, b, N*sizeof(float), cudaMemcpyHostToDevice);
    // Parallel add c_dev[i] = a_dev[i] + b_dev[i], one block per element
    add_vectors<<< N, 1 >>>(a_dev, b_dev, c_dev);
    // Copy result from GPU to host; this blocking copy also waits for the kernel.
    cudaMemcpy(c, c_dev, N*sizeof(float), cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    // Print result on host (CPU)
    printf("\nVector Addition Result:\n");
    for (i=0; i<N; i++) {
        printf("a[%d] : %0.2f \t+\t", i, a[i]);
        printf("b[%d] : %0.2f \t=\t", i, b[i]);
        printf("c[%d] : %0.2f\n", i, c[i]);
    }
    // FIX: the host buffers were leaked.
    free(a);
    free(b);
    free(c);
    return 0;
}
|
// Element-wise absolute value, computed in float and cast back to T.
// FIX: call fabsf/fabs explicitly instead of abs(), which is ambiguous here
// (this template itself is named `abs`, and C's abs() truncates floating input).
template<typename T>
__device__ void abs(const T* data, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = (T)fabsf((float)data[index]);
    }
}
// Booleans are non-negative: abs is the identity copy.
extern "C"
__global__ void abs_Boolean(const unsigned char* data, unsigned char* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = data[index];
    }
}
// Chars are stored unsigned here: abs is the identity copy.
extern "C"
__global__ void abs_Char(const unsigned short* data, unsigned short* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = data[index];
    }
}
// Element-wise absolute value in double precision.
extern "C"
__global__ void abs_Double(const double* data, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = fabs(data[index]);
    }
}
// Element-wise arccosine, single precision.
template<typename T>
__device__ void acosf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = acos((float)data[idx]);
}
// Element-wise arccosine, double precision.
template<typename T>
__device__ void acosd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = acos((double)data[idx]);
}
// Element-wise inverse hyperbolic cosine, single precision.
template<typename T>
__device__ void acoshf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = acosh((float)data[idx]);
}
// Element-wise inverse hyperbolic cosine, double precision.
template<typename T>
__device__ void acoshd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = acosh((double)data[idx]);
}
// Element-wise arcsine, single precision.
template<typename T>
__device__ void asinf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = asin((float)data[idx]);
}
// Element-wise arcsine, double precision.
template<typename T>
__device__ void asind(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = asin((double)data[idx]);
}
// Element-wise inverse hyperbolic sine, single precision.
template<typename T>
__device__ void asinhf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = asinh((float)data[idx]);
}
// Element-wise inverse hyperbolic sine, double precision.
template<typename T>
__device__ void asinhd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = asinh((double)data[idx]);
}
// Element-wise arctangent, single precision.
template<typename T>
__device__ void atanf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = atan((float)data[idx]);
}
// Element-wise arctangent, double precision.
template<typename T>
__device__ void atand(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = atan((double)data[idx]);
}
// Element-wise inverse hyperbolic tangent, single precision.
template<typename T>
__device__ void atanhf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = atanh((float)data[idx]);
}
// Element-wise inverse hyperbolic tangent, double precision.
template<typename T>
__device__ void atanhd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = atanh((double)data[idx]);
}
// Element-wise cube root, single precision.
template<typename T>
__device__ void cbrtf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cbrt((float)data[idx]);
}
// Element-wise cube root, double precision.
template<typename T>
__device__ void cbrtd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cbrt((double)data[idx]);
}
// Element-wise ceiling, single precision.
template<typename T>
__device__ void ceilf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = ceil((float)data[idx]);
}
// Element-wise ceiling, double precision.
template<typename T>
__device__ void ceild(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = ceil((double)data[idx]);
}
// Element-wise cosine, single precision.
template<typename T>
__device__ void cosf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cos((float)data[idx]);
}
// Element-wise cosine, double precision.
template<typename T>
__device__ void cosd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cos((double)data[idx]);
}
// Element-wise hyperbolic cosine, single precision.
template<typename T>
__device__ void coshf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cosh((float)data[idx]);
}
// Element-wise hyperbolic cosine, double precision.
template<typename T>
__device__ void coshd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = cosh((double)data[idx]);
}
// Element-wise e^x, single precision.
template<typename T>
__device__ void expf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp((float)data[idx]);
}
// Element-wise e^x, double precision.
template<typename T>
__device__ void expd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp((double)data[idx]);
}
// Element-wise 10^x, single precision.
template<typename T>
__device__ void exp10f(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp10((float)data[idx]);
}
// Element-wise 10^x, double precision.
template<typename T>
__device__ void exp10d(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp10((double)data[idx]);
}
// Element-wise 2^x, single precision.
template<typename T>
__device__ void exp2f(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp2((float)data[idx]);
}
// Element-wise 2^x, double precision.
template<typename T>
__device__ void exp2d(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = exp2((double)data[idx]);
}
// Element-wise floor, single precision.
template<typename T>
__device__ void floorf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = floor((float)data[idx]);
}
// Element-wise floor, double precision.
template<typename T>
__device__ void floord(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = floor((double)data[idx]);
}
// Element-wise natural logarithm, single precision.
template<typename T>
__device__ void lnf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log((float)data[idx]);
}
// Element-wise natural logarithm, double precision.
template<typename T>
__device__ void lnd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log((double)data[idx]);
}
// Element-wise logarithm to an arbitrary base, single precision:
// log_base(x) = ln(x) / ln(base).
template<typename T>
__device__ void logf(const T* data, const float base, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = log((float)data[index]) / log(base);
    }
}
// Element-wise logarithm to an arbitrary base, double precision.
template<typename T>
__device__ void logd(const T* data, const double base, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        // BUG FIX: the original cast data[index] to float here, silently dropping
        // precision in the double-precision variant. Cast to double instead.
        result[index] = log((double)data[index]) / log(base);
    }
}
// Element-wise base-10 logarithm, single precision.
template<typename T>
__device__ void log10f(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log10((float)data[idx]);
}
// Element-wise base-10 logarithm, double precision.
template<typename T>
__device__ void log10d(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log10((double)data[idx]);
}
// Element-wise base-2 logarithm, single precision.
template<typename T>
__device__ void log2f(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log2((float)data[idx]);
}
// Element-wise base-2 logarithm, double precision.
template<typename T>
__device__ void log2d(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = log2((double)data[idx]);
}
// Element-wise max(data[i], threshold).
template<typename T>
__device__ void max1(const T* data, const T threshold, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value = data[index];
        // BUG FIX: the original tested (value - threshold) > 0, which is wrong for
        // unsigned T (the subtraction wraps around). Compare directly.
        result[index] = value > threshold ? value : threshold;
    }
}
// Element-wise max against a scalar threshold, float.
extern "C"
__global__ void max1_Float(const float* data, const float threshold, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        // (removed unused `value`/`diff` locals from the original)
        result[index] = max(data[index], threshold);
    }
}
// Element-wise max against a scalar threshold, double.
extern "C"
__global__ void max1_Double(const double* data, const double threshold, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = max(data[index], threshold);
    }
}
// Element-wise max of two arrays.
template<typename T>
__device__ void max(const T* data1, const T* data2, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value1 = data1[index];
        const T value2 = data2[index];
        // BUG FIX: direct comparison instead of (value1 - value2) > 0, which
        // misbehaves for unsigned T.
        result[index] = value1 > value2 ? value1 : value2;
    }
}
// Element-wise max of two float arrays.
extern "C"
__global__ void max_Float(const float* data1, const float* data2, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = max(data1[index], data2[index]);
    }
}
// Element-wise max of two double arrays.
extern "C"
__global__ void max_Double(const double* data1, const double* data2, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = max(data1[index], data2[index]);
    }
}
// Element-wise min(data[i], threshold).
template<typename T>
__device__ void min1(const T* data, const T threshold, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value = data[index];
        // BUG FIX: the original tested (value - threshold) < 0, which is never true
        // for unsigned T (the subtraction wraps). Compare directly.
        result[index] = value < threshold ? value : threshold;
    }
}
// Element-wise min against a scalar threshold, float.
extern "C"
__global__ void min1_Float(const float* data, const float threshold, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        // (removed unused `value`/`diff` locals from the original)
        result[index] = min(data[index], threshold);
    }
}
// Element-wise min against a scalar threshold, double.
extern "C"
__global__ void min1_Double(const double* data, const double threshold, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = min(data[index], threshold);
    }
}
// Element-wise min of two arrays.
template<typename T>
__device__ void min(const T* data1, const T* data2, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value1 = data1[index];
        const T value2 = data2[index];
        // BUG FIX: direct comparison instead of (value1 - value2) < 0, which
        // misbehaves for unsigned T.
        result[index] = value1 < value2 ? value1 : value2;
    }
}
// Element-wise min of two float arrays.
extern "C"
__global__ void min_Float(const float* data1, const float* data2, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = min(data1[index], data2[index]);
    }
}
// Element-wise min of two double arrays.
extern "C"
__global__ void min_Double(const double* data1, const double* data2, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = min(data1[index], data2[index]);
    }
}
// Element-wise x^power, single precision.
template<typename T>
__device__ void powf(const T* data, const float power, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = pow((float)data[index], power);
    }
}
// Element-wise x^power, double precision.
// FIX: the input is read-only, so take const T* (now consistent with powf
// and every sibling in this file; const-adding is backward compatible).
template<typename T>
__device__ void powd(const T* data, const double power, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = pow((double)data[index], power);
    }
}
// Element-wise square, single precision.
template<typename T>
__device__ void pow2f(const T* data, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const float value = data[index];
        result[index] = value * value;
    }
}
// Element-wise square, double precision.
template<typename T>
__device__ void pow2d(const T* data, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const double value = data[index];
        result[index] = value * value;
    }
}
// Element-wise ReLU: max(x, 0).
template<typename T>
__device__ void relu(const T* data, T* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value = data[index];
        result[index] = value >= 0 ? value : 0;
    }
}
// Round each float to the nearest integer, stored as int.
extern "C"
__global__ void round_f(const float* data, int* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = round(data[index]);
    }
}
// Round each double to the nearest integer, stored as 64-bit int.
extern "C"
__global__ void round_d(const double* data, long long int* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = round(data[index]);
    }
}
// Logistic sigmoid 1 / (1 + e^-x), single precision.
template<typename T>
__device__ void sigmoidf(const T* data, float* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        result[index] = 1.0f / (1.0f + exp(-(float)data[index]));
    }
}
// Logistic sigmoid, double precision.
template<typename T>
__device__ void sigmoidd(const T* data, double* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        // FIX: use double literals in the double-precision variant
        // (the original mixed in 1.0f float literals).
        result[index] = 1.0 / (1.0 + exp(-(double)data[index]));
    }
}
// Signum: -1, 0 or +1 per element.
template<typename T>
__device__ void sign(const T* data, int* result, const int length) {
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < length) {
        const T value = data[index];
        result[index] = value == 0 ? 0 : (value > 0 ? 1 : -1);
    }
}
// Element-wise sine, single precision.
template<typename T>
__device__ void sinf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sin((float)data[idx]);
}
// Element-wise sine, double precision.
template<typename T>
__device__ void sind(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sin((double)data[idx]);
}
// Element-wise hyperbolic sine, single precision.
template<typename T>
__device__ void sinhf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sinh((float)data[idx]);
}
// Element-wise hyperbolic sine, double precision.
template<typename T>
__device__ void sinhd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sinh((double)data[idx]);
}
// Element-wise square root, single precision.
template<typename T>
__device__ void sqrtf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sqrt((float)data[idx]);
}
// Element-wise square root, double precision.
template<typename T>
__device__ void sqrtd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = sqrt((double)data[idx]);
}
// Element-wise tangent, single precision.
template<typename T>
__device__ void tanf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = tan((float)data[idx]);
}
// Element-wise tangent, double precision.
template<typename T>
__device__ void tand(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = tan((double)data[idx]);
}
// Element-wise hyperbolic tangent, single precision.
template<typename T>
__device__ void tanhf(const T* data, float* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = tanh((float)data[idx]);
}
// Element-wise hyperbolic tangent, double precision.
template<typename T>
__device__ void tanhd(const T* data, double* result, const int length) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= length) return;
    result[idx] = tanh((double)data[idx]);
}
|
4,170 | #include <stdio.h>
__global__ void vectorAdd(const float *a, const float *b, float *c, int numElements)
{
    // Element-wise add with a tail guard.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < numElements)
        c[idx] = a[idx] + b[idx];
    // Deliberate busy-wait (~1e4 clock ticks) so the surrounding demo can
    // observe copy/compute overlap across devices.
    const clock_t deadline = clock() + 1e+4;
    while (clock() < deadline) { }
}
int main(int argc, char *argv[])
{
    // Multi-GPU vector add: split one large vector across all visible devices,
    // copy/compute/copy-back asynchronously per device, then validate on host.
    int numElements = 3 << 22;
    // Allocate vectors a, b and c in pinned host memory (required for truly
    // asynchronous cudaMemcpyAsync).
    size_t numBytes = sizeof(float) * numElements;
    float *h_a;
    float *h_b;
    float *h_c;
    cudaMallocHost((void **)&h_a, numBytes);
    cudaMallocHost((void **)&h_b, numBytes);
    cudaMallocHost((void **)&h_c, numBytes);
    // Initialize vectors a and b.
    for (int i = 0; i < numElements; ++i)
    {
        h_a[i] = rand() / (float)RAND_MAX;
        h_b[i] = rand() / (float)RAND_MAX;
    }
    // Get the number of CUDA devices.
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    // FIX: guard against division by zero below when no device is present.
    if (numDevices <= 0)
    {
        printf("No CUDA devices available\n");
        return 1;
    }
    // Compute the average number of elements per device and the number of spare elements.
    int avgElementsPerDevice = numElements / numDevices;
    int sprElements = numElements - avgElementsPerDevice * numDevices;
    float **d_a = (float **)malloc(sizeof(float *) * numDevices);
    float **d_b = (float **)malloc(sizeof(float *) * numDevices);
    float **d_c = (float **)malloc(sizeof(float *) * numDevices);
    for (int i = 0, offset = 0; i < numDevices; ++i)
    {
        // The first sprElements devices process one extra element.
        int numElementsCurrentDevice = avgElementsPerDevice + (i < sprElements);
        // Set device to be used for GPU executions.
        cudaSetDevice(i);
        // Allocate vectors a, b and c in device memory.
        // FIX: these buffers hold floats — size with sizeof(float), not sizeof(int)
        // (equal on common platforms, but a latent bug).
        size_t numBytesCurrentDevice = sizeof(float) * numElementsCurrentDevice;
        cudaMalloc((void **)&d_a[i], numBytesCurrentDevice);
        cudaMalloc((void **)&d_b[i], numBytesCurrentDevice);
        cudaMalloc((void **)&d_c[i], numBytesCurrentDevice);
        // Copy vectors a and b from host memory to device memory asynchronously.
        cudaMemcpyAsync(d_a[i], h_a + offset, numBytesCurrentDevice, cudaMemcpyHostToDevice);
        cudaMemcpyAsync(d_b[i], h_b + offset, numBytesCurrentDevice, cudaMemcpyHostToDevice);
        // Determine the number of threads per block and the number of blocks per grid.
        unsigned int numThreadsPerBlock = 256;
        unsigned int numBlocksPerGrid = (numElementsCurrentDevice + numThreadsPerBlock - 1) / numThreadsPerBlock;
        // Invoke the kernel on device asynchronously.
        vectorAdd<<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a[i], d_b[i], d_c[i], numElementsCurrentDevice);
        // Copy vector c from device memory to host memory asynchronously.
        cudaMemcpyAsync(h_c + offset, d_c[i], numBytesCurrentDevice, cudaMemcpyDeviceToHost);
        // Increase offset to point to the next portion of data.
        offset += numElementsCurrentDevice;
    }
    // Wait for the devices to finish.
    for (int i = 0; i < numDevices; ++i)
    {
        cudaSetDevice(i);
        cudaDeviceSynchronize();
    }
    // Validate the result against a host-side reference.
    for (int i = 0; i < numElements; ++i)
    {
        float actual = h_c[i];
        float expected = h_a[i] + h_b[i];
        if (fabs(actual - expected) > 1e-7)
        {
            printf("h_c[%d] = %f, expected = %f\n", i, actual, expected);
            break;
        }
    }
    // Cleanup.
    for (int i = 0; i < numDevices; ++i)
    {
        cudaSetDevice(i);
        cudaFree(d_c[i]);
        cudaFree(d_b[i]);
        cudaFree(d_a[i]);
    }
    free(d_c);
    free(d_b);
    free(d_a);
    cudaFreeHost(h_c);
    cudaFreeHost(h_b);
    cudaFreeHost(h_a);
    for (int i = 0; i < numDevices; ++i)
    {
        cudaSetDevice(i);
        cudaDeviceReset();
    }
    // FIX: explicit success status.
    return 0;
}
|
4,171 | //agent.cpp
//#include <iostream>
//#include <string>
//#include <thrust/version.h>
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/device_ptr.h>
//#include "agent.cuh"
|
4,172 | #include "includes.h"
// Element-wise ReLU: dst[i] = max(src[i], 0), one thread per element.
__global__ void relu_ker(float* src, float* dst, int N){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= N){
        return;
    }
    // FIX: use a float literal — fmaxf(0.0, ...) passed a double constant
    // into the single-precision intrinsic.
    dst[i] = fmaxf(0.0f, src[i]);
}
4,173 | #include <cuda.h>
#include <stdio.h>
#include <malloc.h>
__host__
void fill_vector(float* matrix , int size){
    // Populate with pseudo-random values uniformly drawn from [0, 2].
    for (int i = 0; i < size; ++i)
    {
        const float unit = ((float)rand()) / (float)(RAND_MAX);  // in [0, 1]
        matrix[i] = unit * 2.0f;
    }
}
__host__
void print(float *V, int len){
    // Print each value with two decimals, space separated, newline at the end.
    int i = 0;
    while (i < len) {
        printf("%.2f ", V[i]);
        ++i;
    }
    printf("\n");
}
__global__
void matrixMult(float* d_Matrix , float* d_Result , int n){
    // NOTE(review): despite the name, this doubles each element of a
    // length-n vector — confirm the intent with callers.
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n)
        return;
    d_Result[row] = d_Matrix[row] * 2;
}
int main(){
    // Fill a host vector with random data, double it on the GPU, print both.
    int n = 100;
    cudaError_t error = cudaSuccess;
    int width = n * sizeof(float);   // buffer size in bytes
    float *h_Matrix = (float*) malloc(width);
    float *h_Result = (float*) malloc(width);
    fill_vector(h_Matrix,n);
    print(h_Matrix,n);
    float *d_Matrix, *d_Result;
    error = cudaMalloc ((void **) &d_Matrix, width);
    if (error != cudaSuccess){
        // FIX: both messages previously referred to "d_R"
        printf("Error solicitando memoria en la GPU para d_Matrix\n");
        exit(-1);
    }
    error = cudaMalloc ((void **) &d_Result, width);
    if (error != cudaSuccess){
        printf("Error solicitando memoria en la GPU para d_Result\n");
        exit(-1);
    }
    cudaMemcpy(d_Matrix,h_Matrix,width,cudaMemcpyHostToDevice);
    // Enough 10-thread blocks to cover all n elements.
    dim3 bloques(ceil(n/10.0),1,1);
    dim3 hilos(10,1,1);
    matrixMult<<<bloques,hilos>>>(d_Matrix,d_Result,n);
    cudaDeviceSynchronize();
    cudaMemcpy(h_Result,d_Result,width,cudaMemcpyDeviceToHost);
    print(h_Result,n);
    cudaFree(d_Matrix);
    cudaFree(d_Result);
    free(h_Matrix);
    free(h_Result);
    return 0;
}
|
4,174 | #include "includes.h"
enum ComputeMode { ADD, SUB, MUL, DIV };
cudaError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode);
// Element-wise integer add. Indexes by threadIdx only, so it is intended for a
// single-block launch whose thread count equals the element count.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
4,175 | #include "includes.h"
// Per-pixel gradient magnitude and quantized direction from Gx/Gy components.
__global__ void pythagoras(unsigned char* Gx, unsigned char* Gy, unsigned char* G, unsigned char* theta)
{
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    float af = float(Gx[idx]);
    float bf = float(Gy[idx]);
    // Magnitude: sqrt(Gx^2 + Gy^2).
    G[idx] = (unsigned char)sqrtf(af * af + bf * bf);
    // BUG FIX: the original cast truncated atan2f's result to unsigned char
    // *before* scaling by 63.994, destroying the quantization. Scale first,
    // then quantize. NOTE(review): atan2f returns values in [-pi, pi], so the
    // product can be negative before the cast — confirm the intended mapping.
    theta[idx] = (unsigned char)(atan2f(af, bf) * 63.994);
}
4,176 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
# include <iostream>
# include <fstream>
# include <cstdlib>
# include <cmath>
# include <vector>
using namespace std;
struct number{ //struktura wykorzystywana w wektorze danych - zawiera informacje o wartosci liczby oraz o tym czy jest pierwsza
unsigned long int value;
bool prime;
};
// Primality test kernel: takes the array to test, the square root of the
// largest value (upper bound for trial divisors) and the array size.
// Each block strides over the array with step gridDim.x.
__global__ void primeTesting (number* tab, uint sqr, uint d)
{
    uint tid=blockIdx.x; // index of this block within the grid
    uint i,j;            // loop helpers
    // FIX: 0 and 1 are not prime, but no trial divisor catches 1 below.
    for (j = tid; j < d; j += gridDim.x)
        if (tab[j].value < 2)
            tab[j].prime = false;
    for (i=2;i<=sqr;i++) { // trial divisors from 2 up to sqrt(max value)
        for (j = tid; j <d; j+=gridDim.x) { // inner loop: test divisibility of each element by i
            // divisible and not equal to the divisor itself => composite
            if((tab[j].value%i==0)&&(tab[j].value!=i))
                tab[j].prime=false;
        }
    }
}
int main(int argc, char** argv) {
    // Read numbers from the file given as argv[1], test them for primality on
    // the GPU, time the kernel with CUDA events and print the verdicts.
    int blockNumber=1000;       // number of CUDA blocks
    ifstream file;              // input file
    unsigned int maxval=0;      // largest value read from the file
    number fromfile;            // one number plus its primality flag
    cudaEvent_t start, stop;    // CUDA timing events
    float elapsedTime;          // measured kernel time in ms
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (argc != 2) {            // exactly one argument expected: the input file
        cout << "The number of arguments is invalid"<<endl;
        exit(1);
    }
    file.open(argv[1]);
    if (file.fail()){           // verify the file opened correctly
        cout<<"Could not open file to read."<<endl;
        exit(1);
    }
    vector<number> tab;         // all numbers to test
    while (file >> fromfile.value) {
        fromfile.prime=true;    // presumed prime until proven composite
        tab.push_back(fromfile);
        if(fromfile.value>maxval)   // track the maximum for the divisor bound
            maxval=fromfile.value;
    }
    file.close();               // FIX: the input file was never closed
    uint sqr=sqrt(maxval);      // divisors only need to reach sqrt(max)
    uint d=tab.size();          // element count
    number* tab2;               // device copy of the data
    number* temp = tab.data();  // vector contents as a raw array
    cudaMalloc( (void**)&tab2, d * sizeof(number) );
    cudaMemcpy(tab2, temp, d * sizeof(number), cudaMemcpyHostToDevice);
    cudaEventRecord(start);     // start timing
    primeTesting <<< blockNumber, 1 >>> (tab2, sqr,d);
    cudaEventRecord(stop);      // stop timing
    cudaEventSynchronize(stop);
    number * result;            // host buffer for the results
    result= (number *) malloc (d*sizeof(number));
    cudaMemcpy(result, tab2, d * sizeof(number), cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Czas : %f ms\n", elapsedTime);  // elapsed time (kept verbatim)
    for (uint i=0;i<d;i++)      // print each number with its verdict
        if(result[i].prime==true)
            cout<< result[i].value<<": prime"<<endl;
        else
            cout<< result[i].value<<": composite"<<endl;
    cudaEventDestroy(start);    // FIX: events were never destroyed
    cudaEventDestroy(stop);
    cudaFree(tab2);             // release device memory
    free(result);               // release host memory
    return 0;
}
|
4,177 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<cudaStream_t> streams[2];
// Ask the CUDA runtime which device is currently bound to this host thread.
int getGPUId ()
{
    int dev;
    cudaGetDevice(&dev);
    return dev;
}
// Hand out a stream on `gpuId` that has not been used this round, creating a
// new one on demand. streamSize[gpuId] counts streams handed out since the
// last syncAllStreams(); streams[gpuId] owns every stream ever created, so
// earlier streams are recycled round after round.
cudaStream_t getIdleStream (int gpuId)
{
    cudaSetDevice(gpuId);
    int used = streamSize[gpuId];
    if (used < (int)streams[gpuId].size())
    {
        // An already-created stream is idle: reuse it.
        streamSize[gpuId] = used + 1;
        return streams[gpuId][used];
    }
    // Every existing stream is in use this round: create and register a new one.
    cudaStream_t stm;
    cudaStreamCreate(&stm);
    streams[gpuId].push_back(stm);
    streamSize[gpuId] = used + 1;
    return stm;
}
// Barrier between phases: wait for all outstanding kernels/copies on BOTH
// GPUs, then mark every stream idle so getIdleStream() can recycle them.
//
// Fixes two defects in the original:
//  - cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
//    supported equivalent.
//  - Only the *current* device was synchronized, yet both streamSize counters
//    were reset — work still running on the other GPU could race with the
//    next phase. Synchronize each device explicitly.
void syncAllStreams ()
{
    for (int gpu = 0; gpu < 2; ++gpu)
    {
        cudaSetDevice(gpu);
        cudaDeviceSynchronize();
    }
    streamSize[0] = 0;
    streamSize[1] = 0;
}
// Asynchronously copy the block-range [bi0,bi1) x [bj0,bj1) (in units of B*B
// blocks) of the n*n row-major int matrix from src to dst on `stream`.
//
// Improvement: the original issued one cudaMemcpyAsync per matrix row, i.e.
// (bi1-bi0)*B tiny transfers. A single strided cudaMemcpy2DAsync expresses
// the same rectangle in one call. Also guards against an empty range.
void blockCopyAsync (int gpuId, int* dst, const int* src, cudaMemcpyKind kind, cudaStream_t stream, int B, int bi0, int bi1, int bj0, int bj1)
{
    cudaSetDevice(gpuId);
    int rows = (bi1 - bi0) * B;                               // number of matrix rows to copy
    size_t widthBytes = (size_t)(bj1 - bj0) * B * sizeof(int); // bytes per row in the rectangle
    if (rows <= 0 || widthBytes == 0)
        return;                                               // nothing to copy
    size_t pitchBytes = (size_t)n * sizeof(int);              // full row stride of the matrix
    size_t offset = (size_t)bi0 * B * n + (size_t)bj0 * B;    // top-left element of the rectangle
    cudaMemcpy2DAsync(dst + offset, pitchBytes,
                      src + offset, pitchBytes,
                      widthBytes, rows, kind, stream);
}
// Entry point. Usage: <prog> <input file> <output file> <block size B>
// Reads the graph, runs blocked Floyd-Warshall on two GPUs, writes distances.
int main(int argc, char* argv[])
{
    // Original dereferenced argv[1..3] without checking argc — segfault on
    // a bad command line. Validate arguments first.
    if (argc != 4) {
        fprintf(stderr, "usage: %s <input file> <output file> <block size>\n", argv[0]);
        return 1;
    }
    int B = atoi(argv[3]);
    if (B <= 0) {
        // ceil(n, B) divides by B; a non-positive B would be UB/nonsense.
        fprintf(stderr, "block size must be a positive integer\n");
        return 1;
    }
    input(argv[1], B);
    block_FW_2GPU(B);
    output(argv[2]);
    return 0;
}
// Read the graph from `inFileName` ("realn m" header, then m edges "a b v",
// vertices 1-based) into the managed n*n matrix Dist, where n is realn
// rounded up to a multiple of B. Diagonal = 0, absent edges = INF.
//
// Fixes: unchecked fopen/fscanf results and a leaked FILE handle.
void input(char *inFileName, int B)
{
    FILE *infile = fopen(inFileName, "r");
    if (infile == NULL) {
        fprintf(stderr, "Cannot open input file %s\n", inFileName);
        exit(1);
    }
    if (fscanf(infile, "%d %d", &realn, &m) != 2) {
        fprintf(stderr, "Malformed header in %s\n", inFileName);
        fclose(infile);
        exit(1);
    }
    n = ceil(realn, B) * B;  // pad so every B*B block is full
    cudaMallocManaged(&Dist, n * n * sizeof(int));
    for (int i = 0, k = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j, ++k) {
            Dist[k] = (i == j) ? 0 : INF;
        }
    }
    while (--m >= 0) {
        int a, b, v;
        if (fscanf(infile, "%d %d %d", &a, &b, &v) != 3) {
            fprintf(stderr, "Malformed edge record in %s\n", inFileName);
            break;  // stop at the first bad record rather than reading garbage
        }
        --a, --b;  // input vertices are 1-based
        Dist[a * n + b] = v;
    }
    fclose(infile);  // original leaked the FILE handle
}
// Write the realn x realn result (ignoring the padding up to n) to
// `outFileName`, printing "INF" for unreachable pairs, then free Dist.
//
// Fixes: unchecked fopen result and a leaked FILE handle (the missing
// fclose also meant buffered output was never explicitly flushed).
void output(char *outFileName)
{
    FILE *outfile = fopen(outFileName, "w");
    if (outfile == NULL) {
        fprintf(stderr, "Cannot open output file %s\n", outFileName);
        exit(1);
    }
    for (int i = 0; i < realn; ++i) {
        for (int j = 0; j < realn; ++j) {
            int d = Dist[i * n + j];
            if (d >= INF) fprintf(outfile, "INF ");
            else fprintf(outfile, "%d ", d);
        }
        fprintf(outfile, "\n");
    }
    fclose(outfile);  // flush and release the handle
    cudaFree(Dist);
}
// Debug helper: dump the realn x realn distance matrix to stderr,
// printing "INF" for entries at or above the INF sentinel.
void print ()
{
    for (int row = 0; row < realn; ++row) {
        for (int col = 0; col < realn; ++col) {
            int w = Dist[row * n + col];
            if (w >= INF)
                fprintf(stderr, "INF ");
            else
                fprintf(stderr, "%d ", w);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
// Integer ceiling division: smallest q with q*b >= a (for positive a, b).
int ceil(int a, int b)
{
    int shifted = a + b - 1;  // same (a + b - 1) / b formula, split for clarity
    return shifted / b;
}
// Blocked Floyd-Warshall driver. Each round r processes pivot block (r, r),
// then the pivot row/column, then the remaining quadrants, splitting the
// independent work across the two GPUs, with a full sync between phases.
void block_FW_2GPU(int B)
{
    const int rounds = ceil(n, B);
    for (int r = 0; r < rounds; ++r) {
        fprintf(stderr, "Round: %d\n", r);
        /* Phase 1: the dependent pivot block itself, on GPU 0. */
        calAsync(0, B, r, r, r, 1, 1);
        syncAllStreams();
        /* Phase 2: pivot row and pivot column, split across GPUs. */
        calAsync(0, B, r, r, 0, r, 1);                  // left of pivot  (GPU 0)
        calAsync(0, B, r, r, r +1, rounds - r -1, 1);   // right of pivot (GPU 0)
        calAsync(1, B, r, 0, r, 1, r);                  // above pivot    (GPU 1)
        calAsync(1, B, r, r +1, r, 1, rounds - r -1);   // below pivot    (GPU 1)
        syncAllStreams();
        /* Phase 3: the four remaining quadrants, two per GPU. */
        calAsync(0, B, r, 0, 0, r, r);                                    // upper-left
        calAsync(1, B, r, 0, r +1, rounds -r -1, r);                      // upper-right
        calAsync(1, B, r, r +1, 0, r, rounds - r -1);                     // lower-left
        calAsync(0, B, r, r +1, r +1, rounds -r -1, rounds - r -1);       // lower-right
        syncAllStreams();
    }
}
// One Floyd-Warshall relaxation step through intermediate vertex k over the
// sub-rectangle [i0,i1) x [j0,j1) of the n*n row-major matrix dDist.
// Launched with a 2D grid; out-of-range threads do nothing.
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
    int row = blockDim.x * blockIdx.x + threadIdx.x + i0;
    int col = blockDim.y * blockIdx.y + threadIdx.y + j0;
    if (row < i1 && col < j1)
    {
        int relaxed = D(row, k) + D(k, col);
        if (relaxed < D(row, col))
            D(row, col) = relaxed;
    }
}
// Fused Floyd-Warshall relaxation over all intermediate vertices k in [k0,k1)
// for the tile [i0,i1) x [j0,j1), valid only when the tile shares neither the
// pivot row nor the pivot column (caller guarantees this via calAsync).
// Caches slices of the pivot column (Si) and pivot row (Sj) in shared memory,
// refilled every cacheSize iterations of k.
// NOTE(review): the cache refill assumes blockDim.x == blockDim.y ==
// MAX_THREAD_DIM2, which matches the launch in calAsync — confirm if reused.
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
// Global (i, j) element this thread owns within the tile.
int di = blockDim.x * blockIdx.x + tx;
int dj = blockDim.y * blockIdx.y + ty;
int i = i0 + di;
int j = j0 + dj;
// Threads past the tile edge still run the loop (to hit the barriers) but
// neither read nor write their own D(i, j).
bool valid = i < i1 && j < j1;
__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
const int cacheSize = MAX_THREAD_DIM2;
// Accumulate the running minimum in a register; written back once at the end.
int Dij = valid? D(i, j): 0;
// dkmod = k's offset within the currently cached window; identical in every
// thread, so the branch below is uniform and the barriers are safe.
int dkmod = 0;
for(int k = k0; k < k1; ++k)
{
if(dkmod == 0)
{
// Window exhausted: refill the shared caches for k .. k+cacheSize-1.
// First barrier: make sure no thread is still reading the old window.
__syncthreads();
if(i < i1 && k+ty < k1)
Si[ty][tx] = D(i, k+ty);
if(j < j1 && k+tx < k1)
Sj[tx][ty] = D(k+tx, j);
// Second barrier: caches fully written before anyone reads them.
__syncthreads();
}
if(valid)
{
// assert(Si[tx][dkmod] == D(i,k));
// assert(Sj[dkmod][ty] == D(k,j));
// int Dik = D(i, k);
// int Dkj = D(k, j);
int Dik = Si[dkmod][tx];
int Dkj = Sj[dkmod][ty];
int D1 = Dik + Dkj;
if (D1 < Dij)
Dij = D1;
}
dkmod = (dkmod + 1) % cacheSize;
}
// Single write-back of the fully relaxed distance.
if(valid)
D(i, j) = Dij;
}
// Launch the Floyd-Warshall relaxation for round `Round` on GPU `gpuId` over
// the rectangle of blocks starting at (block_start_x, block_start_y), spanning
// block_height blocks in x and block_width blocks in y. One kernel launch (or
// one launch per k for dependent blocks) is queued per B*B block, each on its
// own stream obtained from getIdleStream(); the caller is responsible for
// synchronizing (syncAllStreams) before the next phase.
// NOTE(review): kernels receive the managed pointer Dist directly; both GPUs
// read/write the same unified-memory matrix.
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
cudaSetDevice(gpuId);
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it need to compute B times
// for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
// Element range [i0,i1) x [j0,j1) covered by this block, clamped to n.
int i0 = b_i * B;
int i1 = min((b_i +1) * B, n);
int j0 = b_j * B;
int j1 = min((b_j +1) * B, n);
// Intermediate-vertex range handled in this round.
int k0 = Round * B;
int k1 = min((Round +1) * B, n);
// A block on the pivot row/column depends on values produced within this
// same round, so its k-iterations cannot be fused into one launch.
bool iDepends = i0 == k0;
bool jDepends = j0 == k0;
int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
// Enough thread-blocks to tile B elements per axis at threadDim each.
int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
cudaStream_t stm = getIdleStream(gpuId);
if(iDepends || jDepends)
{
// Dependent block: relax one k at a time so each step sees the
// previous step's results.
for(int k=k0; k<k1; ++k)
Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, Dist, n);
}
else
// Independent block: all k in [k0,k1) fused into a single launch.
UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, Dist, n);
// for (int i = i0; i < i1; ++i) {
// for (int j = j0; j < j1; ++j) {
// if (Dist[i][k] + Dist[k][j] < Dist[i][j])
// Dist[i][j] = Dist[i][k] + Dist[k][j];
// }
// }
// }
}
}
}
|
4,178 | float h_A[]= {
0.5627173728130572, 0.6098007276360664, 0.5349730124526967, 0.6280156549880231, 0.5462467493414154, 0.8887562433166953, 0.5283508322038977, 0.9072439117199396, 0.5799009745766212, 0.7118663511190295, 0.6885295493956709, 0.9372667262192638, 0.942889387720673, 0.5654227062167685, 0.9815591129304171, 0.6402296882198368, 0.9395730031097221, 0.5744350636156921, 0.6267225914004302, 0.6668179324561752, 0.7039365987504664, 0.9456948247062826, 0.8824085628540038, 0.958130009015874, 0.9640106257467509, 0.5406062561360383, 0.6018286417144431, 0.9293439314586263, 0.7345254088772686, 0.8700755901277071, 0.7138365891236201, 0.8287468761648953, 0.7592687450251869, 0.6071038174839359, 0.5149567173197724, 0.5078362473411289, 0.9278474717605962, 0.563494462366111, 0.5301367447255485, 0.7877187279730848, 0.7007998631876857, 0.8097717360678727, 0.9537555609077031, 0.5290059945299896, 0.9884370349418188, 0.8329119493983209, 0.9479765539101663, 0.8744585415680575, 0.540693697182596, 0.9940549818510402, 0.6812423812575007, 0.8439311723228373, 0.8943180210330246, 0.6022883458326452, 0.6080999612881175, 0.6810522086380779, 0.8050679205566968, 0.7523304025684645, 0.7962703456599476, 0.7626070587356771, 0.786145287114702, 0.5260766472933724, 0.5358717172119191, 0.5905345620615313, 0.9847276600682584, 0.7352309853443417, 0.9025738233834197, 0.7502535866800047, 0.571039689043918, 0.8600831025416569, 0.906881190244075, 0.6665752822286957, 0.7599420751467931, 0.5571028704032486, 0.9408435273067675, 0.725221040589725, 0.7011557987050752, 0.605160525046433, 0.8976170108457084, 0.838029472145294, 0.5228350955007486, 0.883288048474848, 0.582079580865667, 0.5651433113815966, 0.7133238582919262, 0.7471828871466035, 0.5717302563863222, 0.9668017249369979, 0.8973889351045048, 0.6425694274208629, 0.8986407545682169, 0.5673275719816004, 0.6105251632450908, 0.9629296453032865, 0.7590695545446295, 0.615457172041532, 0.5357558016628818, 0.9623698223591414, 0.6419530131632898, 0.6606686109264144, 
0.9909646062621882, 0.9196235443545759, 0.770940516816031, 0.9845246885733469, 0.7929684434475439, 0.9469483725447378, 0.8937293901300147, 0.7558381461954315, 0.5142835232219749, 0.7641396253823918, 0.7897711466508941, 0.5350285187588054, 0.5201290820261, 0.8892567158872664, 0.9013311852657161, 0.7709837485002233, 0.540648193499764, 0.9794895200318348, 0.805734095500763, 0.7381366116235641, 0.8469279143508113, 0.7454957366833737, 0.5645137285290396, 0.5362929933781382, 0.8999413628395767, 0.7986228328378078, 0.9548182044616319, 0.6848948694389188, 0.6353950368583208, 0.6654014029586488, 0.6374173497245698, 0.9356965912017645, 0.9142388353930959, 0.9838849240684675, 0.5114762261115849, 0.6956869962676511, 0.6654191821378199, 0.641521066435447, 0.5296410121383349, 0.8493908080741968, 0.5174288162914047, 0.8740050395619698, 0.7048677883412886, 0.7738799650869406, 0.7815832631095103, 0.5507027628077725, 0.7404339513640851, 0.7902251495570507, 0.7776548417835062, 0.67892985862561, 0.901113067343882, 0.5407123356918854, 0.9432941442343359, 0.5524493134758441, 0.7448488834395439, 0.6116544588627981, 0.9569612975441228, 0.7028816091643675, 0.6389265701033032, 0.89387674031728, 0.766400636820497, 0.5244324448249562, 0.5690419570645798, 0.6479384907080805, 0.6915278595288855, 0.5437071272415013, 0.7005240281239238, 0.7840459876750285, 0.8129009789414554, 0.8567615785455118, 0.969533804015038, 0.8418008873118762, 0.7591236864069029, 0.7798339636147802, 0.8205137878029074, 0.5914294308836123, 0.7911970770998549, 0.5350605455201259, 0.8276639344127623, 0.5292755621414751, 0.986915604847613, 0.7805058069663512, 0.7310903516997257, 0.7583469061393382, 0.8333004188064734, 0.5319318632688277, 0.7980200122750147, 0.5374787238771377, 0.9701235931361061, 0.7993218586308493, 0.6854844789287575, 0.7349209972426286, 0.7193244951716247, 0.5757852457579811, 0.528063617987963, 0.8246112637738376, 0.7163259634283075, 0.878503019417881, 0.7238536212148883, 0.9115151203960598, 
0.8687237066757447, 0.5834203385084391, 0.8040128517772034, 0.7424439312145374, 0.7657415974565607, 0.8246655861941723, 0.5284854176113551, 0.757275632404222, 0.6748844438348107, 0.6015335151829548, 0.6293066383722509, 0.6174816798362447, 0.7259327754192867, 0.5837862160942597, 0.907203655966526, 0.9396406770187531, 0.9507412196981586, 0.7404235022823344, 0.879174883323046, 0.8302154160486457, 0.8742065030136466, 0.5180989624256274, 0.8453208984963645, 0.797006899321312, 0.8103240044703629, 0.9996429399385611, 0.5235582136333039, 0.5458283193178114, 0.6613154671877441, 0.7593400454331758, 0.6076607847141222, 0.5908536185912929, 0.9441803189380555, 0.5329066728773206, 0.6680280873089642, 0.6988906210901586, 0.9688882050213689, 0.7165193077027316, 0.9050907664782761, 0.6695815920848005, 0.5309493123215416, 0.5122916952603286, 0.7353973188991458, 0.7882335891262571, 0.6343402012927213, 0.8213111806007021, 0.9544907590494665, 0.9506735127181187, 0.8061497315603533, 0.6729044701100002, 0.6252460542186576, 0.7792739495243322, 0.8483892984253596, 0.7254809348815416, 0.864315754681396, 0.7400982402131107, 0.991715502403592, 0.9737967280053085, 0.626500891223656, 0.6106614227578397, 0.9703274972134652, 0.8218027026675766, 0.7563287404487122, 0.929097252553815, 0.932284022150254, 0.568297383022365, 0.8350602765780863, 0.6773322938167512, 0.5630095254796628, 0.7182584465804219, 0.8381682648985042, 0.5086095475914163, 0.7443557828958393, 0.7739662537703645, 0.5149345215686703, 0.5788282608347624, 0.9886572467578743, 0.6720133483152386, 0.6046225331554238, 0.6571415018110698, 0.6889398257680686, 0.983564323286853, 0.6144241634768721, 0.5024287293976767, 0.9613639255500922, 0.8693759385009536, 0.8147562145442506, 0.8411984722871756, 0.5117597657130006, 0.6494542821734983, 0.9182218080368397, 0.6220862493887741, 0.7014463973092266, 0.5825811379238017, 0.5154193312025874, 0.9689327995949624, 0.6732874921768626, 0.6581016528031582, 0.6479285062345622, 0.6028144875514996, 
0.781190708608955, 0.6209952997288792, 0.9256781802390217, 0.6377406631392502, 0.5260654787336863, 0.6381608124702312, 0.8249584914983321, 0.7584411630280784, 0.59845398244141, 0.6234188403325558, 0.7582901878985673, 0.5198897467515444, 0.6142363650340394, 0.6665644069424255, 0.7106352801971112, 0.6214964798090635, 0.67615969830503, 0.7758887307302968, 0.6461378842015304, 0.9946487149166205, 0.5324010781037877, 0.7248973314348035, 0.9840193156027754, 0.5590045074718075, 0.9887972119569053, 0.566081336798592, 0.6081808634153778, 0.7160769975926222, 0.7416673632565043, 0.809592006381237, 0.892197636000099, 0.9942164918053787, 0.7739657316767234, 0.914224257228099, 0.8346395059304894, 0.9937505514516216, 0.7731382910478508, 0.6691326089722908, 0.9611393938190016, 0.7945004604053523, 0.8365039125998761, 0.8290911443845153, 0.6613660310457146, 0.9863656409169015, 0.9797902070664424, 0.60607018327334, 0.5243153765099362, 0.9133478230175631, 0.8894027547600103, 0.5013665965391405, 0.7073708762602198, 0.6369123486328319, 0.7839597103709912, 0.6645008160498979, 0.6363545348879582, 0.7480134604005282, 0.8348488974590224, 0.9188105150582762, 0.7696507731679131, 0.6943121216397656, 0.6979845221648976, 0.5086219569373814, 0.610826455528038, 0.5960161096638725, 0.9568449864762162, 0.9350876296809421, 0.5070576895249064, 0.7294585093598651, 0.7223273532422477, 0.5118274247838839, 0.9936623728462354, 0.6011096734528371, 0.6951457866736341, 0.52922094597896, 0.6562433599438495, 0.6560546702477734, 0.9630132336319173, 0.6380207863347978, 0.5422151136059778, 0.8037982856504191, 0.9907145594792441, 0.776473677710378, 0.8573754046247639, 0.5750417050737455, 0.6363718888295836, 0.9294416904746887, 0.5382454815746562, 0.7988758604645919, 0.8297799201926205, 0.8251616388137898, 0.6825267727279138, 0.9626865713595081, 0.946605958661014, 0.8993938836275077, 0.9299065656022393, 0.7642349222095467, 0.759185286376912, 0.8957995289966534, 0.8249491307712018, 0.6921630641452767, 
0.8133350174499465, 0.7539353239759818, 0.941132308011801, 0.7043500952211255, 0.5587572762421732, 0.9582789210311106, 0.7962059690777349, 0.7622924343630848, 0.7049745498661975, 0.8650519509689074, 0.7648091780109598, 0.9800027076795306, 0.5261792777112821, 0.8661623802205883, 0.6137073928475155, 0.9519407228914123, 0.657166976303442, 0.7628766035694658, 0.5786814203255943, 0.9931601129104092, 0.8445653599180157, 0.7783408645577918, 0.6597212589701049, 0.9110060905225146, 0.932535029096631, 0.9729461184601521, 0.8809086332843024, 0.5709432712585777, 0.5043210584835064, 0.7462855361778651, 0.6923550805120195, 0.8420109219721494, 0.8989535106844027, 0.796205247233588, 0.5473818828105563, 0.6320158839799789, 0.6455287253080566, 0.5179851954878603, 0.8965856713562357, 0.7806185754625545, 0.5058425831403748, 0.7429145058634292, 0.7025336854849265, 0.7331087093332471, 0.6479578013414296, 0.5248807305132164, 0.6475588968766506, 0.8699837079857212, 0.6902027094691766, 0.8624170595391893, 0.6106235716806492, 0.99907796586857, 0.6712564537026781, 0.7918941085147305, 0.844693704703402, 0.6541049617078234, 0.6124230085407012, 0.5647009584923888, 0.595246385573403, 0.8493144995019348, 0.5710219138311621, 0.6458909828729315, 0.9998589079784117, 0.8046966295176127, 0.6265205306961558, 0.6867934499018298, 0.5928761723831866, 0.7345598700767383, 0.706285147945181, 0.9293865261113663, 0.9485615089100965, 0.8715409174339714, 0.5878681689110574, 0.5160197668943842, 0.922185836372859, 0.6292020629164328, 0.5382555249629339, 0.8113099362224023, 0.9860846516538333, 0.6399529876583903, 0.655089697073743, 0.8087934149309819, 0.6037494644183325, 0.5830537666906899, 0.747497991657766, 0.9021958104302308, 0.7622665799631007, 0.5680100055502331, 0.715265898724802, 0.7808428780256276, 0.7409055663194721, 0.8844803345030838, 0.8230387962974682, 0.9274238465692255, 0.6385254328890293, 0.5780876962174442, 0.7059597508627391, 0.6550813037694317, 0.757108738728419, 0.8697534989665325, 
0.8600466975070691, 0.9527984699623523, 0.5483138472717615, 0.5213419478597798, 0.5876115348927209, 0.5007693163984743, 0.5179428814899696, 0.708367213678454, 0.8542442414013678, 0.7411816815941304, 0.562524227524358, 0.900164158034257, 0.7642122968903822, 0.8300771645316773, 0.9396193285459574, 0.7746936267551867, 0.748226519960387, 0.6423640228506389, 0.8488787039180423, 0.7073200528682516, 0.6457611175292599, 0.688079450751135, 0.9951573642402278, 0.6352383093546397, 0.6251706462603615, 0.942007888889385, 0.7845256889891814, 0.6971044590414528, 0.704754015678, 0.7073284866481667, 0.8806755071750005, 0.8069473049869451, 0.7262728289508533, 0.6971642505937322, 0.8435709603204657, 0.6184643066775604, 0.7009333218067605, 0.7616648929291328, 0.9238324662698778, 0.705176382328684, 0.5693781099182784, 0.9814431126596055, 0.8358711722213453, 0.6739125173170619, 0.8962933805566673, 0.8387395409930296, 0.8276737444648873, 0.5116787673764949, 0.5332545726293536, 0.620127215406105, 0.5578287119060015, 0.7521391052553913, 0.5592530908074795, 0.9759262853957313, 0.957171271546992, 0.5856353255125362, 0.6172626431796921, 0.9090447159725769, 0.9444567802861157, 0.7257574644372796, 0.9784791901802506, 0.7368180963079478, 0.6108380086560223, 0.9644748092320037, 0.7937446361126568, 0.7938760419076636, 0.8805117779841738, 0.7010051028445605, 0.5641005474879142, 0.6990838825315864, 0.8763763601214856, 0.9998960420282166, 0.8903655835386227, 0.5038304539485647, 0.8733300075440255, 0.6096659464478797, 0.7442937450556023, 0.9474058045779928, 0.7588771606161733, 0.7465398958431595, 0.7865735402026912, 0.5330987247295653, 0.7909857007540058, 0.9017160654311294, 0.7877296426456406, 0.8950831392290137, 0.851653543503412, 0.7481062939373919, 0.6372008626772077, 0.9986607261950229, 0.8615545172472026, 0.9776572765359934, 0.7767486730139457, 0.8611529183198541, 0.9324393815982783, 0.69738335351786, 0.8828353472347686, 0.6907061231488811, 0.5419384799826757, 0.826732687099069, 
0.5538385482236317, 0.7433335806248463, 0.6286123646547825, 0.6072946964759001, 0.8159128261228958, 0.5948887665816576, 0.7708723275149953, 0.6975122273431773, 0.5300439967176725, 0.6939914668479054, 0.9739471943298224, 0.5811359445010189, 0.7148841211508487, 0.7583762776497679, 0.9408968655389475, 0.7727909216827029, 0.7555895807599868, 0.602191068291404, 0.6419062709946022, 0.6547625529678348, 0.9178287237187094, 0.9146024136146225, 0.6114344323311784, 0.5897719747353323, 0.5695911914986704, 0.7137441868046921, 0.9030272280826499, 0.7242288931991493, 0.7011830848131906, 0.896105381984651, 0.9113194064573362, 0.748810159710437, 0.7823269693100099, 0.8561375088716143, 0.6117327711789726, 0.7553106551750006, 0.759388202313049, 0.9080313742637642, 0.8799678359050452, 0.873243501508101, 0.8369461159248576, 0.5299394838957387, 0.8794486992358129, 0.7911324737561916, 0.8155353772931242, 0.5360785661595306, 0.7626926539515444, 0.6595600332307263, 0.803718379547565, 0.9224299599065241, 0.6340628395912185, 0.8180225390326877, 0.6080772844225456, 0.9437467890585757, 0.9240065708601776, 0.6253979878024998, 0.5161873294654349, 0.9789788608380898, 0.6010812952705349, 0.7160847791379445, 0.8802656021541575, 0.5655951405443251, 0.8574130192498259, 0.8896987087176226, 0.6500512343133802, 0.9094365796062167, 0.6376398077686538, 0.7352927588248248, 0.6221418309854593, 0.8290885045353176, 0.844016585836298, 0.5171927572329016, 0.5818623830132446, 0.6543003943552996, 0.921653697602169, 0.9293348300330881, 0.9475799033423953, 0.6250654499437514, 0.6288716939059351, 0.8831339581782278, 0.6719783869190001, 0.7609409954641707, 0.5305468885876806, 0.5557454192627143, 0.7652206147166103, 0.7326781922253253, 0.6732658644181639, 0.7191008165128508, 0.947985394881494, 0.9178703247750435, 0.9673168110244409, 0.5176572175855685, 0.9139015268630957, 0.6546142573630107, 0.8619468424136209, 0.9209583173097081, 0.9456210762159377, 0.7080858637142071, 0.8775927215643885, 0.7082594173210193, 
0.9317542277021009, 0.63248529608043, 0.8912007651304515, 0.9334471028987315, 0.7211455409160605, 0.9448472370747211, 0.5366132142298594, 0.7411425329753158, 0.5436968965793384, 0.9700180627128197, 0.6539818050980376, 0.6146618792477867, 0.8454033495419195, 0.7881657602979684, 0.8683061912699948, 0.6708959125861622, 0.5395971833730397, 0.8503623583386236, 0.8626828729164829, 0.7930081099907594, 0.8849823108625553, 0.955292888199347, 0.7705145253002503, 0.9365946502608664, 0.7168272270734881, 0.9337284664603969, 0.9506796903498312, 0.5639652963980143, 0.944613726827171, 0.6510235306573595, 0.5109830452952766, 0.9852779917757535, 0.7071673797886638, 0.5141059933739165, 0.5808376551713132, 0.9585540957885597, 0.5284429010312938, 0.6452456939107014, 0.9217724887596661, 0.5945663866883903, 0.8204971761806124, 0.7492806648127728, 0.8789704686882363, 0.5486466216101442, 0.6985615858534615, 0.7325873194920336, 0.5380627194808049, 0.6154857034243362, 0.6127910139754837, 0.7465486339510884, 0.8444357525394406, 0.7060573482964934, 0.7752785009207661, 0.8915186584819534, 0.885840514069025, 0.6951128796450732, 0.7196011464568317, 0.9172933713844303, 0.9647844708660642, 0.8883712687636569, 0.8291875378304019, 0.6169864491221603, 0.8669312127506237, 0.763535201301439, 0.8763394018999117, 0.551176022091528, 0.802343313689345, 0.7683068384675675, 0.553830971920906, 0.7058975427119973, 0.8094460946026896, 0.64165632499377, 0.9750819203950949, 0.7209492304598257, 0.7524176630992424, 0.6664517159425877, 0.7102430334623124, 0.5680012855515946, 0.6107209836685632, 0.6800428799252989, 0.7594553382654703, 0.6183349310020949, 0.7123581889267405, 0.9438442789034276, 0.6279926095473725, 0.6669367962375137, 0.9595806042294925, 0.8075505160606985, 0.7405473610827061, 0.5968598087871301, 0.6202058479774778, 0.7591638044395261, 0.519175007164225, 0.638514769579802, 0.9646050155904127, 0.9142861959874933, 0.5325511463118584, 0.8668797853926812, 0.7247342329051075, 0.5375404276592057, 
0.606106629763608, 0.6510276874540741, 0.9665629050789704, 0.7010735172681433, 0.6386391676765004, 0.6851690252667673, 0.6260219085841905, 0.6244754050725214, 0.610359241861582, 0.9900629960617306, 0.7663593265069908, 0.9414239794371723, 0.9423386450347755, 0.8438073493395019, 0.9158497963424174, 0.8652230134237131, 0.937304782224589, 0.9295803558713744, 0.8405963496780899, 0.6160714758514529, 0.8284478363272536, 0.936262694845259, 0.8530912283729801, 0.9767465695562051, 0.5210375800021195, 0.8318167839866384, 0.8895508940457484, 0.673316527048799, 0.9050589530207358, 0.5702790272133469, 0.5018053522952279, 0.8618362128826578, 0.7416574193316356, 0.8884030042182025, 0.6211356406074363, 0.5462334370190135, 0.9722816654326324, 0.7660522565528849, 0.7740643649249009, 0.9884586796739196, 0.6856093912171994, 0.9343348466916453, 0.7534323841835231, 0.9407375744090385, 0.5102362977658295, 0.8155032188441333, 0.9212128304727141, 0.8846060684629117, 0.8060287995244096, 0.6867797414939285, 0.7519531763588319, 0.5332496351349016, 0.8173302182309073, 0.5754351212565052, 0.5259338182855648, 0.9078956511831233, 0.9299737887835845, 0.7514553741936463, 0.5599414805446046, 0.7471238101251936, 0.5731084120514538, 0.7110826639382348, 0.5842814160008232, 0.7355317660604953, 0.8572518089473518, 0.9112764932331031, 0.6953997542587336, 0.8859823662234478, 0.8300873742852216, 0.9379580967186956, 0.854972235772264, 0.527788371915257, 0.8463968839752922, 0.963011968192431, 0.6509051910020321, 0.6939644836491438, 0.5420384501359472, 0.8606216213198269, 0.580062615782303, 0.9429538859844758, 0.8666330910758171, 0.5779589071380125, 0.7615158750500388, 0.8683357018062741, 0.8622541875018344, 0.9528884183168349, 0.6155720261887566, 0.7424780601983114, 0.7131063038800582, 0.9054800337484271, 0.6885140713780113, 0.5351390644422231, 0.8960293002411779, 0.6919323571283043, 0.5081010005188236, 0.7812128481100131, 0.5189909835491235, 0.579483349980277, 0.6554444591279569, 0.5578159199352366, 
0.7614373259151841, 0.8115321989028013, 0.5871225312756327, 0.9738092289949924, 0.8066925765534116, 0.5570737799598724, 0.6594240042757384, 0.5101143236047649, 0.7214438657957855, 0.6451547313497454, 0.8729303124137765, 0.8090150638838254, 0.7370459222801092, 0.5740570306481967, 0.8120736822594665, 0.5589887052701379, 0.5277610448917851, 0.6996538549282595, 0.5097863924602217, 0.6187765117087478, 0.8020349248448608, 0.6134538686313513, 0.8873691716430493, 0.9020321333113648, 0.9316293258020547, 0.9033590318249979, 0.8136310129251647, 0.8039834749189083, 0.9401260292587739, 0.9451505865043041, 0.9938989698451595, 0.9419302458689678, 0.9762091899345516, 0.9929292667565734, 0.6479451348558555, 0.9974633817491153, 0.7140576028390162, 0.5254167609104066, 0.84090401054239, 0.9024316134600785, 0.5314983446325311, 0.8328917260236985, 0.5888597945829162, 0.597531496801511, 0.648743874452387, 0.9806388910584778, 0.6415187711846876, 0.9928760975663471, 0.6802205140338742, 0.9335593000258582, 0.7164390898519668, 0.8954916998794773, 0.5747882366466543, 0.6359277787399777, 0.672126468355891, 0.9591971168950333, 0.6008465868903998, 0.9155846367756961, 0.5406994591241101, 0.584670298201952, 0.502431940959399, 0.6363355748399453, 0.9922307034850599, 0.7599427470387301, 0.9381959458854201, 0.8352736024358564, 0.838879513482912, 0.8838244387947521, 0.6842730409781093, 0.8390188375457037, 0.8859501361168762, 0.5980012273368301, 0.5298365061281847, 0.8929709443496288, 0.7921666210057221, 0.6304184616799877, 0.7050483724425912, 0.7645808149602581, 0.7760838221411102, 0.7688677530586573, 0.6156237753575502, 0.5381444749210818, 0.7470732333204531, 0.670155007079845, 0.882898128817889, 0.6099262417548725, 0.7165157837754318, 0.7615502783524886, 0.7973686271024021, 0.5812380882436088, 0.847861693425384, 0.9558273595831308, 0.9774075956258506, 0.8723136018342393, 0.8167890767035548, 0.9630948319407264, 0.7759706682575028, 0.8123644341109686, 0.9528447159687905, 0.7490128679031991, 
0.9611666660401599, 0.6185288356804778, 0.5051246850337874, 0.5390823558642333, 0.8718360849389197, 0.5899056630968894, 0.7752689819981222, 0.8086352031747712, 0.6628460465720287, 0.8484361990626583, 0.515376897110261, 0.9876573221522582, 0.6347701593221325, 0.8876638718457526, 0.749789469039833, 0.9521796690020681, 0.8446538819025291, 0.7531646021362097, 0.8318285738037979, 0.9062585521062312, 0.9830289468068818, 0.5599400345432102, 0.9832078512194602, 0.631588879659621, 0.6249058522592947, 0.9655608353116116, 0.6830925756347657, 0.6143291425628715, 0.8943424353518517, 0.7667065032321021, 0.8958334427137606, 0.7182405806377623, 0.8910642506752734, 0.6407521347532742, 0.6366710876064219, 0.58454951691665, 0.7802003879230256, 0.5967094453644626, 0.7599141725184693, 0.7289646594138989, 0.8130030491450646, 0.5565079709066917, 0.8413511811682808, 0.7998014028453951, 0.568435437688463, 0.5443547360332588, 0.5271927266890216, 0.8433515795458838, 0.8129723958832868, 0.6250091694815189, 0.9325871064500422, 0.7736959384888391, 0.9876795517087997, 0.5458986728362798, 0.9627575613084388, 0.8977570832445846, 0.8803167740395368, 0.9049364656488221, 0.9452015899228319, 0.6821262598286241, 0.5438783368264659, 0.7323372065092086, 0.9153005866268241, 0.5626122422848678, 0.7860795818501467, 0.6683912144252506, 0.9691888935061455, 0.8837764010825561, 0.9752913164778584, 0.9126462884981708, 0.730696093067579, 0.7064379219806856, 0.5146479282377872, 0.9006901195679706, 0.7191289651370956, 0.5591833072067771, 0.958552390894909, 0.7628794923977763, 0.8305018769289896, 0.5194459548363943, 0.8310148350051647, 0.8259927232265334, 0.524345785196034, 0.94267243971576, 0.9427644426921535, 0.76690242626019, 0.6698380656976366, 0.5721236274872512, 0.8567633190459315, 0.5779906888510329, 0.8443774314603838, 0.5375732642002494, 0.9144415342115526, 0.8748070191140068, 0.8148139988800012, 0.65051334989165, 0.8340292148060975, 0.7289139610878583, 0.8087470334701435, 0.7097861330583222, 
0.560585297023894, 0.7290418276541575, 0.5657325267504143, 0.5411241177263562, 0.8275905857505008, 0.9525735226706874, 0.6484542585046869, 0.8790366331244743, 0.5282236984755825, 0.5091014567455849, 0.7872408027829678, 0.5404956654387281, 0.8137957028760634, 0.9436859896351468, 0.9528568075735245, 0.7262864788909804, 0.5915427791278247, 0.8235317721401485, 0.6328543156645501, 0.6584896635072388, 0.6905347554988543, 0.8400485506822434, 0.6066461202342359, 0.5382360693460548, 0.6373922903272309, 0.8939352866595665, 0.5682109856665312, 0.9691048091306327, 0.6873541044561793, 0.9099412183176601, 0.7249062632100189, 0.5105614039435256, 0.6349202881212189, 0.6472570530897139, 0.8450321305264826, 0.5868204921881506, 0.9737792915323775, 0.7425130822676086, 0.8027138508596272, 0.8458583537109654, 0.7554309675762917, 0.6192645911038113, 0.6548135116311571, 0.8085195246224334, 0.8939323967546178, 0.6903565366215372, 0.6304922897198169, 0.592585647160778, 0.7208711107751342, 0.9036263469579957, 0.7046695032776809, 0.80426207523234, 0.6530706123041505, 0.583289320084614, 0.7948547391245189, 0.6481229284355394, 0.9341463806413906, 0.932315781173275, 0.6790351884246046, 0.5277797936246021, 0.9696891513712873, 0.716995339198419, 0.5041086744895336, 0.8202605391226745, 0.651821660372613, 0.7745436602331879, 0.6784985225345437, 0.6422550609075417, 0.7183222450230121, 0.7909285250122038, 0.6597729507577396, 0.7343946193886229, 0.5132210770319401, 0.553652131485763, 0.7475216970123661, 0.9592811665022869, 0.52520735804243, 0.9394144327491536, 0.6229103875120308, 0.8200015030697068, 0.9561538515545065, 0.7535315737457599, 0.6189009410429303, 0.5404672940639084, 0.7088431578702019, 0.5149915004982161, 0.983613778265249, 0.6889343238574439, 0.5622320430916421, 0.963000873160512, 0.5122932779957788, 0.516725503527232, 0.7135417066727852, 0.5566818934342357, 0.6896079643298365, 0.9763897735027758, 0.8787416205483253, 0.786722946877527, 0.9023727843524323, 0.6900250147856892, 
0.8151088004966868, 0.6986830375819723, 0.5053539031062024, 0.6178510498438184, 0.9039054805947304, 0.7474549893535758, 0.8947974108505601, 0.9700239074449143, 0.8150127453723035, 0.6235149632014764, 0.8063168568462926, 0.9345699764354078, 0.9989046859683248, 0.9820763910656026, 0.7291528251665678, 0.7795425624581174, 0.957824347877579, 0.7695661895237658, 0.626789171125598, 0.7684760650335652, 0.6474634669842221, 0.9583059779285705, 0.9227190720105092, 0.7457243441381419, 0.7022368661603441, 0.9995750173682234, 0.8156484058692679, 0.9234454292091645, 0.6643987604634044, 0.887427985941742, 0.8458516646912084, 0.9210870571067576, 0.9721680057001627, 0.5513144812813404, 0.9499726157707897, 0.6755970242367426, 0.7207979879633469, 0.9574401345464179, 0.6721219644620198, 0.8912567139286913, 0.5649612724745543, 0.6093280627138256, 0.8484193148557175, 0.601533193608862, 0.6369327130514106, 0.925478177103229, 0.6398472950522034, 0.8399647348634085, 0.9905025708190166, 0.8148076029141211, 0.8749062935447629, 0.7388112580785817, 0.9850893471558936, 0.870587898128725, 0.7263623054884354, 0.787998951862966, 0.5523843157411561, 0.8843927838346226, 0.8538768543642434, 0.5466594702626075, 0.9099282857611934, 0.5080208202944156, 0.6609723239428319, 0.8815207376837048, 0.8102745873597922, 0.525005885306513, 0.5358682307254468, 0.8992181379571482, 0.8384408687497122, 0.6215975118068529, 0.7176847452247013, 0.9662090286880146, 0.5438349122285611, 0.795342462719091, 0.8702423288678787, 0.9121860783442951, 0.8909548244730621, 0.5410265995595599, 0.9282922573870283, 0.7779908495344117, 0.727440997246557, 0.9163263768095011, 0.5456742089949524, 0.953168296843071, 0.8883643220862474, 0.6451563625583243, 0.6200529909209671, 0.5572477526071977, 0.503826716376587, 0.7524658956472589, 0.6383948291131394, 0.6081594797917875, 0.6038270053547952, 0.9850784455908708, 0.6022691930481916, 0.6511221411271334, 0.8269905098878139, 0.9006470676394241, 0.6920073760144962, 0.873203302847118, 
0.6423988995242396, 0.5016870878772459, 0.9455047725419595, 0.8170509488106454, 0.9740256494602004, 0.8686499264702567, 0.8791985646266345, 0.9569967583365425, 0.9394370064797195, 0.7900744941761104, 0.8658391011359841, 0.5202000833878173, 0.7375816192513269, 0.8627655375376975, 0.6759598878239326, 0.9063384487678618, 0.5633077548986924, 0.8053789864456358, 0.7080722154605431, 0.5583424521825747, 0.592213302858402, 0.7583614390035206, 0.56409052117887, 0.9381754187367397, 0.7509125734544557, 0.8538081392670513, 0.8328273130800087, 0.8125428154911638, 0.6750919089906611, 0.9211555053312283, 0.689597846812547, 0.9520640090032837, 0.7517545971419224, 0.8515580223862326, 0.7365000414611184, 0.755568496921901, 0.6122642174618558, 0.8034096821154189, 0.5884813267290154, 0.9604039100767193, 0.6802849922560569, 0.9814732254079495, 0.5775918640902618, 0.5619295891345062, 0.6398889711637481, 0.9473644749042704, 0.7750791548719661, 0.9021175121383924, 0.9623522517347354, 0.9158087559525663, 0.5187472907433055, 0.7039955566260221, 0.5507484938012287, 0.5998876624585641, 0.7701402560575941, 0.9443675331709085, 0.6517639449754725, 0.7016342077472866, 0.9288161896316457, 0.7434544344412755, 0.6998549456065013, 0.9637501070534582, 0.6293082483166063, 0.6762221158204075, 0.6026428694101743, 0.602898811293697, 0.6507007768190424, 0.8785680691097493, 0.5739258141272978, 0.5343064339406689, 0.7579644512073929, 0.8010382186491996, 0.7524677050778226, 0.7292138949616781, 0.521363622885647, 0.6362021196367773, 0.9130765600756294, 0.9874075019916668, 0.8095134845894123, 0.7235142971658782, 0.7596593566176546, 0.8356143757275567, 0.8695149726171552, 0.8915512804409147, 0.5905889111778986, 0.8801435573060971, 0.9734177547499074, 0.5995069415987844, 0.5802512302891869, 0.5102361709683778, 0.8182743270516291, 0.6987333803762386, 0.5203598333507626, 0.6251433915517772, 0.7961663591625263, 0.7210427621907435, 0.6022964402743824, 0.7145152341650545, 0.8478788634848375, 0.5884197233835464, 
0.857653953728762, 0.6266284796009143, 0.784354084349269, 0.846658827823612, 0.5667078842705948, 0.6750412884108294, 0.6084592545841437, 0.7575643751436814, 0.7783075232436522, 0.6720296806089818, 0.927272929301689, 0.672584268075255, 0.7923979691378649, 0.6146715574452595, 0.8195400484407573, 0.8264829279219867, 0.8263469057942319, 0.5854905115332996, 0.8915424565597754, 0.9247894741672364, 0.8264554602186776, 0.8536310883635462, 0.7486588300808231, 0.5254376898061814, 0.5301705260146927, 0.9271573754978406, 0.9451965897827976, 0.8404293707218442, 0.5134127778557729, 0.9067191876584044, 0.9905627027811024, 0.9547872946662965, 0.8470549420085497, 0.6927815412160345, 0.9566535676340386, 0.8056972659945925, 0.7548200125689453, 0.8614815537541218, 0.8711416327065693, 0.6015723669253432, 0.7805768041356134, 0.7273038518659105, 0.7726825252120977, 0.7930049851220746, 0.7032223585848507, 0.5070103032338522, 0.7442063673694752, 0.6378830752892349, 0.6451863040244467, 0.921573446103007, 0.9602090427092913, 0.7537429814576123, 0.7551879617543593, 0.8536722279924039, 0.7007071070037947, 0.9578766591094607, 0.876921193699804, 0.938104523103012, 0.8190442495713978, 0.6546446314558916, 0.8113543476338848, 0.5704320401043377, 0.8533534369073406, 0.5573571555776001, 0.7651814801688008, 0.5435994244908173, 0.8507767957550058, 0.793071822981676, 0.8535694513917799, 0.5600465870616912, 0.7836229624487587, 0.8062104426907427, 0.7324453922747147, 0.8788065046144206, 0.8620993764326443, 0.8112067663660392, 0.9570330456763347, 0.9611661432710616, 0.9105719963455665, 0.5332709501370768, 0.8210583351732247, 0.707730933430381, 0.6454428737076741, 0.9373219896733866, 0.6870180483498805, 0.5172031139024935, 0.9447487924026072, 0.9149892821492694, 0.6593929956691078, 0.6748789794396138, 0.8033064983750899, 0.9593106074829973, 0.9433708098819871, 0.7184538968001346, 0.6273642343773781, 0.8914831108195375, 0.5046551543996864, 0.5764664052389686, 0.9012176490857441, 0.9546638705240875, 
0.5408155995639051, 0.9275062023142994, 0.6591263062734736, 0.7763075366337855, 0.7100178349377089, 0.877149707024715, 0.6085830848720942, 0.7145360140949253, 0.8151823344903565, 0.7865171786459932, 0.5637822987564391, 0.7641634784401488, 0.5270841984587352, 0.920532455785569, 0.8801178972258994, 0.9224045687995379, 0.9526291790780508, 0.7437287416399398, 0.5558198152755875, 0.8676539317244761, 0.7872936391207928, 0.5656202948582094, 0.8382606722997767, 0.7595506796727307, 0.7400336775425911, 0.9464791614822511, 0.8161265375008067, 0.6273588910127853, 0.945928184441327, 0.7981162118958123, 0.5067700238676678, 0.5265202485343048, 0.5970305794062566, 0.9829042482876664, 0.5127890718236132, 0.6899414819965161, 0.7560700383493149, 0.9732748262134663, 0.5500167053442815, 0.60541277734417, 0.5677956985692301, 0.9841703782408138, 0.7907512107941528, 0.9470710286917312, 0.9203478561684625, 0.5719985358355995, 0.8078472891679279, 0.8535237587946762, 0.9802258823352782, 0.9261990557396995, 0.5748295915679753, 0.696299906325114, 0.8599410468081827, 0.7229706620855674, 0.7853632206866872, 0.7827261166992192, 0.7880015323826622, 0.6345208423299855, 0.837322444820825, 0.6814436307454751, 0.5789019410556554, 0.8411774766866071, 0.7563018778393915, 0.8257627721329093, 0.7078522928607585, 0.5588068646849247, 0.8996611726132453, 0.5626930876192922, 0.8696784943924545, 0.6994350461658763, 0.6194609480620901, 0.9096358548892918, 0.6919787629509246, 0.7349627705219546, 0.8628418752639246, 0.663495276873995, 0.7773202029827263, 0.7881105296003514, 0.662299772190085, 0.9680182159555129, 0.6146416246164415, 0.5722110542110519, 0.9510499533989825, 0.8648936274910187, 0.638633609670501, 0.8804828513898554, 0.5734350921326448, 0.540288885202939, 0.8113655648089859, 0.5640554192295862, 0.5827518588513769, 0.633060530818465, 0.7504263583963813, 0.9954796320796624, 0.7635363694619919, 0.6073487026300772, 0.8502687046178523, 0.5135552640166979, 0.7795706196916399, 0.8793062505056686, 
0.8331065486226721, 0.6161546505838366, 0.7481600077595598, 0.9643270493827892, 0.7288964137170946, 0.8367814377729932, 0.6145385517117997, 0.9024512153779904, 0.6229123154107634, 0.6247955203658309, 0.6161685825257057, 0.5080357957905862, 0.5481743584202446, 0.6182044783216507, 0.8787798800171496, 0.585243996132429, 0.8666336011794235, 0.8873164177152109, 0.9876059094827552, 0.7478530971066344, 0.8559070512506259, 0.7555983856407615, 0.8802918276171847, 0.9995307933624052, 0.85446526092279, 0.6220927176140952, 0.6490344974530233, 0.8119885341242474, 0.6747569094416287, 0.9425895224464698, 0.702620560511449, 0.5313696780011212, 0.9442474114038524, 0.8712430025368854, 0.6493190271553406, 0.813060207881863, 0.9269564577910003, 0.6299301056195192, 0.7816433204947122, 0.5566108077481298, 0.8890185865966973, 0.500753235052692, 0.9468290878153847, 0.7372072588020949, 0.5545519004972531, 0.670833481131937, 0.616193841476023, 0.8986974993546839, 0.5763223761546808, 0.8235893850624895, 0.6032878994490426, 0.7485281863098723, 0.7947786233571704, 0.9934402868548611, 0.7334892105647071, 0.5274198313162068, 0.918111932251708, 0.9880382660630362, 0.7124923056196426, 0.5995068821112151, 0.5473037461084753, 0.8711127504663319, 0.9131677258171953, 0.7217905302233061, 0.7849997981255816, 0.7677096258998422, 0.9596922244249328, 0.7487603248761561, 0.9726790136024184, 0.7595528608631185, 0.9428393367497298, 0.7396314143359656, 0.7172930341976291, 0.5578711201719524, 0.7565354858445237, 0.6389054521840756, 0.7429964459843309, 0.7970501726404824, 0.7186306661417305, 0.5454836396796867, 0.9031311422594324, 0.7325960784955444, 0.7274287698362759, 0.8652904314011867, 0.6186953739191572, 0.7631535654158992, 0.5238913866851198, 0.999143944372618, 0.7067692092028673, 0.6487494071669386, 0.7348530195742778, 0.5685457823164184, 0.8847089939794874, 0.5543922524770499, 0.5348833914862561, 0.7608599561698497, 0.9647928650443439, 0.9869803538337834, 0.7211512308253502, 0.9621994252468757, 
0.8413977420702187, 0.8514056434632854, 0.8402966615261034, 0.794026178421396, 0.701827383113121, 0.6822076934252794, 0.6339780986347037, 0.7257700944133099, 0.8998436535774927, 0.6151939278839609, 0.8437399400606327, 0.6210726334403349, 0.9698021245415087, 0.6481201404539052, 0.7766404809347658, 0.6950539652151106, 0.9707309737492253, 0.5630151353838351, 0.8384713269694642, 0.8188040355103294, 0.8258751716582466, 0.5983714696969664, 0.5112268486358945, 0.9214633576351052, 0.8931068903803882, 0.5691699425481753, 0.8263579601130452, 0.8256361916704775, 0.901023158451165, 0.9122594008496367, 0.5659549414456948, 0.6989323082407799, 0.6569622950515634, 0.5101767073189937, 0.5916811292308409, 0.553150415558747, 0.8889833195162926, 0.9246654696061577, 0.817609902168901, 0.9115216418143018, 0.567636866896235, 0.7968301723037772, 0.8330907820855875, 0.784033932270391, 0.5645574089098782, 0.8074267272654183, 0.8341202287383649, 0.7279131787166397, 0.5772903674376657, 0.8392026554517139, 0.825861931946951, 0.9572925014508471, 0.5200747695592212, 0.889692253241978, 0.8146562547606911, 0.8653388092317644, 0.9241159841595665, 0.7820255326425017, 0.9127446107468355, 0.6300173363329855, 0.9475348682438501, 0.6813910682551169, 0.5569271148248549, 0.9532334419305619, 0.6418067828254717, 0.5645571570858594, 0.7064208306485893, 0.9027066530176826, 0.977201030182089, 0.7031923588081093, 0.7146707522748271, 0.748425899564244, 0.6066470627896174, 0.9612501069614977, 0.9746667931175866, 0.6862450804776166, 0.9413028998572411, 0.9656441598576231, 0.7450283204266108, 0.6450660510937563, 0.8853063025511834, 0.7121733094259698, 0.9449053717859148, 0.7239193952997643, 0.845753227585262, 0.9122209883435471, 0.6820353822579301, 0.6673635014589321, 0.7143696192226399, 0.7628478587224743, 0.7374836775015865, 0.6415038866249959, 0.5287862335451881, 0.9433405942240729, 0.5826792495796665, 0.8408206871444734, 0.7286861299688664, 0.6038108104872328, 0.6973103880443221, 0.8358522406385793, 
0.9817563380509069, 0.7337094743949433, 0.8156995649496015, 0.8276048351163268, 0.762808281834767, 0.618774923076807, 0.9300024590660192, 0.7342685051524775, 0.9111010830956976, 0.7765664510997822, 0.7454823382026231, 0.5611193876205092, 0.8790369246164627, 0.5460651479193582, 0.518735551367685, 0.5981573890177676, 0.7593535058070722, 0.5867149493648689, 0.9012144789313516, 0.7303908625490259, 0.6054111733680376, 0.9230694482336823, 0.6754998682356821, 0.7657950042854373, 0.8917461318090548, 0.7277065017164289, 0.8300656556622644, 0.9464169110038737, 0.7764051454608454, 0.6401072665582903, 0.6790451695800153, 0.7874572429724814, 0.9225334931598199, 0.8689236632398647, 0.5859568959885307, 0.6095640342158501, 0.8639747324029378, 0.7478051406460526, 0.5468774097555567, 0.7078905644770714, 0.6126226916025236, 0.8322164608190148, 0.7182804913380356, 0.8051578278651561, 0.8592571667034561, 0.9203726284204113, 0.5234121047427067, 0.9431154259444916, 0.6900441252782153, 0.9346995226703187, 0.6634645059393425, 0.560572532550337, 0.8502497906372442, 0.8182358851610669, 0.7518046552799265, 0.5247012970128179, 0.6586307970032184, 0.5120831752098587, 0.8440091946158168, 0.7827773970360735, 0.8000907005583608, 0.5241090232465299, 0.6348622352613422, 0.8055091183994266, 0.6511849730905439, 0.7425593651825364, 0.8549707689561373, 0.7540028941012511, 0.7500345258973226, 0.7324727922826608, 0.5147770457005264, 0.6309025232471924, 0.9985294889666865, 0.62430499183154, 0.9537358819169844, 0.9455805248930914, 0.7558546422650141, 0.9981282989499602, 0.8272179084665399, 0.8846282460643915, 0.9244280184130178, 0.6184742400720278, 0.8774180156282179, 0.8299104000033801, 0.9079433358534116, 0.6334531343672394, 0.9200618268835532, 0.6357440702656036, 0.5608817909805763, 0.6021199174289806, 0.745567901034807, 0.5463662444342571, 0.6998864630250998, 0.6167078139909388, 0.7850655507495918, 0.5364548274691097, 0.7956989224433798, 0.6270750136062406, 0.562446415948764, 0.8909051476489127, 
0.5883961679419427, 0.6276256240413212, 0.8315872208408964, 0.5423307216841715, 0.7642156467334795, 0.7724275438360346, 0.5905941768801375, 0.5379767167086551, 0.6102063456187611, 0.5863313816021436, 0.6619993119124339, 0.5692387778803469, 0.7768905259159713, 0.9117183121038104, 0.6656349935617215, 0.5520045951205603, 0.5617456778612457, 0.7868458418786626, 0.9452066817985828, 0.5972227805591066, 0.5705657585060575, 0.8252731067313452, 0.9733396808318372, 0.6706850192562287, 0.6493896946170803, 0.5139770241524303, 0.8046156135213163, 0.7007141593552386, 0.8072017574782061, 0.9116555398142097, 0.562032015203132, 0.5017301754012364, 0.5021793524636122, 0.5734980880325596, 0.8869209288748403, 0.8677278556378731, 0.5707507830869565, 0.6445237503442649, 0.6351207665043146, 0.8077377017949452, 0.9446482144987041, 0.7492669984269509, 0.793912730022251, 0.9336747908024192, 0.6205537274391743, 0.7817752484881659, 0.6558840852086159, 0.9691536409406591, 0.5085898176602608, 0.6146835319284232, 0.9423249053151812, 0.5121027067252859, 0.6522851828943392, 0.5175736775479259, 0.6628443575332758, 0.8586849266629135, 0.7183845566681548, 0.5525492531790187, 0.9383677359719214, 0.8649529305087882, 0.9619231435177824, 0.8323687236161808, 0.6400914922515439, 0.946792910565749, 0.7281326871873816, 0.6004290546034688, 0.7463553246968073, 0.7517345912523139, 0.673055983951747, 0.6674687781754577, 0.7576768436223122, 0.5991093608678153, 0.6721843950557009, 0.8560067429927001, 0.7162151976908061, 0.5761160066891655, 0.5323412833456466, 0.8801043544972289, 0.7728784902737783, 0.5896853458731668, 0.5608436945200279, 0.9520869280799094, 0.6572996983849937, 0.7362251745000691, 0.8451808296046268, 0.5348079896573321, 0.6765444825011437, 0.5441121958725579, 0.9354071588929211, 0.5495848290781424, 0.9383668344686122, 0.6628083240129836, 0.9576791989773925, 0.7008368184944684, 0.7776446249546207, 0.577839829505985, 0.8658868712054979, 0.9236407053929586, 0.9478590122071759, 0.6891956528688141, 
0.8095068266091123, 0.8894156185475123, 0.7839027195673249, 0.7848886042680494, 0.9402887780076754, 0.7216018873986432, 0.6218496394930877, 0.5610950684961125, 0.9145399230040417, 0.5801923603640277, 0.9071199562870658, 0.7749458524490407, 0.7997780783447195, 0.5237682953976985, 0.7453421703511538, 0.6409586129365161, 0.9685880091115262, 0.9548139141913399, 0.5097562845521161, 0.7228642195312357, 0.7835100502549499, 0.9152016833361435, 0.8148105975866964, 0.9761308139619527, 0.6006388973178163, 0.5546313953445157, 0.8941216978867124, 0.8367454695684147, 0.8617000217330564, 0.8077063219554494, 0.8461094202173969, 0.5030231379517469, 0.9759023571438188, 0.9716356065502298, 0.9653479973005785, 0.8571510990502644, 0.9570447597204104, 0.850702906830523, 0.7792552949345551, 0.5226550301689233, 0.9697676372068813, 0.6392130420192688, 0.5466568742682965, 0.6501696728422435, 0.5309210224100479, 0.5052210916946653, 0.9112849439210278, 0.879292610288035, 0.6971064414316768, 0.7297278726924157, 0.8057477652265357, 0.5393088040096867, 0.8486197546144917, 0.9733182984394902, 0.7495043168493092, 0.7428992861508301, 0.7203055117129673, 0.9623141368816615, 0.5915442718807385, 0.8622644369656851, 0.8462932963829382, 0.5452108229799939, 0.7284776946149354, 0.7084152354905573, 0.9332781308134928, 0.5346134551288344, 0.872226045261721, 0.7532301579140371, 0.6490712548113965, 0.5430135213311149, 0.5195214697454211, 0.646345142936324, 0.610465889839138, 0.9561206207822153, 0.724422819193083, 0.5684384497585275, 0.8599378498890984, 0.9867570638858985, 0.5635868796960433, 0.9381551201410899, 0.6065842038410565, 0.6511806794289744, 0.800782986553698, 0.6081176242169162, 0.679302064313839, 0.5681051885889936, 0.5342896563428453, 0.715188374500145, 0.9126155755952254, 0.9843413522321215, 0.9443308011901299, 0.6546746475639544, 0.8211291982898015, 0.8198029940002025, 0.6412979623559019, 0.7178645801751382, 0.5139582834379188, 0.9211632385624635, 0.9625957011662682, 0.7863737536539719, 
0.8130161431654033, 0.7329800018565207, 0.7869170188662548, 0.8075983930529043, 0.7450612758707518, 0.7443209363220887, 0.9092965381251698, 0.9358638140181688, 0.8743813717974113, 0.5357420572691844, 0.865345564555099, 0.9125731382935203, 0.5729588462146068, 0.7728075295251946, 0.7213714255335655, 0.723165205446528, 0.7855528550860515, 0.6089801024893772, 0.738626587980695, 0.8478957178133267, 0.706932287686542, 0.9346092356909743, 0.7368969535637586, 0.6668326023675275, 0.6353046235908708, 0.774375947133229, 0.602199745489775, 0.6277548835157413, 0.8071604601319687, 0.6062652044274837, 0.9689677745939154, 0.5308076245830412, 0.6783787811638974, 0.9982904440917246, 0.7679371052411406, 0.9022001231711225, 0.5456763217831189, 0.6602859072345686, 0.7742118195665834, 0.905682464996848, 0.9066332189244636, 0.7777671636982091, 0.7208065291544282, 0.7576201604645153, 0.5100895952698945, 0.9522650441122247, 0.5834586857233564, 0.8059366232368952, 0.5640918348614699, 0.7402316660401315, 0.8400323678447628, 0.8566613556111322, 0.9383199372287885, 0.881613709232308, 0.5519582772988685, 0.53761763000599, 0.9212977540044609, 0.7666457725214366, 0.8603665337026091, 0.7058813127008304, 0.6945478926003378, 0.6817435119306727, 0.7033558154485735, 0.980983803988921, 0.9796537362419302, 0.9763526503981008, 0.7962519876200613, 0.7502543755501775, 0.8421569704274317, 0.9365307078687406, 0.9062084129895783, 0.6788264581848031, 0.5192554151424278, 0.7839003755737597, 0.7844992306275602, 0.8926601608129658, 0.7885260573948514, 0.8751497416932208, 0.81369691385053, 0.6404855922517633, 0.6948780654280056, 0.5019073320082399, 0.7167203915802096, 0.8733012287437973, 0.9119446254071919, 0.9927501690730378, 0.6213757075682773, 0.6869976325752633, 0.9696350130077751, 0.7141291853387619, 0.5121451139487312, 0.5545051267558032, 0.8362090944256901, 0.9030063074341159, 0.7944848797396441, 0.687704815269392, 0.680652570987343, 0.9386247045464438, 0.5068195294120094, 0.5498594781004331, 
0.8145144060177214, 0.5882656804467816, 0.9267088853481893, 0.7044393555370201, 0.6132019407922452, 0.7439902195247852, 0.8341850920449152, 0.8807811731782013, 0.7818834237923187, 0.8995184526437098, 0.6340571178048005, 0.8355087928815458, 0.5527266020604402, 0.7291072315550395, 0.645795298130898, 0.6059086867396852, 0.9141845593369915, 0.7171227471864756, 0.6556896304123245, 0.7827139732694759, 0.6278872803785622, 0.8406265268078634, 0.5731241875130875, 0.5986202955323083, 0.5289643873090528, 0.9615229061090144, 0.7625363165240879, 0.8410821672077851, 0.5564375408142422, 0.6629482340559167, 0.7991766775986158, 0.547664409460134, 0.73194324273844, 0.8713013143216064, 0.875922353830997, 0.9643898353537925, 0.5063588770751604, 0.695943717674284, 0.8610531885909596, 0.7754348776992881, 0.7245336301087825, 0.931894810279764, 0.6338982281667241, 0.5950023324529894, 0.8262459611812842, 0.6760595464999901, 0.8825744524551986, 0.5554924343519955, 0.5738246562107947, 0.9810342120383595, 0.6544986491508749, 0.9134303190787825, 0.6920794416877338, 0.6505801953702288, 0.6977499833685674, 0.6982198368260573, 0.5149033434386041, 0.5146180710147904, 0.5951707746185384, 0.9771135084474386, 0.770133790997161, 0.8098329720148314, 0.7092318541889725, 0.6256194515157236, 0.532959851866853, 0.6679616614689515, 0.7879666901876432, 0.6808493948179069, 0.5509888785017223, 0.7725238185552872, 0.9068676128058377, 0.6034928149419458, 0.5722120445744917, 0.7517443123711283, 0.6916908448466246, 0.8457167470691045, 0.6394896758536517, 0.9380482297250694, 0.7770318245571424, 0.6436675621376577, 0.8700643026030307, 0.603255303132249, 0.5712122870785347, 0.6597129435495297, 0.8602173251299006, 0.7946960986192517, 0.6475462830016518, 0.7694977132188627, 0.8944655639254033, 0.5359147856990061, 0.7375510203473531, 0.5097476551666367, 0.8347314734282785, 0.7882294103347441, 0.7066343646695944, 0.807483121496549, 0.8060697343654768, 0.5880230962657584, 0.9831371889439922, 0.7009827635804451, 
0.6782908631158958, 0.5235392417594522, 0.8741800958877277, 0.7311684933664615, 0.814441943965893, 0.567364325688581, 0.6540237008179917, 0.6936323225440553, 0.7645392433615947, 0.8483120270909061, 0.9562242580768333, 0.5541307240185438, 0.705656100026643, 0.6846054746653762, 0.8666024534455358, 0.6861771763690152, 0.7247974008014981, 0.8609049425813715, 0.7125879573516674, 0.7652668277519893, 0.943147345031523, 0.9354093586563963, 0.559531866645064, 0.6429041587381861, 0.8176128159101701, 0.5448218508016334, 0.8861788227278289, 0.5848043420160982, 0.571322543178487, 0.8863270809516611, 0.9590111955990929, 0.9456582893488796, 0.6180352686646873, 0.9568130191248725, 0.5304520542979141, 0.7670240081639824, 0.9867716220594195, 0.8561868811603471, 0.6423336592982818, 0.7060196700002892, 0.5396206242397599, 0.6486515643221502, 0.8350528930543828, 0.8904718859633558, 0.8849617441979205, 0.8840012247954148, 0.804537977182186, 0.8314353792369302, 0.5868656200663513, 0.6410244937304459, 0.8282022170608803, 0.8260097106624049, 0.8249966168271363, 0.7563996339644709, 0.8635288208874715, 0.9599758895330328, 0.5895603734083581, 0.7766104903812716, 0.5988553653805195, 0.8816064423187528, 0.6120202899187852, 0.7320082912163881, 0.9265385303728899, 0.5949627301915756, 0.7338708989444443, 0.5863513291489804, 0.6939356024059014, 0.6103146192422468, 0.7412072469109999, 0.6863110794487575, 0.9128348084734527, 0.5186471601801138, 0.6403778772051321, 0.9473970371032234, 0.6583043476088564, 0.6071009149391573, 0.516262414690672, 0.6509167602772633, 0.8658674482570445, 0.8322527756246918, 0.8940177494711823, 0.6842691723133436, 0.8424783339542028, 0.5512693052432762, 0.9166437266033939, 0.9704382843098669, 0.5657196875124006, 0.6083402998333658, 0.955788676251764, 0.9701083387342702, 0.6829834035052711, 0.8639031803232551, 0.8706762629400502, 0.7843904340376054, 0.9551744671024572, 0.8957109684241269, 0.917334381753994, 0.6922632023418458, 0.73336053392576, 0.7798705981889831, 
0.9378073120922052, 0.590693000242962, 0.9738678570329453, 0.5344210552635699, 0.6197637000653546, 0.8317625037012804, 0.913716457741637, 0.7806347212500854, 0.5557678987671, 0.8899603039488595, 0.5445691987216891, 0.7366744786191333, 0.7841756919334715, 0.5852005611200626, 0.8329279669901193, 0.8040087867876069, 0.7712307444734202, 0.5940654892228248, 0.5334821340485418, 0.7248554219203884, 0.7909067038719133, 0.7735310184622914, 0.5698251884642869, 0.6729420124389591, 0.5449253633728284, 0.6825856789491799, 0.7295457445763247, 0.7187578241760633, 0.5582246927810103, 0.7424181672330743, 0.8119487548198989, 0.9148792937907968, 0.8628668415611185, 0.6332227571483795, 0.854388274929333, 0.7346834365714257, 0.7277593814401921, 0.5454699248849455, 0.9858149401149168, 0.9989086913650225, 0.7461639210306952, 0.641232483931812, 0.7984901819307322, 0.9125707736366453, 0.7151687850029571, 0.8062836009543692, 0.7830903656028803, 0.9934212171811951, 0.6258450150653432, 0.8320214548800512, 0.5647124985333305, 0.9977602574834858, 0.7851561704564336, 0.772663657827232, 0.999129593939615, 0.9649054427962589, 0.5135658820103635, 0.559213129276771, 0.7130187930643035, 0.6836920667214743, 0.5791034958861234, 0.527420843359415, 0.6098411255931941, 0.9006613146202898, 0.6639339178293882, 0.8650297713338161, 0.9607230023815618, 0.8910548153637056, 0.946913561284271, 0.8259873592721821, 0.9305098099476632, 0.9995489299291134, 0.5941531720768485, 0.7181859173756693, 0.6453781563233435, 0.6497393395881883, 0.8158605592344631, 0.7368069057786195, 0.8627782625880944, 0.9154362485869156, 0.8138087699700992, 0.6057723623764258, 0.5822833771460674, 0.5703331295325742, 0.7235565314266394, 0.8260871374473373, 0.8913155905938991, 0.8082160512913152, 0.8026640102211824, 0.694688818618733, 0.9162346618971595, 0.8093021999873973, 0.7677176978229239, 0.6797645919274578, 0.7060332836342291, 0.5557186404896606, 0.7951666915597954, 0.7007606114293883, 0.9192844554259334, 0.6177766378001477, 
0.7757266540054095, 0.8906413902159561, 0.505172832338699, 0.5755273056499332, 0.6315498540729403, 0.6897774955433169, 0.8010000838792741, 0.5912000876296031, 0.9685112320314135, 0.5943729555407731, 0.6452018269359212, 0.7817047728275999, 0.9406614635186082, 0.9928880984154185, 0.6561385580490805, 0.8094125178283172, 0.95585329526015, 0.8973327854660922, 0.9006431473903977, 0.6881310037189279, 0.5933140305132764, 0.6194652054488542, 0.6910685435674477, 0.7594830872977948, 0.8630292475713552, 0.8243272980106855, 0.9133993478849178, 0.8490282296622598, 0.6428676021230243, 0.6841042202043733, 0.966142358136648, 0.7357849186609067, 0.845030147642867, 0.6660982554072683, 0.5308228414412783, 0.8251324535040716, 0.9923003241164127, 0.7898405843588828, 0.606457175377989, 0.5296125127696557, 0.9314741553679745, 0.6909170375864726, 0.821011688498098, 0.9807265516030197, 0.6385059276104881, 0.6396424398314761, 0.5333777484083515, 0.5020079471251464, 0.9445078458549953, 0.7639674505937355, 0.66337742935158, 0.9750087123012188, 0.6285864430421118, 0.6201723169895881, 0.5796567805127737, 0.5536062157143375, 0.6642452992313451, 0.7546205309445735, 0.5864510619048278, 0.9325021676719147, 0.8214295018823565, 0.5383742751450697, 0.7348503691235033, 0.5791521856202608, 0.501811245552434, 0.9249850449992457, 0.7561949118049076, 0.5101486482154567, 0.8405007434813561, 0.6402600659626281, 0.9651097168755072, 0.9135362951966591, 0.5440015686795683, 0.7939700842431654, 0.9432078212334916, 0.9434130407031716, 0.7989972033251773, 0.5096751939272559, 0.9462855412898847, 0.8079621555392846, 0.9458438238927451, 0.9863250935153409, 0.7229443997054015, 0.7616206933262266, 0.9188705486342424, 0.856828836136218, 0.7890238966466121, 0.8352870403509631, 0.575081809428017, 0.8969125463551215, 0.757490455462009, 0.6015342278774045, 0.987301754865823, 0.6058108410704521, 0.8945678249460635, 0.8649217467882571, 0.76820096335786, 0.974366531549832, 0.824494243214704, 0.7432697353984513, 
0.517405335995873, 0.5226858098120206, 0.7455420923419335, 0.8032190815310756, 0.9613294800125608, 0.6599229822342366, 0.5150708963000121, 0.5286630261645756, 0.8026329443852583, 0.753473409930953, 0.6100338854168279, 0.9098425787948861, 0.7764040136454808, 0.5547023823718359, 0.9954159354992405, 0.9326382627043859, 0.814280147707179, 0.9908521372990071, 0.7078438547991677, 0.9364099979495655, 0.792445076051467, 0.5837057459724075, 0.8207391624790952, 0.6417359980426551, 0.624079449332219, 0.9188922146048683, 0.5820903234518541, 0.6393494056381226, 0.8073469248332025, 0.806276576496421, 0.5249502569898611, 0.7687509370180354, 0.9866503466010711, 0.878651991862477, 0.8908781534104475, 0.5334104562980304, 0.608265045940033, 0.9976818157715331, 0.9368076001627965, 0.8712393396730702, 0.8202403371193132, 0.7029676154898382, 0.7320209032178553, 0.8189568313359281, 0.968181028650294, 0.5623053163327272, 0.5276408052517181, 0.8760417585145106, 0.75403785088512, 0.596282893580817, 0.579101416371324, 0.5753297332284895, 0.9497533456428063, 0.8428929147071598, 0.5772117313037906, 0.962377980272672, 0.609681319236999, 0.8931158257745957, 0.5202660670753174, 0.7268289855042824, 0.7563659117034911, 0.6499265780833137, 0.9811391499448093, 0.867338305651018, 0.9080491051987529, 0.533485553243938, 0.7534708741789329, 0.5085437940728297, 0.8188316771540649, 0.9025216145392521, 0.6945552031025255, 0.9615774825412295, 0.7858622833021707, 0.6523867525650764, 0.6346898708401516, 0.6694109151393193, 0.9685333160159231, 0.920048060374401, 0.5552562994117214, 0.628238405815307, 0.7318775159245502, 0.7547099405438841, 0.8448288688909302, 0.9717096494334252, 0.8186966808528653, 0.8408716892385603, 0.5450109392720257, 0.5153494833162451, 0.8854905491939384, 0.5605894780556867, 0.5703107338755886, 0.5200112901536316, 0.7326026839163379, 0.9391842651054512, 0.5643257189537567, 0.602865410174035, 0.9241939255824134, 0.5512678849240265, 0.6481399554248441, 0.5135056777485555, 0.5729660748070111, 
0.6456815864825525, 0.6591895463057017, 0.6398929732553347, 0.6910207218252169, 0.7959921058521271, 0.9259777642332325, 0.6561806447092448, 0.7158365354364511, 0.6821575999466986, 0.5528311487552074, 0.7146535907498739, 0.622452942105624, 0.6580864522015304, 0.5670507247207079, 0.82919754256369, 0.6608323288366442, 0.8025484528425817, 0.6177087904722764, 0.7384128623457358, 0.7388610185377728, 0.553336913288106, 0.9479533279984247, 0.5203688628985043, 0.6431527332048634, 0.716210600159803, 0.6261622393348977, 0.9383105409494176, 0.5408737504405643, 0.7730818983064122, 0.6547766480256558, 0.6481010185155645, 0.7791946731522799, 0.553492998967176, 0.6537374517333443, 0.6766786805021576, 0.5188554894044154, 0.8311436112358022, 0.8653653972981146, 0.8082131348180005, 0.8038047679540224, 0.7417503101696389, 0.8380476454024239, 0.6100495888713084, 0.5969201377461989, 0.8709770585999831, 0.805483158464779, 0.5281688826712634, 0.7289622572661325, 0.9864954883394584, 0.7102059042133135, 0.7804693885480007, 0.6490198408968446, 0.8502461003853572, 0.8547156769086022, 0.8460928825235974, 0.6832392492354693, 0.6829267499124465, 0.9693088426305472, 0.9977602396971645, 0.9007569751276137, 0.5784596674958729, 0.6943226062236734, 0.7628847853187426, 0.7423864042883617, 0.6109027134312737, 0.895781017413458, 0.8023086826370711, 0.9232428691991059, 0.6303943893067265, 0.5691417709783966, 0.770806898614089, 0.7775166412825147, 0.7561756510073728, 0.9537069320394387, 0.9375388061760154, 0.8491938559668861, 0.7162108809851075, 0.933227276701468, 0.5796376096102076, 0.5942275402139379, 0.73798710525725, 0.7531602006498359, 0.8609861957878138, 0.9993292774393739, 0.9882672237958687, 0.5082279463818286, 0.652005071865787, 0.7666905584635442, 0.6246764515790091, 0.817089335114332, 0.7533766091106462, 0.9158650876745709, 0.8531366914783309, 0.9703658307713359, 0.8979046409305929, 0.8336054729560878, 0.7813777879890325, 0.5755696590692969, 0.6223086077533906, 0.8358689022359018, 
0.9024607959489992, 0.9445943929904821, 0.8672840218092874, 0.9460065098321975, 0.8964418334072447, 0.8177006120876253, 0.7980456220646437, 0.9043212138821937, 0.6947570940484412, 0.5985873230496728, 0.8732026098748573, 0.9943448346025096, 0.9326324822233862, 0.8982423914093773, 0.764031116509621, 0.5154845434958297, 0.9888739813686079, 0.8620090829729001, 0.5854396221714744, 0.7275110108178051, 0.9745196897570672, 0.9751742768099403, 0.5733986158561039, 0.9497005186127838, 0.9243156155180137, 0.8378901079651554, 0.6232432274606056, 0.9814073731779145, 0.5281000986627694, 0.5322688591878069, 0.7979955795298106, 0.6237493476603428, 0.5896708549311787, 0.5175538732314885, 0.7921106385524779, 0.8783603883756856, 0.8595812013873637, 0.5936562829018449, 0.5626664947228002, 0.9058910911258635, 0.8088482183818027, 0.9615376469803169, 0.5306814819885031, 0.5433856185927558, 0.6241560652715579, 0.9575650238319358, 0.6412177401672252, 0.8418671863030278, 0.9520410193310292, 0.8960352841783604, 0.9617606069826208, 0.6736298896621646, 0.664049527111569, 0.6502041905886293, 0.806900923830044, 0.8088627266511897, 0.7066260604609347, 0.6773050611379681, 0.9048365691656273, 0.9543038077834733, 0.8720250630087429, 0.6236047308491066, 0.9426166007225182, 0.7677114608044975, 0.6996156707622303, 0.5441604969420983, 0.8011911542746293, 0.6402034949184502, 0.8245581010979006, 0.5758038490314188, 0.5845571722689151, 0.6524714850893409, 0.9229322729525369, 0.5858336635754108, 0.9571912799841746, 0.8302989895613408, 0.5579469917305357, 0.5711118834676805, 0.677194501880367, 0.9016355145377393, 0.8371620059367288, 0.8723932516542636, 0.7678504689595675, 0.8379077628757912, 0.9847733803677905, 0.5536230934947077, 0.7275564790701816, 0.578867992084669, 0.8277779332991893, 0.929638094511487, 0.5948046122614019, 0.6420613351263976, 0.7937285969129806, 0.6469042944423878, 0.7847446598581749, 0.9818050476828037, 0.7620814655779409, 0.8738994708791358, 0.68767861052721, 0.5991593651701254, 
0.9708492797851253, 0.6583779544271934, 0.9725103016053762, 0.9693010102027759, 0.7573455342047329, 0.643526562913773, 0.6704247946382799, 0.951214007183711, 0.8075311939463299, 0.8526310924467323, 0.7256381320194389, 0.9582572576465018, 0.8743028636138257, 0.6717992291928798, 0.6793941149751912, 0.969723460447266, 0.9232769099861382, 0.6972845758797357, 0.6633216106260478, 0.5836254230525214, 0.5562451966798133, 0.6505018412771355, 0.5884721895590548, 0.9712841365968856, 0.8360024825201391, 0.6516940542386214, 0.5330466373949143, 0.8936749675875969, 0.923566607781656, 0.9573195676805385, 0.9529469284504242, 0.6411942477369519, 0.7718224147919708, 0.7281394472765804, 0.6560022986729273, 0.8551126808530604, 0.7525509226028493, 0.660359968442717, 0.7865023351828618, 0.8479117407846377, 0.5807647029209295, 0.9137020190148295, 0.5313690878275665, 0.6836084209898337, 0.8636398358920199, 0.6958652763141884, 0.8951590051695422, 0.7730287272012295, 0.9452570302632601, 0.8811068619017696, 0.9601625262363882, 0.8142527908132556, 0.9407357862093204, 0.8214391528146036, 0.7363983662749805, 0.799188183731103, 0.6636725904102214, 0.524792818884091, 0.5688241990224461, 0.6982875398155786, 0.8799435549313113, 0.7533883329594792, 0.7318723152645024, 0.9392026415917649, 0.5913741174545608, 0.6729432039985715, 0.988305093031938, 0.9076778719513136, 0.7339013796148254, 0.8510488627982622, 0.6812759706032558, 0.6360481342406489, 0.8863190457243616, 0.9991504704532506, 0.8951844718232282, 0.8568024521573667, 0.5565837560938891, 0.7523221756364744, 0.9824715692978782, 0.6279008375754945, 0.5109995490316244, 0.9386954129542969, 0.7636682268315953, 0.8326916523674588, 0.6125440434075191, 0.5928224310700874, 0.8783983516420666, 0.7876182600198356, 0.888243089910171, 0.7984835531389751, 0.8441278503471469, 0.559163114723831, 0.6773638684400357, 0.5627385810559038, 0.680092808634714, 0.8047647714885862, 0.8373753344019073, 0.5219318459509306, 0.5386693291556837, 0.5556503247343823, 
0.5301915418812932, 0.5949414651372706, 0.5521591437503253, 0.5943092513369687, 0.6301506514667587, 0.8337996228011413, 0.9865772780266989, 0.6664672269682907, 0.8431867318173895, 0.6080538694828499, 0.978293330782084, 0.5353230578789379, 0.6564845764206282, 0.8628832277965969, 0.8760143654108586, 0.5993722806738613, 0.8850225263480376, 0.9081668303008512, 0.7782840382718053, 0.859383133064201, 0.9266919601265244, 0.5228351093248982, 0.6670949935221093, 0.9097208124848624, 0.6720749073798812, 0.9067490372755254, 0.631491054520432, 0.5825163564723502, 0.6195309776582927, 0.9469309268436334, 0.8563291858126825, 0.7721314866578208, 0.7711833164275967, 0.6841908040333702, 0.7969965982743461, 0.7886433569372231, 0.6198651052958921, 0.509598480216304, 0.7527655093666372, 0.8339090106383891, 0.7381070092181392, 0.8525151206030703, 0.5461951006757402, 0.8948720623249347, 0.6015357311793392, 0.8976992165792312, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 
892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29, 30, 31, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3573, 3574, 3576, 3578, 3580, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3592, 3593, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3611, 3612, 3613, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 
3676, 3678, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3733, 3734, 3735, 3736, 3738, 3740, 3742, 3743, 3744, 3745, 3746, 3748, 3750, 3752, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3765, 3767, 3768, 3770, 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3850, 717, 712, 4056, 4058, 704, 699, 4060, 4062, 910, 905, 717, 712, 704, 699, 638, 633, 3864, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4081, 3989, 4083, 3994, 656, 3997, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3989, 4085, 3992, 3994, 656, 3997, 669, 910, 905, 3875, 910, 905, 3877, 933, 938, 933, 938, 910, 905, 3884, 910, 905, 3886, 933, 938, 933, 938, 755, 789, 3891, 3893, 784, 789, 4103, 760, 760, 3896, 590, 585, 610, 605, 3901, 784, 590, 585, 3904, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3916, 3917, 3919, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3928, 3930, 3932, 755, 755, 755, 784, 590, 585, 717, 712, 4109, 4012, 4111, 4012, 760, 755, 638, 633, 3942, 638, 633, 3943, 784, 789, 4113, 4115, 854, 859, 3949, 905, 910, 3953, 928, 905, 910, 3953, 928, 4118, 943, 859, 854, 3959, 877, 872, 877, 872, 859, 854, 3965, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3989, 638, 633, 3992, 3994, 656, 3997, 669, 717, 712, 717, 712, 717, 712, 4003, 4004, 704, 699, 717, 712, 717, 712, 4009, 4010, 4012, 760, 755, 760, 755, 760, 
755, 765, 789, 784, 789, 784, 4148, 789, 784, 826, 821, 836, 831, 846, 841, 4151, 826, 821, 836, 831, 846, 841, 4153, 859, 854, 4039, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4172, 4175, 4159, 4160, 4161, 4177, 4179, 4181, 4183, 4185, 4190, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4202, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4210, 4160, 4161, 1363, 1547, 4212, 4135, 4161, 4217, 4105, 1545, 1540, 4219, 4107, 4108, 4225, 1276, 1271, 4227, 1276, 1271, 1281, 1281, 1281, 4229, 4231, 1545, 1540, 1545, 1540, 1545, 1540, 4135, 4160, 4161, 4135, 4160, 4161, 1521, 1516, 4138, 1521, 1516, 4140, 4156, 4158, 4159, 4160, 4161, 4242, 1521, 1516, 4164, 1521, 1516, 4167, 1545, 1540, 1545, 1540, 4241, 4240, 4241, 4240, 4241, 4240, 2077, 2077, 4241, 4240, 4241, 4240, 4241, 4240, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4263, 2174, 4265, 2174, 4267, 4270, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4260, 4259, 4274, 4273, 4260, 4259, 4260, 4259, 4260, 4259, 4260, 4259, 4274, 4273, 31, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4307, 4308, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4344, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 
4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4449, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4556, 4557, 4558, 4559, 4560, 4561, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4584, 4585, 4586, 4146, 4145, 4146, 4145, 4146, 4145, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4618, 4619, 4620, 4621, 4623, 4624, 4626, 4627, 4628, 4630, 4631, 4633, 4634, 4636, 4637, 4638, 4639, 4640, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4587, 4677, 4678, 4590, 4679, 4680, 4592, 4681, 4682, 4683, 4684, 4587, 4685, 4686, 4188, 4187, 4590, 4687, 4688, 4188, 4187, 4592, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4240, 4240, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4734, 4736, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 
4753, 4754, 4269, 4273, 4755, 4756, 4272, 4274, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4272, 4269, 4272, 4269, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4768, 4770, 4772, 4774, 4776, 4778, 4780, 4783, 4785, 4787, 4789, 4791, 4793, 4796, 4798, 4800, 4802, 4804, 4806, 4809, 4812, 4814, 4816, 4824, 4826, 4828, 4831, 4834, 4836, 4838, 4847, 4850, 4853, 4855, 4857, 4860, 4863, 4865, 4871, 4876, 4878, 4882, 4885, 4887, 4890, 4894, 4900, 4903, 4905, 4907, 4916, 4918, 4922, 4924, 4927, 4930, 4932, 4935, 4939, 4944, 4947, 4949, 4951, 4954, 4956, 4958, 4960, 4962, 4965, 4968, 4970, 4972, 4975, 4978, 4985, 4987, 4989, 4993, 4995, 4997, 5002, 5004, 5006, 5009, 5011, 5013, 5015, 5017, 5019, 5021, 5023, 5025, 5027, 5030, 5032, 5034, 5037, 5040, 5043, 4984, 4982, 5001, 5049, 5050, 5051, 5052, 4984, 4982, 5001, 5053, 5054, 4984, 4982, 5001, 5001, 4984, 4982, 5001, 5055, 5057, 5059, 5061, 4842, 4819, 4823, 4821, 4842, 4841, 4846, 4844, 5065, 5067, 5069, 5074, 5076, 5080, 4899, 4870, 5008, 4146, 4145, 4147, 4982, 4899, 4870, 4899, 5008, 4146, 4145, 4147, 4982, 4899, 4911, 4150, 4984, 4902, 4911, 4150, 4899, 4911, 5008, 4146, 4145, 4147, 5085, 4984, 4982, 4448, 4920, 4450, 4921, 4984, 4982, 5001, 4943, 4938, 4943, 4942, 4943, 4938, 5089, 4943, 4942, 5091, 5096, 5098, 5100, 5108, 5111, 4984, 4982, 5001, 5119, 5122, 5125, 5127, 5113, 5110, 5129, 5132, 5135, 4666, 4241, 4240, 5115, 5140, 5143, 5144, 5145, 5148, 5149, 5150, 5154, 5156, 5158, 5161, 5113, 5110, 5113, 5084, 5165, 5167, 5169, 4666, 4241, 4240, 5171, 4666, 4241, 5173, 4666, 4241, 5174, 5115, 5115, 4666, 4241, 4240, 5113, 5110, 2135, 2133, 5175, 5178, 5181, 4666, 4241, 4240, 5113, 5084, 2135, 2133, 5184, 5187, 5190, 4666, 4241, 4240, 5113, 5110, 5195, 4666, 4241, 4240, 4666, 4241, 4240, 5115, 4666, 4241, 4240, 5199, 5201, 5203, 5206, 5208, 5208, 5153, 5215, 5216, 5212, 5212, 5197, 5219, 5220, 5208, 5153, 5212, 5197, 5208, 5208, 5208, 5205, 5208, 5208, 5212, 5197, 5231, 5232, 5233, 5234, 5198, 
5212, 4274, 4273, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4065, 4064, 4087, 4065, 4064, 4088, 4964, 4142, 4141, 4892, 5318, 4893, 4980, 4977, 5347, 5348, 4991, 4144, 4143, 5262, 4999, 4144, 4143, 5349, 5350, 4150, 4546, 4147, 5262, 5352, 4964, 4142, 4141, 4892, 5318, 4893, 4980, 4977, 5354, 5355, 4991, 4144, 4143, 5256, 4999, 4144, 4143, 5356, 5357, 4546, 4150, 4147, 4065, 4064, 4090, 4089, 4964, 4142, 4141, 4892, 5318, 4893, 4980, 4977, 5359, 5360, 4991, 4144, 4143, 5259, 4999, 4144, 4143, 5361, 5008, 4546, 4150, 4147, 4980, 4977, 4991, 4144, 4143, 5325, 4999, 4144, 4143, 5362, 5008, 4546, 4150, 4147, 5318, 4893, 4964, 4142, 4141, 4892, 4980, 4977, 5363, 5364, 4999, 4144, 4143, 5365, 4991, 4144, 4143, 5262, 4146, 4145, 4546, 4150, 4147, 4071, 4070, 4808, 4142, 4141, 4811, 5269, 4818, 5370, 5371, 5372, 5373, 4830, 4142, 4141, 4833, 5276, 4840, 5374, 5375, 5376, 5377, 4852, 4849, 4088, 4087, 4862, 4859, 4090, 4089, 4964, 4141, 4142, 4892, 5318, 4893, 4880, 4896, 4902, 5384, 4909, 4144, 4143, 5385, 5386, 5387, 5388, 5389, 4964, 4141, 4142, 4892, 5318, 4893, 4980, 4869, 5390, 5391, 4909, 4144, 4143, 5392, 5008, 4146, 4145, 4400, 4964, 4141, 4142, 4902, 5393, 4909, 4144, 4143, 5394, 5395, 5396, 5397, 4964, 4141, 4142, 4892, 5318, 4893, 4880, 4977, 5398, 5399, 4909, 4144, 4143, 5400, 5008, 4146, 4145, 5401, 4964, 4142, 4141, 4897, 4896, 5402, 5403, 4909, 4144, 4143, 5404, 5008, 4146, 4145, 5405, 4964, 4142, 4141, 4892, 4893, 4897, 4896, 4902, 5406, 4909, 4144, 4143, 5407, 5408, 5409, 5410, 5411, 4130, 4128, 4964, 4142, 4141, 4967, 5318, 4974, 4929, 4926, 5413, 5414, 4144, 4143, 5415, 5416, 5008, 4146, 4145, 4150, 4147, 4546, 4144, 4143, 5417, 4144, 4143, 5418, 5008, 4146, 4145, 4964, 4142, 4141, 4967, 5318, 4974, 4929, 4926, 5419, 5420, 4991, 5325, 4999, 5421, 5008, 4146, 4145, 4150, 4147, 4546, 4129, 4131, 5422, 5423, 5424, 5425, 4934, 4937, 5426, 5427, 4941, 5429, 5430, 4946, 4129, 4128, 4953, 4131, 4130, 4964, 4142, 4141, 4967, 5318, 4974, 4980, 
4977, 5437, 5438, 4991, 4144, 4143, 5325, 4999, 4144, 4143, 5439, 5008, 4146, 4145, 4150, 4546, 4147, 5335, 4555, 5338, 4562, 5029, 5342, 5039, 5036, 5045, 5042, 5444, 5445, 5446, 5447, 5448, 4583, 5449, 5450, 5451, 5452, 5453, 5454, 5456, 5457, 5459, 4193, 4192, 4233, 4233, 5063, 5064, 5379, 5464, 5465, 5466, 5467, 4233, 5471, 5472, 5473, 4233, 5475, 5476, 5113, 5110, 5382, 5478, 5479, 5113, 5084, 5383, 5481, 5482, 5483, 5484, 5485, 5486, 5487, 4622, 5488, 5489, 5493, 5494, 5495, 5496, 5497, 4629, 5498, 5499, 5428, 5428, 5428, 5431, 5503, 5504, 5505, 5506, 5507, 4233, 4233, 4233, 4236, 4236, 5509, 5510, 5511, 5512, 5513, 5514, 5113, 5110, 4238, 4238, 5515, 5516, 5517, 5518, 5124, 5121, 4245, 4245, 5523, 5198, 5524, 5525, 4272, 4274, 4273, 4269, 5208, 5205, 5526, 5208, 5205, 5528, 5529, 5530, 5531, 5533, 5534, 4272, 4274, 4273, 4269, 5535, 5536, 5537, 4272, 5198, 4269, 5180, 5177, 5183, 5538, 4272, 4269, 5539, 5540, 5197, 4272, 4269, 5541, 5198, 5180, 5177, 5183, 5189, 5186, 5192, 5180, 5177, 5183, 5189, 5186, 5192, 5542, 5198, 5543, 5544, 5208, 5205, 5549, 5547, 5208, 5205, 5550, 5551, 5552, 25, 26, 27, 28, 29, 30, 31, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5578, 5579, 5580, 5581, 5582, 5584, 5585, 5586, 5587, 5588, 5589, 5590, 5593, 5594, 5595, 5596, 5598, 5599, 5600, 5601, 5602, 5603, 5604, 5605, 5606, 5608, 5609, 5610, 5611, 5612, 5613, 5614, 5617, 5618, 5619, 5620, 5621, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5642, 5643, 5644, 5645, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5656, 5657, 5658, 5659, 5660, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5670, 5671, 5672, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5691, 5693, 5695, 5696, 5697, 5698, 5699, 5700, 5701, 5703, 5705, 5706, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5719, 5720, 5721, 5723, 5724, 5725, 5727, 5731, 
5732, 5733, 5734, 5735, 5736, 5737, 5738, 5739, 5741, 5742, 5743, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5754, 5755, 5756, 5757, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 5769, 5771, 5772, 5773, 5775, 5776, 5777, 5779, 5780, 5781, 5782, 5783, 5784, 5786, 5787, 5788, 5790, 5791, 5792, 5794, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5803, 5804, 5805, 5807, 5811, 5812, 5813, 5814, 5815, 5816, 5817, 5818, 5819, 5820, 5821, 5823, 5824, 5827, 5828, 5829, 5830, 5831, 5832, 5833, 5834, 5836, 5837, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5849, 5850, 5852, 5853, 5854, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5866, 5868, 5869, 5870, 5872, 5873, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5886, 5887, 5888, 5889, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5899, 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5911, 5912, 5913, 5914, 5915, 5917, 5918, 5919, 5920, 5921, 5925, 5927, 5929, 5930, 5931, 5932, 5933, 5934, 5935, 5936, 5937, 5939, 5941, 5942, 5945, 5946, 5948, 5949, 5950, 5951, 5953, 5954, 5955, 5958, 5961, 5963, 5964, 5966, 5969, 5971, 5972, 5974, 5975, 5976, 5977, 5978, 5981, 5983, 5984, 5985, 5986, 5987, 5988, 5991, 5994, 5995, 5996, 5997, 5999, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6010, 6011, 6012, 6013, 6014, 6015, 6017, 6018, 6019, 6020, 6023, 6025, 6026, 6027, 6028, 6029, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6040, 6041, 6043, 6044, 6045, 6046, 6047, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6060, 6061, 6062, 6063, 6064, 6065, 6067, 6068, 6069, 6070, 28, 29, 30, 31, 6080, 6083, 6086, 6092, 6095, 6099, 6102, 6106, 6112, 6115, 6119, 6122, 6125, 6127, 6129, 6135, 6138, 6142, 6146, 6149, 6151, 6155, 6159, 6164, 6168, 6171, 6174, 6178, 6180, 6183, 6185, 6193, 6201, 6203, 6205, 6207, 6209, 6215, 6217, 6218, 6221, 6222, 6228, 6231, 6234, 6238, 6241, 6242, 6245, 6246, 6252, 6255, 6258, 6261, 6264, 6267, 6270, 6273, 6278, 6280, 6281, 
6284, 6287, 6293, 6296, 6298, 6301, 6304, 6306, 6308, 6311, 6317, 6323, 6326, 6339, 6342, 6344, 6350, 6353, 6357, 6360, 6363, 6372, 6374, 6381, 6134, 6091, 6111, 6369, 6367, 6371, 6091, 6163, 6111, 6134, 6163, 6371, 6385, 6190, 6192, 6198, 6200, 6395, 6397, 6398, 6401, 6402, 6214, 6251, 6277, 6227, 6367, 6214, 6227, 6277, 6251, 6316, 6277, 6369, 6405, 6409, 6349, 6369, 6367, 6286, 6285, 6331, 6292, 6316, 5855, 6321, 6369, 6367, 6330, 6329, 6331, 6332, 6349, 6369, 6367, 6335, 6337, 6417, 6424, 6425, 6426, 6349, 6369, 6367, 6371, 6430, 6431, 5520, 5194, 5193, 6435, 5928, 5926, 5520, 5194, 5193, 6438, 6440, 5520, 5519, 6442, 6408, 6412, 5520, 5519, 6444, 6446, 5928, 5926, 5520, 5194, 5193, 6449, 6451, 5520, 5519, 5520, 5193, 5194, 6454, 6455, 6408, 6458, 6412, 5520, 5194, 5193, 6461, 6462, 5193, 5194, 5520, 6465, 5520, 5194, 5193, 6468, 6408, 6470, 6412, 6473, 6408, 6476, 6412, 6479, 5194, 5193, 5520, 6482, 5520, 5519, 6484, 6485, 5520, 5519, 6486, 5520, 5519, 6489, 6491, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6498, 6500, 6501, 6502, 6503, 6505, 6506, 6507, 6510, 6512, 6513, 6514, 6516, 6517, 6518, 6519, 6521, 6522, 6524, 6526, 6527, 6532, 6535, 6537, 6539, 6540, 6541, 6543, 6545, 6547, 6548, 6549, 6551, 6552, 6553, 6556, 6558, 6561, 6562, 6565, 6566, 6568, 6569, 6572, 6574, 6575, 6576, 6577, 6581, 6137, 6582, 6094, 6583, 6114, 6584, 6585, 6586, 6085, 6082, 6587, 6094, 6588, 6170, 6589, 6114, 6509, 6590, 6137, 6319, 6591, 6170, 6592, 6579, 6594, 6595, 6596, 6597, 6531, 6529, 6603, 6534, 5730, 6604, 6254, 6605, 6555, 5810, 6606, 6230, 6607, 6570, 6571, 6579, 6608, 6534, 5730, 6609, 6230, 6610, 6542, 5760, 6611, 6254, 6612, 6266, 6613, 6555, 5810, 6614, 6579, 6617, 6352, 6618, 6619, 6620, 6621, 6622, 6623, 6295, 5826, 5825, 5838, 5835, 6624, 6319, 6625, 6626, 6627, 6628, 6629, 6630, 6631, 6632, 6633, 6352, 6634, 6635, 6570, 6571, 6636, 6637, 6571, 6570, 6642, 6352, 6643, 6644, 6645, 6579, 6376, 
6648, 6649, 6650, 6593, 6652, 6653, 6654, 6655, 6656, 6657, 6647, 6659, 6660, 6406, 6662, 6663, 6647, 6664, 6665, 6647, 6593, 6668, 6669, 6670, 6671, 6672, 6673, 6675, 6676, 6647, 6677, 6678, 6679, 6681, 6392, 6682, 6393, 6684, 6641, 6685, 6686, 6687, 6690, 6691, 6692, 6693, 6418, 6694, 6695, 6696, 6600, 6698, 6602, 6700, 6406, 6702, 6410, 6704, 6418, 6706, 6707, 6708, 6647, 6710, 6711, 6647, 6641, 6714, 6715, 6647, 6717, 6718, 6436, 6016, 6667, 6447, 6022, 6453, 6460, 6689, 6469, 6472, 6475, 6478, 6481, 6483, 6712, 6713, 6488, 6720, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6132, 6801, 5641, 6141, 6763, 6089, 6803, 5591, 6098, 6755, 6109, 6805, 5615, 6118, 6759, 6806, 6809, 6810, 6089, 6812, 5591, 6098, 6755, 6167, 6814, 5655, 6105, 6766, 6109, 6816, 5615, 6118, 6759, 6817, 6132, 6819, 5641, 6141, 6763, 6820, 5655, 6154, 6766, 6167, 6822, 6177, 5673, 6770, 6824, 6188, 6196, 6829, 6830, 6212, 6832, 5726, 6833, 6249, 6835, 5774, 5778, 6276, 6837, 5806, 6838, 6225, 6840, 5744, 6237, 6842, 6843, 6844, 6212, 6846, 5726, 6847, 6225, 6849, 5744, 6237, 6314, 6851, 5789, 6852, 6249, 6854, 5774, 5778, 6314, 6856, 5789, 5793, 6276, 6858, 5806, 6859, 6861, 6347, 6863, 5898, 6356, 6799, 6864, 6866, 6290, 6870, 6871, 6872, 6790, 6873, 6874, 6794, 6314, 6876, 6877, 6794, 6879, 6881, 6347, 6886, 5898, 6356, 6799, 6887, 6889, 6890, 6893, 6894, 6347, 6896, 5898, 6356, 6799, 6897, 6900, 6416, 6413, 6416, 6414, 6901, 6902, 6905, 6908, 6912, 6913, 6915, 6918, 6919, 6921, 6922, 6925, 6929, 6931, 6932, 6936, 6938, 6416, 6413, 6940, 6941, 6944, 6416, 6413, 6416, 6414, 6948, 6949, 6952, 6954, 6956, 6958, 6416, 6413, 6416, 6414, 6416, 6415, 6960, 6961, 6964, 6965, 6967, 6968, 6969, 6971, 6972, 6974, 6911, 6975, 6976, 6977, 6978, 6928, 6979, 6935, 6980, 6981, 6947, 6982, 6983, 6984, 6985, 6986, 6987, 6988, 6989, 6990, 6991, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7008, 7010, 7011, 7012, 7013, 7015, 7016, 7017, 7018, 7020, 7021, 7022, 7024, 7026, 
7028, 7029, 7030, 7031, 7033, 7034, 7035, 7036, 7038, 7039, 7040, 7042, 7044, 7045, 7046, 7048, 7049, 7050, 7051, 7053, 7054, 7055, 7057, 7058, 7059, 7061, 7063, 7065, 7067, 7068, 7069, 7071, 7073, 7075, 7076, 7077, 7080, 7082, 7084, 7086, 7087, 7088, 7090, 7092, 7094, 7095, 7096, 7098, 7099, 7100, 7102, 7105, 7107, 7108, 7109, 7112, 7114, 7116, 7117, 7119, 7120, 7123, 7126, 7128, 7129, 7130, 7132, 7134, 7136, 7138, 7139, 7140, 7111, 7143, 7144, 7125, 7145, 7146, 7148, 6808, 7149, 7150, 6899, 5998, 6899, 5998, 6808, 5957, 6823, 5957, 7157, 7158, 7161, 5956, 7111, 7164, 7165, 7167, 7168, 7111, 7169, 7170, 7125, 7171, 7172, 7174, 5956, 5956, 5957, 7111, 7179, 7180, 7125, 7181, 7182, 7183, 7184, 7186, 6899, 5998, 6899, 5998, 7195, 7152, 6916, 6917, 7155, 7159, 7191, 7200, 7159, 7202, 6937, 6939, 7205, 6953, 6955, 6957, 6959, 7188, 7191, 7191, 7193, 29, 30, 31, 6800, 7233, 6802, 7237, 6804, 7241, 6811, 7246, 6813, 7250, 6815, 7254, 6818, 7258, 7261, 7264, 7265, 6825, 6827, 6831, 6834, 6836, 6839, 6845, 6848, 6850, 6853, 6855, 6857, 6862, 7298, 6869, 6875, 6885, 7309, 6895, 7315, 7318, 7319, 7303, 7321, 7322, 7325, 5924, 7326, 7328, 7329, 7330, 7331, 7332, 5924, 7333, 7334, 7335, 7336, 7313, 6391, 7280, 7072, 7281, 7339, 7340, 7341, 7345, 7346, 7303, 7348, 7349, 7072, 7275, 7280, 7064, 7281, 7352, 7064, 7275, 7072, 7280, 7281, 7353, 7083, 7286, 7091, 7291, 7294, 7103, 7312, 7354, 7355, 7356, 7303, 7305, 7307, 7358, 7359, 7312, 7361, 7313, 7364, 7365, 7366, 7367, 7324, 7369, 7370, 7371, 7372, 7373, 7374, 7376, 7338, 7378, 7379, 7343, 7344, 7351, 7381, 7382, 7383, 7384, 7363, 7385, 7386, 7387, 7388, 29, 30, 31, 7106, 7300, 7113, 7431, 7014, 7239, 7009, 7235, 7019, 7243, 7435, 7137, 7317, 7137, 7317, 7009, 7235, 7014, 7239, 7019, 7243, 7442, 7027, 7248, 7032, 7252, 7037, 7256, 7043, 7260, 7047, 7263, 7052, 7267, 6828, 6826, 7317, 7447, 7448, 7074, 7449, 7070, 7450, 7451, 7106, 7300, 7106, 7300, 7113, 7457, 7070, 7460, 7066, 7461, 7074, 7462, 7062, 7463, 7464, 7062, 7466, 
7066, 7467, 7070, 7468, 7074, 7469, 7470, 7081, 7472, 7085, 7473, 7089, 7474, 7093, 7475, 7097, 7476, 7101, 7477, 7478, 7106, 7300, 7113, 7482, 7483, 7121, 7484, 7127, 7311, 7487, 7489, 7137, 7317, 7137, 7317, 7430, 7433, 7494, 7327, 7438, 7440, 7443, 7445, 7337, 7502, 7454, 7505, 7506, 7456, 7459, 7507, 7481, 7486, 7512, 7491, 7493, 7196, 7210, 7203, 7197, 7198, 7199, 7201, 7210, 7203, 7208, 7207, 7210, 7209, 7212, 7213, 7214, 7215, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7520, 7521, 7522, 7524, 7525, 7526, 7527, 7528, 7529, 7531, 7532, 7533, 7534, 7535, 7536, 7537, 7538, 7539, 7540, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551, 7552, 7553, 7554, 7555, 7556, 7559, 7561, 7564, 7565, 7566, 7567, 7568, 7570, 7572, 7574, 7576, 7579, 7581, 7583, 7585, 7588, 7590, 7592, 7594, 7596, 7598, 7601, 7602, 7603, 7606, 7608, 7609, 7612, 7613, 7614, 7615, 7616, 7617, 7530, 7619, 7620, 7621, 7541, 7622, 7623, 7624, 7558, 7452, 7626, 7629, 7630, 7465, 7471, 7479, 7632, 7633, 7488, 7491, 7635, 7636, 7194, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7377, 7644, 7645, 7204, 7380, 7206, 7646, 7647, 7648, 7649, 7211, 7650, 7651, 7652, 7653, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7711, 7681, 7607, 7605, 7523, 7688, 7702, 7684, 7708, 7686, 7747, 7690, 7692, 7694, 7708, 7696, 7702, 7698, 7751, 7708, 7700, 7702, 7706, 7704, 7710, 7708, 7706, 7755, 7593, 7562, 7560, 7597, 7580, 7582, 7756, 7717, 7719, 7607, 7605, 7569, 7573, 7577, 7597, 7571, 7575, 7593, 7760, 7582, 7586, 7597, 7580, 7593, 7584, 7761, 7591, 7593, 7589, 7599, 7597, 7595, 7762, 7736, 7607, 7605, 7604, 7740, 7765, 7766, 7742, 7744, 7769, 7368, 7771, 7375, 7777, 7778, 7780, 7781, 7782, 7783, 7785, 7787, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816, 7817, 7819, 7820, 7821, 7822, 7823, 7824, 7825, 7827, 7828, 7829, 7830, 7831, 7832, 7833, 7834, 7713, 7836, 7837, 7838, 7839, 
7840, 7841, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 7853, 7855, 7856, 7857, 7858, 7859, 7860, 7862, 7863, 7864, 7865, 7866, 7867, 7869, 7870, 7871, 7872, 7873, 7876, 7877, 7879, 7881, 31, 7905, 7908, 7910, 7915, 7917, 7920, 7922, 7925, 7928, 7929, 7931, 7933, 7937, 7940, 7942, 7944, 7946, 7948, 7950, 7952, 7954, 7956, 7959, 7874, 7745, 7749, 7750, 7874, 7757, 7874, 7758, 7874, 7763, 7875, 7767, 7768, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7968, 7969, 7971, 7973, 7975, 7977, 7980, 7981, 7984, 7987, 7990, 7991, 7992, 7993, 7994, 7835, 7995, 7996, 7997, 7998, 7999, 8000, 8001, 8002, 8003, 25, 26, 27, 28, 29, 30, 31, 8033, 8034, 8035, 8037, 8039, 8040, 8041, 7746, 7753, 7753, 8047, 7759, 7759, 7764, 7773, 7774, 7790, 7776, 7770, 7791, 7885, 7775, 7788, 7789, 24, 25, 26, 27, 28, 29, 30, 31, 8071, 8072, 7752, 7818, 8073, 7752, 7826, 7868, 7842, 8075, 8076, 7868, 7854, 7868, 7861, 8077, 8078, 8079, 8080, 8081, 8082, 7882, 8083, 8084, 8085, 8086, 8087, 27, 28, 29, 30, 31, 8043, 8098, 8099, 8101, 8102, 8103, 8104, 8048, 8050, 8107, 8108, 8109, 8110, 8052, 8117, 8113, 8120, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8128, 8097, 8100, 8133, 8135, 8136, 8137, 8139, 8141, 8142, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8161, 8162, 7883, 7878, 7887, 7889, 7884, 7886, 7888, 7880, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8194, 7965, 8195, 8196, 8197, 8198, 8199, 8200, 7966, 8201, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8225, 8232, 8112, 8226, 8116, 8119, 8230, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8115, 8257, 8258, 8260, 8261, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8288, 8262, 8291, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8290, 8321, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8352, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8384, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 
893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 961, 962, 963, 964, 967, 968, 969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 1163, 1164, 3575, 3577, 3579, 3581, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3591, 1265, 1266, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 1312, 1315, 1321, 1324, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 1550, 
1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 1770, 1771, 3721, 3723, 3725, 3727, 3729, 3731, 1805, 1806, 1909, 1910, 3737, 3739, 3741, 1925, 1926, 1930, 1931, 3747, 3749, 3751, 3753, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3764, 3766, 2047, 3769, 2071, 3772, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3840, 4048, 4048, 3841, 3843, 3842, 3844, 4048, 4048, 4048, 3846, 3845, 3848, 3847, 3849, 3852, 3851, 4057, 4059, 3854, 3853, 4061, 4063, 3856, 3855, 3858, 3857, 3860, 3859, 3862, 3861, 3863, 3866, 3865, 3868, 3867, 4023, 3869, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3870, 3871, 3981, 3980, 3983, 3982, 3985, 3984, 3872, 4082, 624, 4084, 3993, 3995, 3996, 3998, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 624, 4086, 639, 3993, 3995, 3996, 3998, 3874, 3873, 183, 4048, 3876, 192, 3879, 3878, 3881, 3880, 3883, 3882, 183, 4048, 3885, 192, 3888, 3887, 3890, 3889, 3907, 3909, 624, 3892, 3895, 3894, 4104, 3908, 4017, 3934, 3898, 3897, 3900, 3899, 639, 3909, 3902, 3980, 3903, 3906, 3905, 3908, 3907, 3909, 3911, 3910, 3912, 3913, 3915, 3914, 624, 639, 3918, 3920, 3922, 3921, 3923, 3925, 3924, 4008, 3926, 4008, 3927, 718, 3929, 3931, 3933, 4015, 4016, 3934, 3980, 3935, 3937, 3936, 4110, 404, 4112, 405, 3939, 3938, 3941, 3940, 624, 3991, 3991, 639, 3945, 3944, 4114, 4116, 3947, 3946, 3948, 3951, 3950, 3952, 3954, 3951, 3950, 3952, 3954, 4119, 3955, 3957, 3956, 3958, 4041, 3960, 4041, 3961, 3963, 3962, 3964, 3967, 3966, 3969, 3968, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 3988, 3987, 624, 3991, 3990, 
639, 3993, 3995, 3996, 3998, 4000, 3999, 4008, 4001, 4008, 4002, 718, 690, 4006, 4005, 4008, 4007, 4008, 4008, 718, 723, 4011, 4014, 4013, 4017, 4015, 4017, 4016, 4018, 4020, 4019, 4023, 4021, 4149, 4023, 4022, 4025, 4024, 4027, 4026, 4029, 4028, 4152, 4031, 4030, 4033, 4032, 4035, 4034, 4154, 4037, 4036, 4038, 4041, 4040, 4043, 4042, 4045, 4044, 4046, 4048, 4047, 4049, 4051, 4050, 4052, 4054, 4053, 4055, 4173, 4176, 1491, 1491, 1491, 4178, 4180, 4182, 4184, 4186, 4191, 4073, 4072, 4166, 4074, 4076, 4075, 4078, 4077, 4079, 4080, 4092, 4091, 4094, 4093, 4203, 4096, 4095, 4132, 4133, 4134, 4098, 4097, 4100, 4099, 4211, 1491, 1491, 4102, 4101, 4213, 1341, 1341, 4218, 1507, 4123, 4106, 4220, 1220, 1220, 4226, 4120, 4117, 4228, 4120, 4120, 4121, 4133, 4134, 4230, 4232, 4123, 4122, 4125, 4124, 4127, 4126, 4132, 4133, 4134, 1341, 1341, 1341, 4137, 4136, 1507, 4166, 4139, 1522, 4155, 4157, 1491, 1491, 1491, 4243, 4163, 4162, 1507, 4166, 4165, 1522, 4169, 4168, 4171, 4170, 4174, 4174, 4174, 4174, 4174, 4174, 4207, 4224, 4189, 4189, 4189, 4189, 4189, 4189, 4194, 4224, 4195, 4197, 4196, 4199, 4198, 4200, 4224, 4201, 4248, 4251, 4224, 4204, 4224, 4205, 4207, 4206, 4209, 4208, 4248, 4247, 4221, 4223, 4222, 4252, 4215, 4214, 4216, 4248, 4247, 4221, 4223, 4222, 4252, 4224, 4255, 4256, 4244, 4244, 4235, 4234, 4264, 4237, 4266, 4239, 4268, 4271, 4244, 4244, 4246, 4246, 4248, 4247, 4249, 4251, 4250, 4252, 4253, 4254, 4255, 4256, 4261, 4261, 4257, 4257, 4258, 4258, 4261, 4261, 4261, 4261, 4261, 4261, 4262, 4262, 31, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 
1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4306, 4305, 4306, 4305, 4310, 4309, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4174, 2188, 2189, 4174, 2191, 2192, 4174, 2194, 2195, 2198, 2199, 4189, 2265, 2266, 4589, 4588, 4189, 2270, 2271, 4607, 4591, 4189, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4617, 4617, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 
2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4737, 4735, 2612, 2613, 4738, 4735, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4733, 4733, 4738, 4737, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4769, 4771, 4773, 4775, 4777, 4779, 4781, 4784, 4786, 4788, 4790, 4792, 4794, 4797, 4799, 4801, 4803, 4805, 4807, 4810, 4813, 4815, 4817, 4825, 4827, 4829, 4832, 4835, 4837, 4839, 4848, 4851, 4854, 4856, 4858, 4861, 4864, 4866, 4872, 4877, 4879, 4883, 4886, 4888, 4891, 4895, 4901, 4904, 4906, 4908, 4917, 4919, 4923, 4925, 4928, 4931, 4933, 4936, 4940, 4945, 4948, 4950, 4952, 4955, 4957, 4959, 4961, 4963, 4966, 4969, 4971, 4973, 4976, 4979, 4986, 4988, 4990, 4994, 4996, 4998, 5003, 5005, 5007, 5010, 5012, 5014, 5016, 5018, 5020, 5022, 5024, 5026, 5028, 5031, 5033, 5035, 5038, 5041, 5044, 4983, 4782, 4067, 1582, 1583, 1588, 1589, 4983, 4795, 5000, 1608, 1609, 4983, 4795, 4067, 4069, 4983, 4795, 5000, 5056, 5058, 5060, 5062, 4345, 4343, 4822, 4820, 4368, 4368, 4845, 4843, 5066, 5068, 5070, 5075, 5077, 5081, 4983, 4910, 4914, 4913, 4867, 4868, 4884, 4898, 4910, 4983, 4874, 4874, 4873, 4875, 4884, 4983, 4910, 4881, 4983, 4884, 4910, 4889, 4898, 4910, 4914, 4913, 4912, 4915, 5086, 4983, 4981, 4992, 5000, 4992, 5000, 4983, 4981, 5000, 4463, 4462, 4463, 4463, 4475, 4475, 5090, 4475, 4475, 5092, 5097, 5099, 5101, 5109, 5112, 4983, 4981, 5000, 5120, 5123, 5126, 5128, 4607, 4582, 2187, 2190, 2193, 5048, 5047, 5046, 5114, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5155, 5157, 5159, 5162, 4607, 4607, 4607, 4607, 5166, 5168, 5170, 5073, 5072, 5071, 5172, 5079, 5078, 2324, 5079, 5078, 2330, 5114, 5114, 5083, 5106, 5082, 4642, 4625, 5088, 5087, 5176, 5179, 5182, 5083, 5106, 5082, 4642, 4625, 5088, 5087, 5185, 5188, 5191, 5095, 5094, 5093, 4642, 4641, 5196, 5104, 5103, 5102, 5107, 5106, 5105, 5114, 5118, 5117, 5116, 5200, 5202, 5204, 5207, 
4261, 5164, 5163, 2597, 2598, 4257, 5139, 5138, 2617, 2618, 5164, 5163, 5211, 5160, 4258, 4261, 5164, 5163, 4261, 4261, 4262, 4733, 2815, 2816, 2826, 2827, 4735, 5211, 5210, 5209, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 5249, 5248, 5250, 5252, 5251, 5253, 5315, 5314, 5289, 5316, 5317, 5288, 5321, 5254, 1572, 1573, 5324, 5323, 5255, 4066, 5327, 5327, 5326, 1581, 5351, 5333, 5332, 5331, 4068, 5353, 5315, 5314, 5298, 5316, 5317, 5288, 5321, 5260, 1598, 1599, 5324, 5323, 5261, 4992, 5327, 5327, 5326, 1607, 5358, 5263, 5333, 5331, 5344, 5257, 5346, 5345, 5315, 5314, 5313, 5316, 5317, 5288, 5321, 5260, 1625, 1626, 5324, 5323, 5258, 4066, 5327, 5327, 5326, 1634, 5330, 5263, 5333, 5303, 5321, 5260, 5324, 5323, 5261, 4068, 5327, 5327, 5326, 1648, 5330, 5263, 5333, 5303, 5317, 5288, 5315, 5314, 5313, 5316, 5321, 5260, 1661, 1662, 5327, 5327, 5326, 1666, 5324, 5323, 5261, 4992, 5329, 5300, 5263, 5333, 5303, 5344, 5343, 5266, 5265, 5264, 5267, 5268, 5270, 1739, 1740, 1741, 1742, 5273, 5272, 5271, 5274, 5275, 5277, 1749, 1750, 1751, 1752, 5279, 5278, 5281, 5280, 5283, 5282, 5285, 5284, 5315, 5313, 5292, 5316, 5317, 5288, 5321, 5293, 5294, 1820, 5297, 5296, 5295, 1824, 1825, 1826, 1827, 1828, 5315, 5313, 5287, 5316, 5317, 5288, 5321, 5293, 1837, 1838, 5297, 5296, 5290, 1842, 5330, 5329, 5328, 5286, 5315, 5313, 5287, 5294, 1851, 5297, 5296, 5295, 1855, 1856, 1857, 1858, 5315, 5313, 5287, 5316, 5317, 5288, 5321, 5293, 1867, 1868, 5297, 5296, 5290, 1872, 5330, 5329, 5291, 1876, 5315, 5314, 5289, 5321, 5293, 1882, 1883, 5297, 5296, 5290, 1887, 5330, 5329, 5291, 1891, 5315, 5292, 5298, 5316, 5319, 5321, 5293, 5294, 1900, 5297, 5296, 5295, 1904, 1905, 1906, 1907, 1908, 5311, 5308, 5315, 5314, 5298, 5316, 5317, 5319, 5302, 5301, 1960, 1961, 5323, 5299, 1964, 1965, 5330, 5329, 5300, 5333, 5303, 5332, 5323, 5322, 1974, 5327, 5326, 1977, 5330, 5329, 5300, 5315, 5314, 5313, 5316, 5317, 5319, 5302, 5301, 1989, 1990, 5324, 4992, 5327, 1994, 5330, 5329, 5328, 5333, 5303, 
5332, 5309, 5312, 2003, 2004, 2005, 2006, 5304, 5305, 2009, 2010, 5306, 2016, 2017, 5307, 5309, 5308, 5310, 5312, 5311, 5315, 5314, 5313, 5316, 5317, 5319, 5321, 5320, 2086, 2087, 5324, 5323, 5322, 4992, 5327, 5327, 5326, 2095, 5330, 5329, 5328, 5333, 5332, 5331, 5334, 5336, 5337, 5339, 5340, 5341, 5344, 5343, 5346, 5345, 2185, 2186, 5130, 5133, 5136, 5412, 2200, 2201, 2202, 2226, 5141, 5455, 5146, 5458, 5151, 5367, 5366, 5368, 5369, 5442, 5443, 5378, 2303, 2304, 2305, 2306, 5380, 2316, 2317, 2318, 5443, 2322, 2323, 5436, 5381, 5412, 2328, 2329, 5436, 5435, 5412, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5412, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5412, 2415, 2416, 4632, 4632, 4632, 4635, 2474, 2475, 2476, 2477, 2478, 5432, 5433, 5434, 5442, 5443, 2494, 2495, 2496, 2500, 2501, 2502, 5436, 5435, 5442, 5443, 2526, 2527, 2528, 2529, 5441, 5440, 5442, 5443, 2564, 5474, 2582, 2583, 5469, 5462, 5461, 5460, 5522, 5521, 5527, 5522, 5521, 2611, 2614, 2615, 5532, 2648, 2649, 5469, 5462, 5461, 5460, 2656, 2657, 2667, 5469, 5470, 5463, 5491, 5490, 5492, 2695, 5469, 5468, 2703, 2704, 5470, 5469, 5468, 2722, 5474, 5491, 5490, 5492, 5501, 5500, 5502, 5491, 5490, 5492, 5501, 5500, 5502, 2802, 5508, 2809, 2814, 5522, 5521, 2828, 5548, 5522, 5521, 2838, 2839, 2840, 25, 26, 27, 28, 29, 30, 31, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5583, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5607, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5633, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5669, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 5692, 5694, 1743, 1744, 1745, 
1746, 1747, 1748, 5702, 5704, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 5728, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 5740, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 5758, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 5770, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 5785, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 5808, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 5822, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 5851, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 5865, 5867, 2007, 2008, 5871, 2015, 5874, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 5890, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 5916, 5131, 5134, 5137, 2196, 5922, 5142, 5147, 5152, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 5938, 5940, 2307, 5943, 2319, 5947, 2325, 2326, 2327, 5952, 2331, 2332, 2333, 5959, 5962, 2397, 5965, 5967, 5970, 2414, 5973, 2437, 2458, 2471, 2473, 5979, 5982, 2479, 2480, 2482, 2489, 2490, 5989, 5992, 2503, 2504, 2505, 2506, 6000, 2530, 2531, 2532, 2535, 5213, 2567, 6009, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5217, 6021, 6024, 2650, 2651, 2652, 2653, 6030, 5221, 2670, 2671, 2672, 2681, 2682, 2683, 5223, 2698, 2699, 6042, 2705, 2706, 2707, 5225, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5227, 2805, 5229, 5545, 2824, 2825, 6066, 2836, 2837, 6071, 28, 29, 30, 31, 6081, 6084, 6087, 6093, 6096, 6100, 6103, 6107, 6113, 6116, 6120, 6123, 6126, 6128, 6130, 6136, 6139, 6143, 6147, 6150, 6152, 6156, 6160, 6165, 6169, 6172, 6175, 6179, 6181, 6184, 6186, 6194, 6202, 6204, 
6206, 6208, 6210, 6216, 5722, 6219, 5729, 6223, 6229, 6232, 6235, 6239, 5753, 6243, 5759, 6247, 6253, 6256, 6259, 6262, 6265, 6268, 6271, 6274, 6279, 5802, 6282, 5809, 6288, 6294, 6297, 6299, 6302, 6305, 6307, 6309, 6312, 6318, 6324, 6327, 6340, 6343, 6345, 6351, 6354, 6358, 6361, 6364, 6373, 6375, 5923, 6133, 6090, 6110, 6368, 6366, 6370, 6090, 6315, 6110, 6133, 6162, 6370, 6386, 6189, 6191, 6197, 6199, 5944, 5477, 6399, 5480, 6403, 6213, 6250, 6315, 6226, 6366, 6213, 6226, 6315, 6250, 6315, 6315, 6368, 5960, 5968, 6348, 6368, 6366, 6333, 6341, 6334, 6291, 6315, 6322, 6320, 6368, 6366, 6341, 6338, 6334, 6336, 6348, 6368, 6366, 6334, 6336, 5980, 5990, 5993, 6427, 6348, 6368, 6366, 6370, 6001, 6432, 6396, 6421, 6420, 5214, 6378, 6377, 6396, 6388, 6387, 6439, 6441, 6429, 6428, 6443, 6380, 6380, 6423, 6422, 6445, 5218, 6383, 6382, 6396, 6388, 6387, 6450, 6452, 6390, 6389, 6396, 6394, 6421, 5222, 6456, 6400, 6459, 6404, 6396, 6421, 6420, 5224, 6463, 6394, 6421, 6419, 6466, 6396, 6421, 6420, 5226, 6400, 6471, 6404, 6474, 6407, 6477, 6411, 6480, 6421, 6420, 6419, 5228, 6423, 6422, 5230, 5546, 6429, 6428, 6487, 6434, 6433, 6490, 6072, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6088, 6097, 6101, 6104, 6108, 6117, 6121, 6124, 6131, 6140, 6144, 6148, 6153, 6157, 6161, 6166, 6173, 6176, 6182, 6187, 6195, 6211, 6220, 6224, 6233, 6236, 6240, 6244, 6248, 6257, 6260, 6263, 6269, 6272, 6275, 6283, 6289, 6300, 6303, 6310, 6313, 6325, 6328, 6346, 6355, 6359, 6362, 6365, 2203, 6511, 2209, 6499, 2215, 6504, 2221, 2222, 2223, 6497, 6496, 2227, 6499, 2233, 6515, 2239, 6504, 6508, 2246, 6511, 6515, 2257, 6520, 2262, 6525, 2291, 2293, 2294, 2296, 6530, 6528, 2334, 6533, 6536, 2339, 6546, 2344, 6554, 6557, 2349, 6538, 2354, 6333, 6341, 6578, 2359, 6533, 6536, 2364, 6538, 2369, 6550, 6544, 2374, 6546, 2379, 6550, 2384, 6554, 6557, 2389, 6578, 2426, 6573, 2432, 2433, 2434, 2435, 2436, 2438, 6559, 6564, 6560, 6564, 6563, 
2447, 6567, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6573, 2466, 2467, 6333, 6341, 2470, 2472, 6341, 6338, 2516, 6573, 2522, 2523, 2524, 6578, 6639, 2561, 2562, 2563, 6379, 2577, 2578, 2579, 2580, 2581, 6658, 6580, 2593, 2594, 6615, 2600, 2601, 6646, 2607, 2608, 6580, 6384, 2643, 2644, 2645, 2646, 2647, 6674, 2654, 2655, 6639, 2664, 2665, 2666, 6457, 6615, 2680, 6616, 2685, 6639, 2692, 2693, 2694, 2700, 2701, 2702, 6467, 6598, 2719, 2720, 2721, 6599, 2737, 6601, 2742, 6615, 2771, 6616, 2776, 6638, 2799, 2800, 2801, 6646, 2807, 2808, 6639, 6640, 2822, 2823, 6646, 2834, 2835, 6651, 6661, 6666, 6716, 6716, 6716, 6683, 6688, 6697, 6699, 6701, 6703, 6705, 6709, 6716, 6716, 6716, 6719, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6760, 2205, 6762, 6761, 6145, 6752, 2211, 6754, 6753, 5592, 6756, 2217, 6758, 6757, 5616, 6807, 2224, 2225, 6752, 2229, 6754, 6753, 5592, 6783, 2235, 6765, 6764, 5597, 6756, 2241, 6758, 6757, 5616, 2245, 6760, 2248, 6762, 6761, 6145, 2252, 6765, 6764, 6158, 6767, 2258, 6769, 6768, 6523, 2263, 6771, 6772, 2297, 2298, 6773, 2336, 6774, 2338, 6780, 2341, 6781, 6782, 6786, 2346, 6787, 2348, 6775, 2351, 6776, 6777, 2355, 2356, 2357, 6773, 2361, 6774, 2363, 6775, 2366, 6776, 6777, 6778, 2371, 6779, 2373, 6780, 2376, 6781, 6782, 6783, 2381, 6784, 6785, 6786, 2386, 6787, 2388, 2390, 6795, 2428, 6797, 6796, 6798, 6865, 6867, 6788, 2440, 2441, 2442, 6789, 2444, 2445, 6791, 6792, 2449, 6878, 6793, 6880, 6882, 6795, 2462, 6797, 6796, 6798, 6888, 2468, 2469, 2492, 2493, 6795, 2518, 6797, 6796, 6798, 6898, 2525, 6884, 6868, 6884, 6883, 2560, 6903, 2576, 6909, 2592, 6914, 2599, 2606, 6920, 2616, 2642, 6926, 6930, 2663, 6933, 2679, 2684, 6884, 6868, 2691, 6942, 6945, 6884, 6868, 6884, 6883, 2718, 6950, 2736, 2741, 2770, 2775, 6884, 6868, 6884, 6883, 6892, 6891, 2798, 6962, 2806, 6966, 2813, 2821, 6970, 2833, 6973, 2848, 6437, 2860, 2866, 2868, 2870, 6448, 2890, 6680, 2903, 2908, 6464, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 
2962, 2966, 2970, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 7025, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2292, 2295, 7060, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 7078, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 2429, 2430, 2431, 2439, 7115, 2443, 7118, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7133, 7135, 2517, 2519, 2520, 2521, 7110, 2553, 2554, 7124, 2558, 2559, 6904, 7023, 6906, 6910, 7141, 7142, 7141, 7142, 7023, 7041, 7131, 7056, 6923, 6927, 6934, 7079, 7110, 2689, 2690, 6943, 6946, 7110, 2711, 2712, 7124, 2716, 2717, 6951, 7079, 7079, 7104, 7110, 2783, 2784, 7124, 2791, 2792, 2796, 2797, 6963, 7141, 7142, 7141, 7142, 2856, 7151, 7153, 7178, 7154, 7189, 7156, 2888, 7190, 2894, 7162, 7163, 2910, 7175, 7176, 7177, 7178, 7187, 7189, 7190, 7192, 29, 30, 31, 7232, 7234, 7236, 7238, 7240, 7242, 7245, 7247, 7249, 7251, 7253, 7255, 7257, 7259, 7262, 6821, 7266, 7268, 7269, 7271, 7273, 7276, 7278, 7282, 7284, 7287, 7289, 7292, 7295, 7297, 7299, 7301, 7306, 7308, 7310, 7314, 7316, 2552, 7320, 7302, 2557, 7323, 2574, 7244, 6907, 2590, 2591, 2604, 2605, 2625, 7244, 2633, 2640, 2641, 6924, 7141, 7270, 7279, 7277, 6841, 2678, 2688, 7342, 2710, 7347, 7302, 2715, 7350, 7277, 7274, 7279, 7272, 6841, 2735, 7272, 7274, 7277, 7279, 6841, 2755, 7283, 7285, 7288, 7290, 7293, 7296, 6860, 2769, 2782, 7357, 7302, 7304, 7122, 2790, 7360, 7131, 7362, 7141, 2819, 2820, 2831, 2832, 7147, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7160, 2902, 2904, 7166, 7190, 7173, 2926, 2928, 2944, 2946, 7185, 2958, 2961, 2965, 2969, 29, 30, 31, 7421, 7422, 7423, 2556, 7394, 7395, 7392, 7393, 7396, 7397, 2575, 7427, 7428, 7427, 7428, 7392, 7393, 7394, 7395, 7396, 7397, 2626, 7398, 7399, 7400, 7401, 7402, 7403, 7404, 7405, 7419, 7406, 7407, 
7408, 7410, 7409, 7428, 2661, 2662, 7414, 2674, 7413, 2676, 2677, 7421, 7422, 7421, 7422, 7423, 2714, 7413, 2727, 7412, 2729, 7414, 2731, 7411, 2733, 2734, 7411, 2747, 7412, 2749, 7413, 2751, 7414, 2753, 2754, 7415, 2757, 7416, 2759, 7417, 2761, 7418, 2763, 7419, 2765, 7420, 2767, 2768, 7421, 7422, 7423, 2786, 2787, 7424, 2789, 7425, 7426, 2795, 2812, 7427, 7428, 7427, 7428, 7429, 7432, 2847, 7436, 7437, 7439, 7444, 7444, 7446, 2893, 7453, 2907, 2909, 7455, 7458, 2917, 7480, 7485, 2956, 7490, 7492, 7495, 7497, 7496, 7498, 7499, 7500, 7501, 7504, 7503, 7509, 7508, 7511, 7510, 7513, 7514, 7515, 7516, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7434, 2855, 2858, 2864, 7441, 2882, 2886, 2887, 7557, 7563, 2906, 2912, 2916, 7578, 7587, 7600, 2949, 2953, 7610, 7611, 2964, 2968, 7618, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 7625, 2996, 2997, 7627, 7628, 7631, 3009, 3010, 3013, 3014, 7634, 3019, 3021, 3023, 3025, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7712, 7680, 7738, 7738, 7682, 7687, 7701, 7683, 7707, 7685, 2854, 7689, 7691, 7693, 7707, 7695, 7701, 7697, 2876, 7707, 7699, 7701, 7705, 7703, 7709, 7707, 7705, 2892, 7731, 7715, 7714, 7733, 7725, 7726, 2901, 7716, 7718, 7738, 7738, 7720, 7722, 7724, 7733, 7721, 7723, 7731, 2925, 7726, 7728, 7733, 7725, 7731, 7727, 2936, 7730, 7731, 7729, 7734, 7733, 7732, 2943, 7735, 7738, 7738, 7737, 7739, 2955, 2960, 7741, 7743, 2974, 7748, 7772, 7754, 2993, 7779, 3001, 3002, 3006, 7784, 7786, 3018, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2841, 
2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 7808, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 31, 7906, 7909, 7911, 7916, 7918, 7921, 7923, 7926, 2891, 7930, 7932, 7934, 7938, 7941, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7960, 7962, 7904, 7913, 7914, 7962, 7935, 7962, 7936, 7962, 7958, 7963, 7963, 7964, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7907, 7970, 7972, 7974, 7927, 7978, 7939, 7982, 7985, 7988, 7961, 2971, 2973, 2979, 2983, 7976, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 25, 26, 27, 28, 29, 30, 31, 7912, 7919, 7924, 7979, 7983, 7986, 7989, 8032, 8036, 8036, 2992, 8038, 8038, 8042, 8046, 8054, 8055, 8055, 8045, 8056, 8054, 8055, 8055, 8054, 24, 25, 26, 27, 28, 29, 30, 31, 2972, 2975, 8066, 8064, 2987, 8066, 8065, 8070, 8067, 2999, 3004, 8070, 8068, 8070, 8069, 3016, 3026, 3028, 3029, 3030, 3034, 8074, 3037, 3038, 3040, 3041, 3045, 27, 28, 29, 30, 31, 8096, 2976, 2977, 2988, 2989, 2994, 2995, 8105, 8106, 3007, 3008, 3011, 3012, 8111, 3036, 8114, 8121, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8044, 8129, 8131, 8134, 8049, 8051, 8138, 8140, 8053, 8118, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8130, 8132, 8163, 8160, 8166, 8168, 8164, 8165, 8167, 8163, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3027, 8192, 3032, 3033, 3035, 3039, 3042, 3043, 8193, 3046, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3031, 3044, 8224, 8227, 8228, 8229, 8231, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8256, 8122, 8143, 
8169, 8144, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8259, 8289, 8292, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8320, 8233, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8322, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8353, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 
1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Launch/problem-size constants for the generated circuit evaluator below.
#define THREADS_PER_BLOCK 32   // threads per block; also the stride 't' between logical registers in shared memory
#define BLOCKS_PER_GRID 1      // single block: the kernel relies on __syncthreads() across ALL cooperating threads
#define SIZE_OF_IN 3072        // number of circuit input values (96 slots * 32 threads) copied into shared R[]
#define SIZE_OF_AC 5376        // total entries in the generated B/C/Op operation tables (168 slots * 32 threads)
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[264*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
R[i + 52*t] = A[i + 52*t];
R[i + 53*t] = A[i + 53*t];
R[i + 54*t] = A[i + 54*t];
R[i + 55*t] = A[i + 55*t];
R[i + 56*t] = A[i + 56*t];
R[i + 57*t] = A[i + 57*t];
R[i + 58*t] = A[i + 58*t];
R[i + 59*t] = A[i + 59*t];
R[i + 60*t] = A[i + 60*t];
R[i + 61*t] = A[i + 61*t];
R[i + 62*t] = A[i + 62*t];
R[i + 63*t] = A[i + 63*t];
R[i + 64*t] = A[i + 64*t];
R[i + 65*t] = A[i + 65*t];
R[i + 66*t] = A[i + 66*t];
R[i + 67*t] = A[i + 67*t];
R[i + 68*t] = A[i + 68*t];
R[i + 69*t] = A[i + 69*t];
R[i + 70*t] = A[i + 70*t];
R[i + 71*t] = A[i + 71*t];
R[i + 72*t] = A[i + 72*t];
R[i + 73*t] = A[i + 73*t];
R[i + 74*t] = A[i + 74*t];
R[i + 75*t] = A[i + 75*t];
R[i + 76*t] = A[i + 76*t];
R[i + 77*t] = A[i + 77*t];
R[i + 78*t] = A[i + 78*t];
R[i + 79*t] = A[i + 79*t];
R[i + 80*t] = A[i + 80*t];
R[i + 81*t] = A[i + 81*t];
R[i + 82*t] = A[i + 82*t];
R[i + 83*t] = A[i + 83*t];
R[i + 84*t] = A[i + 84*t];
R[i + 85*t] = A[i + 85*t];
R[i + 86*t] = A[i + 86*t];
R[i + 87*t] = A[i + 87*t];
R[i + 88*t] = A[i + 88*t];
R[i + 89*t] = A[i + 89*t];
R[i + 90*t] = A[i + 90*t];
R[i + 91*t] = A[i + 91*t];
R[i + 92*t] = A[i + 92*t];
R[i + 93*t] = A[i + 93*t];
R[i + 94*t] = A[i + 94*t];
R[i + 95*t] = A[i + 95*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
R[i + 96*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 97*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 98*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 99*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 100*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 101*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 102*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 103*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 104*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 105*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 106*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 107*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 108*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 109*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 110*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 111*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 112*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 113*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 114*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 115*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 116*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 117*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 118*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 119*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
__syncthreads();
R[i + 120*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 121*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 122*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 123*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 124*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 125*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 126*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 127*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 128*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 129*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 130*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 131*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 132*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 133*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
__syncthreads();
R[i + 134*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 135*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 136*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 137*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 138*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 139*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 140*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 141*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 142*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 143*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 144*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 145*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 146*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 147*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 148*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
__syncthreads();
R[i + 149*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 150*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 151*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 152*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 153*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 154*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 155*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 156*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 157*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 158*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 159*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 160*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 161*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 162*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 163*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
__syncthreads();
R[i + 164*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 165*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 166*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 167*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 168*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 169*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
R[i + 170*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 171*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 172*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 173*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
__syncthreads();
R[i + 174*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
R[i + 175*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 176*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 177*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 178*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
R[i + 179*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 180*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 181*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 182*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
R[i + 183*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 184*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 185*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 186*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
R[i + 187*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 188*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 189*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
__syncthreads();
R[i + 190*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 191*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
R[i + 192*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
R[i + 193*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
R[i + 194*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
R[i + 195*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
R[i + 196*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
R[i + 197*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
R[i + 198*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
R[i + 199*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
R[i + 200*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
R[i + 201*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
R[i + 202*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
__syncthreads();
R[i + 203*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
R[i + 204*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
R[i + 205*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
R[i + 206*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
R[i + 207*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
R[i + 208*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
R[i + 209*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]];
R[i + 210*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]];
__syncthreads();
R[i + 211*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]];
R[i + 212*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]];
R[i + 213*t] = Op[i + 117*t] ? R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]];
R[i + 214*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]];
R[i + 215*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]];
R[i + 216*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]];
R[i + 217*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]];
R[i + 218*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]];
__syncthreads();
R[i + 219*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]];
R[i + 220*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]];
R[i + 221*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]];
R[i + 222*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]];
R[i + 223*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]];
R[i + 224*t] = Op[i + 128*t] ? R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]];
R[i + 225*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]];
__syncthreads();
R[i + 226*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]];
R[i + 227*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]];
R[i + 228*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]];
R[i + 229*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]];
R[i + 230*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]];
__syncthreads();
R[i + 231*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]];
R[i + 232*t] = Op[i + 136*t] ? R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]];
R[i + 233*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]];
R[i + 234*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]];
__syncthreads();
R[i + 235*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]];
R[i + 236*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]];
R[i + 237*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]];
R[i + 238*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]];
R[i + 239*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]];
__syncthreads();
R[i + 240*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]];
R[i + 241*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]];
R[i + 242*t] = Op[i + 146*t] ? R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]];
R[i + 243*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]];
__syncthreads();
R[i + 244*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]];
R[i + 245*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]];
R[i + 246*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]];
__syncthreads();
R[i + 247*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]];
R[i + 248*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]];
__syncthreads();
R[i + 249*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]];
R[i + 250*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]];
__syncthreads();
R[i + 251*t] = Op[i + 155*t] ? R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]];
__syncthreads();
R[i + 252*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]];
__syncthreads();
R[i + 253*t] = Op[i + 157*t] ? R[B[i + 157*t]] * R[C[i + 157*t]] : R[B[i + 157*t]] + R[C[i + 157*t]];
__syncthreads();
R[i + 254*t] = Op[i + 158*t] ? R[B[i + 158*t]] * R[C[i + 158*t]] : R[B[i + 158*t]] + R[C[i + 158*t]];
__syncthreads();
R[i + 255*t] = Op[i + 159*t] ? R[B[i + 159*t]] * R[C[i + 159*t]] : R[B[i + 159*t]] + R[C[i + 159*t]];
__syncthreads();
R[i + 256*t] = Op[i + 160*t] ? R[B[i + 160*t]] * R[C[i + 160*t]] : R[B[i + 160*t]] + R[C[i + 160*t]];
__syncthreads();
R[i + 257*t] = Op[i + 161*t] ? R[B[i + 161*t]] * R[C[i + 161*t]] : R[B[i + 161*t]] + R[C[i + 161*t]];
__syncthreads();
R[i + 258*t] = Op[i + 162*t] ? R[B[i + 162*t]] * R[C[i + 162*t]] : R[B[i + 162*t]] + R[C[i + 162*t]];
__syncthreads();
R[i + 259*t] = Op[i + 163*t] ? R[B[i + 163*t]] * R[C[i + 163*t]] : R[B[i + 163*t]] + R[C[i + 163*t]];
__syncthreads();
R[i + 260*t] = Op[i + 164*t] ? R[B[i + 164*t]] * R[C[i + 164*t]] : R[B[i + 164*t]] + R[C[i + 164*t]];
__syncthreads();
R[i + 261*t] = Op[i + 165*t] ? R[B[i + 165*t]] * R[C[i + 165*t]] : R[B[i + 165*t]] + R[C[i + 165*t]];
__syncthreads();
R[i + 262*t] = Op[i + 166*t] ? R[B[i + 166*t]] * R[C[i + 166*t]] : R[B[i + 166*t]] + R[C[i + 166*t]];
__syncthreads();
R[i + 263*t] = Op[i + 167*t] ? R[B[i + 167*t]] * R[C[i + 167*t]] : R[B[i + 167*t]] + R[C[i + 167*t]];
if (i==0) { final += R[263*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
4,179 | /***************************************************************************//**
* \file initialise.cu
* \author Christopher Minar (minarc@oregonstate.edu)
*/
#include "initialise.h"
namespace kernels
{
/*
* sets all the initial u values
* param u u velocities
* param xu x locations of where u is stored
* param yu y locations of where u is stored
* param uInitial initial u velocity
* param uPerturb perturbation coefficient
* param pi... //flag
* param xmax highest x value in the domain
* param xmin lowest x value in the domain
* param ymax highest y value in the domain
* param ymin lowest yvalue in the domain
* nx number of nodes in the x direction
* ny number of nodes in the y direction
*/
__global__
void initialiseU(double *u, double *xu, double *yu, double uInitial, double uPerturb, double pi, double xmax, double xmin,double ymax,double ymin, int nx, int ny)
{
	// One thread per u-node on the (nx-1) x ny staggered grid; extra
	// threads from the launch round-up simply exit.
	int idx = threadIdx.x + (blockDim.x * blockIdx.x);
	if (idx >= (nx-1)*ny)
		return;
	int i = idx % (nx-1);  // x index into xu
	int j = idx / (nx-1);  // y index into yu
	// Uniform base flow plus a smooth perturbation: cosine profile in x,
	// sine profile in y, each normalised to the domain extent.
	double xphase = 0.5*pi*(2*xu[i]-xmax-xmin)/(xmax-xmin);
	double yphase = pi*(2*yu[j]-ymax-ymin)/(ymax-ymin);
	u[idx] = uInitial + uPerturb * cos(xphase) * sin(yphase);
}
/*
* sets all the initial v values
* param u v velocities
* param xv x locations of where v is stored
* param yv y locations of where v is stored
* param vInitial initial v velocity
* param vPerturb perturbation coefficient
* param pi... //flag
* param xmax highest x value in the domain
* param xmin lowest x value in the domain
* param ymax highest y value in the domain
* param ymin lowest yvalue in the domain
* nx number of nodes in the x direction
* ny number of nodes in the y direction
*/
__global__
void initialiseV(double *u, double *xv, double *yv, double vInitial, double vPerturb, double pi, double xmax, double xmin,double ymax,double ymin, int nx, int ny)
{
	// One thread per v-node on the nx x (ny-1) staggered grid; extra
	// threads from the launch round-up simply exit.
	int idx = threadIdx.x + (blockDim.x * blockIdx.x);
	if (idx >= nx*(ny-1))
		return;
	int i = idx % nx;  // x index into xv
	int j = idx / nx;  // y index into yv
	// v values are packed after the (nx-1)*ny u values in the same array.
	idx += (nx-1)*ny;
	// Uniform base flow plus a smooth perturbation: cosine profile in y,
	// sine profile in x (mirror of initialiseU).
	double yphase = 0.5*pi*(2*yv[j]-ymax-ymin)/(ymax-ymin);
	double xphase = pi*(2*xv[i]-xmax-xmin)/(xmax-xmin);
	u[idx] = vInitial + vPerturb * cos(yphase) * sin(xphase);
}
}
|
4,180 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
// const int N=1280;
// const int window=3;
__global__ void mean_Filter (int *inputImage, int *outputImage , int window, int N) {
	// Box (mean) filter over an N x N image. `window` is the full window
	// width; interior pixels receive the integer average of their
	// neighbourhood, edge pixels (where the window would fall off the
	// image) are copied through unchanged.
	int half = window/2;  // half-width; avoids mutating the parameter
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	// BUGFIX: guard against threads past the image — the original wrote
	// outputImage[index] unconditionally, which is out of bounds whenever
	// the grid overshoots N in either dimension.
	if (col >= N || row >= N)
		return;
	int index = col + row * N;
	outputImage[index] = inputImage[index];
	// Average only where the full window fits inside the image.
	if(col < N-half && row < N-half && row >= half && col >= half) {
		int sum = 0;
		for(int x = -half; x<=half ; x++) {
			for(int y = -half; y<= half; y++) {
				// N*x moves by rows, y moves by columns
				sum += inputImage[index + N*x + y];
			}
		}
		outputImage[index] = sum/((half*2+1)*(half*2+1));
	}
}
void mean_Filter_h(int *inputImage, int *outputImage,int window, int N){
	// CPU reference mean filter: every interior pixel of the N x N image
	// becomes the integer average of its surrounding window; pixels within
	// window/2 of an edge are left untouched in outputImage.
	const int half = window / 2;
	const int denom = (half*2+1)*(half*2+1);
	for (int row = half; row < N - half; row++){
		for (int col = half; col < N - half; col++){
			const int center = col + row * N;
			int acc = 0;
			for (int dy = -half; dy <= half; dy++)
				for (int dx = -half; dx <= half; dx++)
					acc += inputImage[center + N*dy + dx];  // N*dy moves rows
			outputImage[center] = acc / denom;
		}
	}
}
int main(int argc, char**argv)
{
	// Usage: <prog> N window
	// Builds an N x N random image, mean-filters it on GPU and CPU,
	// reports both timings, and verifies the two outputs agree.
	// BUGFIX: argv[1] and argv[2] are read below; the original never
	// validated the argument count and crashed on missing arguments.
	if (argc < 3) {
		printf("Usage: %s <image size N> <window size>\n", argv[0]);
		return 1;
	}
	const int N=strtol(argv[1],NULL, 10);
	const int window=strtol(argv[2],NULL,10);
	// Host image and the two filtered results (GPU, CPU)
	int *inputImgageS, *outputImgageS,*outputImgageS1;
	// Device buffers
	int *inputImage, *outputImage;
	// Host buffers for the CPU reference run
	int *inputImage_h, *outputImage_h;
	int size = N * N * sizeof(int);
	inputImgageS= (int*)malloc(size);
	outputImgageS= (int*)malloc(size);
	outputImgageS1=(int*)malloc(size);
	// Fill the image with random 8-bit values
	for (int row=0; row<N; row++) {
		for (int col=0; col<N; col++){
			inputImgageS[col + row * N]=rand() % 256;
		}
	}
	cudaMalloc((void**)&inputImage, size);
	cudaMalloc((void**)&outputImage, size);
	inputImage_h= (int*)malloc(size);
	outputImage_h= (int*)malloc(size);
	// Seed device and CPU-side buffers with the same image; pre-copying the
	// input into the outputs keeps edge pixels defined after filtering.
	cudaMemcpy(inputImage, inputImgageS, size, cudaMemcpyHostToDevice);
	cudaMemcpy(outputImage, inputImgageS, size, cudaMemcpyHostToDevice);
	cudaMemcpy(inputImage_h,inputImgageS,size,cudaMemcpyHostToHost);
	cudaMemcpy(outputImage_h,inputImgageS,size,cudaMemcpyHostToHost);
	// 32*32 = 1024 threads per block (hardware maximum)
	dim3 threadsPerBlock(32,32);
	// NOTE(review): this grid assumes N is a multiple of 32; other sizes
	// leave the bottom/right fringe unfiltered — confirm intended.
	dim3 blocksForGrid(N/threadsPerBlock.x, N/threadsPerBlock.y);
	// Time the GPU filter (wall clock around launch + sync)
	clock_t start_d=clock();
	printf("Doing GPU mean filter\n");
	mean_Filter<<<blocksForGrid, threadsPerBlock>>>(inputImage,outputImage,window,N);
	cudaDeviceSynchronize();
	clock_t end_d = clock();
	// Time the CPU reference filter
	clock_t start_h = clock();
	printf("Doing CPU mean filter\n");
	mean_Filter_h(inputImage_h,outputImage_h,window,N);
	clock_t end_h = clock();
	double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
	double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
	// Retrieve both results for comparison
	cudaMemcpy(outputImgageS, outputImage , size, cudaMemcpyDeviceToHost);
	cudaMemcpy(outputImgageS1, outputImage_h , size, cudaMemcpyHostToHost);
	printf("Image size : %d Window size : %d GPU Time: %f CPU Time: %f\n",N,window,time_d,time_h);
	// Verify the GPU result against the CPU reference.
	printf("Both outputs are same : ");
	bool check=true;
	for (int row=0;row< N;row++){
		for(int col=0;col<N;col++){
			if(outputImgageS[row*N+col]!=outputImgageS1[row*N+col]){
				// BUGFIX: the original reset check=true on every matching
				// pixel, so a single trailing match hid earlier mismatches.
				check=false;
			}
		}
	}
	if(check){
		printf("YES\n");
	}else{
		printf("NO\n");
	}
	cudaFree(inputImage);
	cudaFree(outputImage);
	free(inputImgageS);
	free(outputImgageS);
	free(outputImgageS1);
	free(inputImage_h);
	free(outputImage_h);
	return 0;
}
4,181 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
struct timeval t1, t2;
#define BLOCK_SIZE 16
// kernel MM routine
// kernel MM routine
// Computes one element of C = A * B, one thread per element:
//   c[i + N*j] = sum_k a[i + N*k] * b[k + K*j]
// i.e. A is indexed with leading dimension N and B with leading dimension K.
// NOTE(review): only threadIdx is used, so the host must launch this with a
// single block of N x K threads (see Mul, which passes swapped-looking
// <<<grid, block>>> arguments to achieve exactly that); this caps N*K at the
// 1024-threads-per-block hardware limit — confirm intended.
// NOTE(review): b[k + K*j] implies B's storage has stride K along k;
// verify the intended layout of B against callers (test data is all 1.0,
// which cannot distinguish layouts).
__global__ void mmkernel(float *a, float *b, float *c, int N, int M, int K)
{
int i = threadIdx.x;
int j = threadIdx.y;
float sum = 0.0f;
for (int k = 0; k< M; k++) sum += a[i+N*k] * b[k+K*j];
c [i+N*j] = sum;
}
// host multiplication function
// C = A * B
// A is a hA x wA matrix
// B is a wA x wB matrix
// C is a hA x wB matrix
void Mul (const float *A, const float *B, float *C, int N, int M, int K)
{
// Host wrapper: copies A (N x M) and B (M x K) to the device, launches
// mmkernel to compute C = A*B (N x K), and copies C back.
// NOTE(review): the launch below passes dimBlock(1) as the GRID argument
// and dimGrid(N, K) as the BLOCK argument — the <<<...>>> operands are in
// grid-then-block order, so the variable names are misleading. The kernel
// indexes only via threadIdx, so this is self-consistent, but it limits
// N*K to 1024 threads and silently computes garbage beyond that — confirm.
// NOTE(review): no error checking on cudaMalloc/cudaMemcpy or the launch.
int size;
float *dev_A, *dev_B, *dev_C;
printf("%d %d %d\n", N, M, K);
// Device copy of A: N*M floats
size = N*M*sizeof(float);
cudaMalloc((void **)&dev_A, size);
cudaMemcpy(dev_A, A, size, cudaMemcpyHostToDevice);
// Device copy of B: M*K floats
size = M*K *sizeof(float);
cudaMalloc((void **)&dev_B, size);
cudaMemcpy(dev_B, B, size, cudaMemcpyHostToDevice);
// Device result C: N*K floats
size = N*K * sizeof(float);
cudaMalloc((void **)&dev_C, size);
dim3 dimBlock(1);
dim3 dimGrid(N, K);
mmkernel<<<dimBlock, dimGrid>>> (dev_A, dev_B, dev_C, N, M, K);
cudaMemcpy(C, dev_C, size, cudaMemcpyDeviceToHost);
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
}
int main( int argc, char *argv[])
{
	// Usage: a.out N M K iter method
	// Multiplies an N x M all-ones matrix by an M x K all-ones matrix
	// `iter` times via Mul() and reports the wall-clock time.
	float *A, *B, *C;
	int N, M, K, iter, i;
	int method;
	if (argc < 6) {
		printf("Usage: a.out N M K iter method\n");
		exit(0);
	}
	N= atoi(argv[1]);
	M = atoi(argv[2]);
	K = atoi(argv[3]);
	// All five arguments are guaranteed present here (argc >= 6), so the
	// original's redundant argc >= 5 / argc >= 6 re-checks are removed.
	iter = atoi(argv[4]);
	method = atoi(argv[5]);
	A = (float *)malloc(N*M*sizeof(float));
	B = (float *)malloc(M*K*sizeof(float));
	C = (float *)malloc(N*K*sizeof(float));
	srand48(100);
	for (i=0; i<N*M; ++i) {
		// A[i] = drand48();
		A[i] = 1.0;
	}
	// BUGFIX: C holds N*K floats but the original zeroed it over i < N*M,
	// writing out of bounds whenever M > K (and leaving elements
	// uninitialised when M < K). Initialise C over its own extent.
	for (i=0; i<N*K; ++i)
		C[i] = 0.0;
	for (i=0; i<M*K; ++i) {
		// B[i] = drand48();
		B[i] = 1.0;
	}
	gettimeofday(&t1, NULL);
	for (i=0; i<iter; i++) {
		if (method == 0) {
			Mul(A, B, C, N, M, K);
		} else {
			printf("Method not supported.\n");
			exit(0);
		}
	}
	gettimeofday(&t2, NULL);
	printf("Time for the matrix multiplication(%d) is %d milliseconds\n",
	method,
	(t2.tv_sec - t1.tv_sec)*1000 +
	(t2.tv_usec - t1.tv_usec) / 1000);
#ifdef CHECK
	{
		FILE *fd;
		if ((fd = fopen("tmp333", "w")) == NULL) {
			printf("Cannot open tmp333\n"); exit(0);
		}
		for (i=0; i<N*K; i++)
			fprintf(fd, "%6.2lf\n", C[i]);
		fclose(fd);
	}
#endif
	// BUGFIX: release the host matrices (they leaked before).
	free(A);
	free(B);
	free(C);
	return 0;
}
|
4,182 | #include <stdio.h>
#include <stdlib.h>
// forward declearation
void addOne(float *out_h, const float *in_h, int numElements);
int main(void)
{
	// Driver for addOne (defined elsewhere): fill an input buffer with
	// uniform randoms in [0,1], run addOne, then sum the per-element
	// differences. Presumably addOne increments each element by one, in
	// which case the printed delta equals numElements — confirm against
	// its definition.
	int numElements = 50000;
	float *in_h = (float *)malloc(sizeof(float) * numElements);
	float *out_h = (float *)malloc(sizeof(float) * numElements);
	for (int i = 0; i < numElements; ++i)
		in_h[i] = rand() / (float)RAND_MAX;
	addOne(out_h, in_h, numElements);
	// Accumulate out[i] - in[i] across the whole buffer.
	float delta = 0;
	for (int i = 0; i < numElements; ++i)
		delta += out_h[i] - in_h[i];
	printf("num: %d, delta: %.1f\n", numElements, delta);
	free(in_h);
	free(out_h);
	return 0;
}
|
4,183 | #include "includes.h"
__global__ void kernel_test0_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read)
{
	// Writer half of a memory test: each block fills its own
	// BLOCKSIZE-byte slice of [_ptr, end_ptr) with `pattern`, one 32-bit
	// word at a time. The err* reporting arguments are unused here (they
	// belong to the matching read/verify kernel).
	unsigned int* word = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
	if (word >= (unsigned int*) end_ptr)
		return;
	const unsigned int nWords = BLOCKSIZE/sizeof(unsigned int);
	for (unsigned int w = 0; w < nWords; w++)
		word[w] = pattern;
}
4,184 | #ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <stdio.h>
#include <random>
#include <iomanip>
#include <iostream>
#define N 16
#define BLOCKSIZE 16
cudaError_t minmaxCuda(double *max, double *min, const double *a, float &time, float &seq_time);
__global__ void minmaxKernel(double *max, double *min, const double *a) {
	// First pass: each block reduces BLOCKSIZE consecutive elements of `a`
	// to one maximum and one minimum, written to max[blockIdx.x] and
	// min[blockIdx.x]. Interleaved-addressing scheme with a non-divergent
	// branch (reduction #3 in the classic NVIDIA reduction notes).
	__shared__ double maxtile[BLOCKSIZE];
	__shared__ double mintile[BLOCKSIZE];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	maxtile[tid] = a[i];
	mintile[tid] = a[i];
	__syncthreads();
	for (unsigned int s = 1; s < blockDim.x; s *= 2) {
		int index = 2 * s * tid;
		// BUGFIX: the shared-memory partners must be addressed with
		// `index`, not `tid`. The original compared maxtile[tid+s] with
		// maxtile[tid], which makes active threads' reads overlap other
		// threads' writes (a race) and drops elements from the reduction,
		// producing a wrong min/max.
		if (index < blockDim.x) {
			if (maxtile[index + s] > maxtile[index])
				maxtile[index] = maxtile[index + s];
			if (mintile[index + s] < mintile[index])
				mintile[index] = mintile[index + s];
		}
		// Barrier is outside the branch: every thread reaches it.
		__syncthreads();
	}
	if (tid == 0) {
		max[blockIdx.x] = maxtile[0];
		min[blockIdx.x] = mintile[0];
	}
}
// First pass (sequential-addressing variant): each block reduces BLOCKSIZE
// consecutive elements of `a` to one max and one min, written to
// max[blockIdx.x] / min[blockIdx.x]. The halving stride keeps active
// threads contiguous, so shared-memory accesses are conflict-free.
__global__ void seq_minmaxKernel(double *max, double *min, const double *a) {
__shared__ double maxtile[BLOCKSIZE];
__shared__ double mintile[BLOCKSIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Load one element per thread; assumes the grid exactly covers `a`
// (gridDim.x * blockDim.x elements) — no bounds guard.
maxtile[tid] = a[i];
mintile[tid] = a[i];
__syncthreads();
//sequential addressing by reverse loop and thread-id based indexing
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
if (maxtile[tid + s] > maxtile[tid])
maxtile[tid] = maxtile[tid + s];
if (mintile[tid + s] < mintile[tid])
mintile[tid] = mintile[tid + s];
}
// Barrier outside the divergent branch: all threads reach it each pass.
__syncthreads();
}
// Thread 0 publishes this block's partial results.
if (tid == 0) {
max[blockIdx.x] = maxtile[0];
min[blockIdx.x] = mintile[0];
}
}
__global__ void finalminmaxKernel(double *max, double *min) {
	// Second pass: a single block collapses the per-block partial results
	// (one per launch-1 thread) so that max[0]/min[0] hold the global
	// extrema. Must be launched with <<<1, BLOCKSIZE>>> covering all
	// partials.
	__shared__ double maxtile[BLOCKSIZE];
	__shared__ double mintile[BLOCKSIZE];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	maxtile[tid] = max[i];
	mintile[tid] = min[i];
	__syncthreads();
	for (unsigned int s = 1; s < blockDim.x; s *= 2) {
		int index = 2 * s * tid;
		// BUGFIX: address partners with `index`, not `tid` — the original
		// tid-based accesses raced between active threads and omitted
		// elements from the reduction (same defect as minmaxKernel).
		if (index < blockDim.x) {
			if (maxtile[index + s] > maxtile[index])
				maxtile[index] = maxtile[index + s];
			if (mintile[index + s] < mintile[index])
				mintile[index] = mintile[index + s];
		}
		__syncthreads();
	}
	if (tid == 0) {
		max[blockIdx.x] = maxtile[0];
		min[blockIdx.x] = mintile[0];
	}
}
// Second pass (sequential-addressing variant): a single block collapses the
// per-block partials so max[0]/min[0] hold the global extrema. Launch with
// <<<1, BLOCKSIZE>>> covering all partials.
__global__ void seq_finalminmaxKernel(double *max, double *min) {
__shared__ double maxtile[BLOCKSIZE];
__shared__ double mintile[BLOCKSIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// Load the partial results produced by the first-pass kernel.
maxtile[tid] = max[i];
mintile[tid] = min[i];
__syncthreads();
//sequential addressing by reverse loop and thread-id based indexing
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
if (maxtile[tid + s] > maxtile[tid])
maxtile[tid] = maxtile[tid + s];
if (mintile[tid + s] < mintile[tid])
mintile[tid] = mintile[tid + s];
}
// Barrier outside the divergent branch: all threads reach it each pass.
__syncthreads();
}
// Thread 0 writes the final global results into element 0.
if (tid == 0) {
max[blockIdx.x] = maxtile[0];
min[blockIdx.x] = mintile[0];
}
}
int main()
{
// Fixed 16x16 test matrix of doubles (mix of negatives and positives).
const double a[N*N] = {-8.5, -8.4, -6.8, -4.5, -4.2, -3.9, -3.4, -2.3, 1.5, 3.3, 4.3, 4.7, 6.5, 6.7, 8.0, 9.4,
-7.3, -6.9, -6.0, -4.8, -4.4, -4.3, -3.8, -5.0, 2.5, 2.9, 5.8, 6.3, 6.7, 7.1, 8.0, 9.0,
-9.0, -8.2, -6.0, -4.8, -1.7, -1.2, -1.0, 2.1, 2.7, 3.1, 4.0, 4.2, 7.3, 7.9, 8.1, 8.8,
-9.4, -8.5, -7.2, -6.6, -5.1, -4.4, -3.8, -3.1, -1.9, 2.0, 1.7, 2.5, 3.3, 5.1, 5.7, 6.6,
-9.6, -8.9, -5.9, -2.5, -2.1, -1.8, -8.0, 1.0, 1.7, 2.3, 3.0, 3.8, 5.3, 6.4, 8.4, 9.9,
-9.7, -8.8, -8.1, -7.5, -4.9, -4.2, -2.2, -6.0, 2.1, 3.3, 3.5, 5.3, 5.8, 5.9, 6.7, 7.2,
-9.5, -8.8, -8.3, -8.2, -7.1, -6.5, -4.4, -3.6, -1.1, -6.0, 2.5, 3.8, 4.5, 4.7, 7.1, 9.6,
-9.6, -8.6, -8.4, -6.9, -5.5, -5.4, -4.8, -3.9, -3.6, -7.0, 9.0, 1.1, 3.4, 4.3, 5.8, 10.0,
-9.7, -9.3, -6.1, -5.9, -4.9, -4.6, -4.2, -4.1, -1.8, 4.0, 1.4, 4.0, 5.0, 5.2, 7.3, 7.7,
-7.9, -5.5, -5.0, -4.2, -4.1, -3.7, -1.5, 1.9, 4.5, 5.4, 6.1, 6.5, 6.7, 7.7, 8.1, 9.8,
-8.6, -7.1, -5.3, -5.1, -4.5, -4.1, -2.7, -2.4, -2.1, -1.3, -7.0, 4.4, 6.7, 7.0, 8.2, 9.7,
-9.2, -8.7, -7.9, -6.9, -6.7, -5.3, -2.6, -2.2, -1.9, -1.1, 4.0, 1.4, 6.9, 7.1, 7.9, 9.5,
-9.9, -6.0, -4.8, -3.4, 4.0, 7.0, 1.2, 1.6, 4.5, 5.3, 6.5, 7.3, 7.6, 8.0, 9.0, 9.8,
-9.6, -9.0, -6.7, -6.5, -4.8, -3.0, -2.4, 1.1, 1.2, 1.4, 4.0, 4.5, 4.9, 5.5, 7.0, 7.3,
-8.5, -7.7, -7.1, -6.0, -5.1, -4.8, -3.7, -2.8, -1.8, -1.4, 2.0, 2.3, 4.8, 5.3, 6.4, 9.2,
-9.4, -6.7, -5.2, -4.6, -3.2, -2.3, -1.9, -5.0, 2.0, 2.9, 3.2, 4.3, 4.7, 5.1, 6.4, 6.6};
// Per-block partial results; after minmaxCuda, element 0 holds the
// global extrema.
double *max;
double *min;
float time = 0.0f;
float seq_time = 0.0f;
max = (double *)malloc((N)*sizeof(double));
min = (double *)malloc((N)*sizeof(double));
// NOTE(review): max/min are never freed — trivial leak at process exit.
cudaError_t cudaStatus = minmaxCuda(max, min, a, time, seq_time);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "minmaxCuda failed!");
return 1;
}
/*for (int i = 0; i < N; i++) {
std::cout << "Max[" << i << "] = " << max[i] << std::endl;
}
std::cout << std::endl;
for (int i = 0; i < N; i++) {
std::cout << "Min[" << i << "] = " << min[i] << std::endl;
}*/
std::cout << "Parallel Reduction GPU Implementation" << std::endl;
std::cout << "Execution Time : " << time / 1000 << " seconds" << std::endl;
// NOTE(review): the value printed as "GB/s" is bytes/second (no 1e9
// divisor) — candidate fix, left as-is to preserve output.
std::cout << "Effective Bandwidth : " << (N*N*sizeof(double)*2) / (time / 1000) << " GB/s" << std::endl;
std::cout << std::endl;
std::cout << "Parallel Reduction Sequential Addressing GPU Implementation" << std::endl;
std::cout << "Execution Time : " << seq_time / 1000 << " seconds" << std::endl;
std::cout << "Effective Bandwidth : " << (N*N*sizeof(double)*2) / (seq_time / 1000) << " GB/s" << std::endl;
std::cout << std::endl;
std::cout << "Max value: " << max[0] << std::endl;
std::cout << "Min value: " << min[0] << std::endl;
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t minmaxCuda(double *max, double *min, const double *a, float &time, float &seq_time)
{
	// Runs both reduction variants over the N*N input, timing each with
	// CUDA events. `max`/`min` (length N) receive the partials of the
	// LAST (sequential-addressing) pass; element 0 is the global result.
	// Returns the first CUDA error encountered, or cudaSuccess.
	double *dev_a = 0;
	double *dev_max = 0;
	double *dev_min = 0;
	float milliseconds = 0;
	float milliseconds1 = 0;
	dim3 dimBlock(BLOCKSIZE);
	dim3 dimGrid(N);
	cudaError_t cudaStatus;
	cudaEvent_t start, stop;
	cudaEvent_t start1, stop1;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventCreate(&start1);
	cudaEventCreate(&stop1);
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_max, N * sizeof(double));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_min, N * sizeof(double));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_a, N * N * sizeof(double));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(dev_a, a, N * N * sizeof(double), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	// Timed run 1: interleaved-addressing kernels.
	// MODERNISE: cudaThreadSynchronize() is deprecated; replaced throughout
	// with the semantically identical cudaDeviceSynchronize().
	cudaEventRecord(start);
	minmaxKernel<<<dimGrid, dimBlock>>>(dev_max, dev_min, dev_a);
	cudaDeviceSynchronize();
	finalminmaxKernel<<<1, dimBlock>>>(dev_max, dev_min);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	// Timed run 2: sequential-addressing kernels (re-reads dev_a, so the
	// overwritten partials from run 1 do not affect correctness).
	cudaEventRecord(start1);
	seq_minmaxKernel<<<dimGrid, dimBlock>>>(dev_max, dev_min, dev_a);
	cudaDeviceSynchronize();
	seq_finalminmaxKernel<<<1, dimBlock>>>(dev_max, dev_min);
	cudaEventRecord(stop1);
	cudaEventSynchronize(stop1);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "minmaxKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching minmaxKernel!\n", cudaStatus);
		goto Error;
	}
	cudaStatus = cudaMemcpy(max, dev_max, N * sizeof(double), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(min, dev_min, N * sizeof(double), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	cudaEventElapsedTime(&milliseconds, start, stop);
	cudaEventElapsedTime(&milliseconds1, start1, stop1);
	time = milliseconds;
	seq_time = milliseconds1;
Error:
	// BUGFIX: destroy the timing events (they leaked on every call).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaEventDestroy(start1);
	cudaEventDestroy(stop1);
	cudaFree(dev_max);
	cudaFree(dev_min);
	cudaFree(dev_a);
	return cudaStatus;
}
|
4,185 | /* Program : To query the device properties of the Tesla K40c GPU
* Author : Anant Shah
* Roll Number : EE16B105
* Date : 14-8-2018
**/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#define DEVICE_ID 0
#define ERROR_HANDLER(error_msg) error_handler(error_msg)
void error_handler(cudaError_t error_msg){
	/* Abort with a descriptive message when a CUDA call fails; a no-op on
	   cudaSuccess. Wrapped by the ERROR_HANDLER macro. */
	if(error_msg == cudaSuccess)
		return;
	printf("%s in %s at line %d\n", cudaGetErrorString(error_msg),__FILE__,__LINE__);
	exit(EXIT_FAILURE);
}
int main(int argc,char *argv[]){
	/* Query the properties of device DEVICE_ID and write selected fields,
	   one per line, to ee16b105_1_out.txt. */
	cudaDeviceProp* device_properties;
	FILE *fp;
	device_properties = (cudaDeviceProp *)malloc(sizeof(cudaDeviceProp));
	ERROR_HANDLER(cudaGetDeviceProperties(device_properties,DEVICE_ID));
	fp = fopen("ee16b105_1_out.txt","w");
	if(fp!=NULL){
		fprintf(fp,"%d\n",(device_properties)->localL1CacheSupported);
		fprintf(fp,"%d \n",(device_properties)->globalL1CacheSupported);
		fprintf(fp,"%d \n",(device_properties)->l2CacheSize);
		fprintf(fp,"%d \n",(device_properties)->maxThreadsPerBlock);
		fprintf(fp,"%d \n",(device_properties)->regsPerBlock);
		fprintf(fp,"%d \n",(device_properties)->regsPerMultiprocessor);
		fprintf(fp,"%d \n",(device_properties)->warpSize);
		fprintf(fp,"%zu \n",device_properties->totalGlobalMem);
		fclose(fp);               /* BUGFIX: the file was never closed */
	}else{
		printf("Error : File not found");
		free(device_properties);
		exit(EXIT_FAILURE);
	}
	free(device_properties);      /* BUGFIX: the malloc'd struct leaked */
	return 0;                     /* explicit success exit */
}
|
4,186 | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
using namespace std;
__global__ void MM(int m, int k, int n, int *A, int *B, int *C)
{
	// Row-major matrix multiply, one thread per output element:
	//   C (m x k) = A (m x n) * B (n x k)
	// Out-of-range threads (grid round-up) exit without touching memory.
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= m || col >= k)
		return;
	int acc = 0;
	for (int p = 0; p < n; p++)
		acc += A[row*n + p] * B[p*k + col];
	C[row*k + col] = acc;
}
int main(int argc, char* argv[])
{
	// Usage: <prog> <expected-C file> <A file> <B file>
	// Each file starts with its two dimensions followed by the elements.
	// Multiplies A (m x k) by B (k x n) on the GPU and verifies the result
	// against the expected matrix.
	// BUGFIX: validate the argument count and every fopen before use (the
	// original dereferenced argv[1..3] and NULL FILE* unconditionally).
	if (argc < 4) {
		cout << "Usage: " << argv[0] << " <C file> <A file> <B file>\n";
		return 1;
	}
	FILE *handle1 = fopen(argv[2], "r");   // A
	FILE *handle2 = fopen(argv[3], "r");   // B
	FILE *handle3 = fopen(argv[1], "r");   // expected C
	if (handle1 == NULL || handle2 == NULL || handle3 == NULL) {
		cout << "Error: could not open input file(s)\n";
		return 1;
	}
	int m,n,k;
	fscanf(handle1, "%d", &m);
	fscanf(handle1, "%d", &k);
	fscanf(handle2, "%d", &k);
	fscanf(handle2, "%d", &n);
	fscanf(handle3, "%d", &m);
	fscanf(handle3, "%d", &n);
	int (*pA), (*pB), (*pC);
	int i,j;
	// static: 4 x 1MB buffers would otherwise risk stack overflow.
	static int a[500*500], b[500*500], c[500*500], c_ans[500*500];
	for(i=0;i<m;i++)
		for(j=0;j<k;j++)
			fscanf(handle1, "%d", &a[i*k + j]);
	for(i=0;i<k;i++)
		for(j=0;j<n;j++)
			fscanf(handle2, "%d", &b[i*n + j]);
	for(i=0;i<m;i++)
		for(j=0;j<n;j++)
			fscanf(handle3, "%d", &c_ans[i*n + j]);
	// BUGFIX: close the input files (they leaked before).
	fclose(handle1);
	fclose(handle2);
	fclose(handle3);
	cudaMalloc((void**)&pA, (m*k)*sizeof(int));
	cudaMalloc((void**)&pB, (k*n)*sizeof(int));
	cudaMalloc((void**)&pC, (m*n)*sizeof(int));
	cudaMemcpy(pA, a, (m*k)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(pB, b, (k*n)*sizeof(int), cudaMemcpyHostToDevice);
	dim3 dimBlock(32, 32);
	// Round the grid up to cover the largest dimension; the kernel
	// bounds-checks the overshoot.
	dim3 dimGrid(max(m,max(n,k))/dimBlock.x+1, max(m,max(k,n))/dimBlock.y+1);
	// Note the argument order: the kernel's (m, k, n) receive (m, n, k)
	// here, which matches its internal indexing of A (m x k) and B (k x n).
	MM<<<dimGrid,dimBlock>>>(m,n,k,pA,pB,pC);
	cudaMemcpy(c, pC, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
	cout<<"Verifying results: \n";
	int flag = 1;
	for(i=0;i<m;i++)
	{
		for(j=0;j<n;j++)
		{
			if(c[i*n + j] != c_ans[i*n+ j])
			{
				flag = 0;
				cout<<"Wrong answer\n" << c[i*n + j]<<" "<<c_ans[i*n + j]<<endl<<i<<" "<<j<<endl;
				break;
			}
		}
	}
	if(flag)
		cout<<"Answer verified\n";
	cudaFree(pA);
	cudaFree(pB);
	cudaFree(pC);
	return flag ? 0 : 1;
}
4,187 | #include "includes.h"
__global__ void __hashmult2(int nrows, int nfeats, int ncols, int brows1, int brows2, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {} |
4,188 | #include "includes.h"
__global__ void normCalc (float *d_A, float *d_B, int n) {
	// Column-standardise an n x n matrix: for each column `col` of d_A
	// (element (row,col) read as d_A[col*n+row]), write (x - mean)/stddev
	// to d_B[row*n+col]. A zero-variance column produces all zeros.
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (col >= n)
		return;
	// BUGFIX: row/mu/sigma were declared `__shared__ int` — a single copy
	// raced across every thread in the block, and the statistics were
	// truncated to integers. They are per-thread values and must be
	// thread-local floats. The __syncthreads() calls (now removed) were
	// also unsafe: they sat inside the divergent `if (col < n)` branch.
	float mu = 0.0f;
	for (int row = 0; row < n; row++)
		mu += d_A[col*n+row];
	mu /= (float)n;
	float sigma = 0.0f;
	for (int row = 0; row < n; row++)
		sigma += powf(d_A[col*n+row] - mu, 2.0f);
	sigma /= (float)n;
	sigma = sqrtf(sigma);
	for (int row = 0; row < n; row++) {
		if (sigma == 0.0f)
			d_B[row*n+col] = 0.0f;
		else
			d_B[row*n+col] = (d_A[col*n+row] - mu) / sigma;
	}
}
4,189 | #include "includes.h"
// Stamps a solid colour (r,g,b) into planar-RGB `target` wherever the A
// channel of the planar-RGBA `textureMask` is positive, with the texture's
// origin placed at (inputX, inputY) in the target. One thread per texture
// RGB sample (3 * textureWidth * textureHeight threads); the 2D grid is
// flattened below. Assumes target is 3 planes and textureMask 4 planes of
// width*height floats — TODO confirm with callers.
__global__ void DrawMaskedColorKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *textureMask, int textureWidth, int textureHeight, float r, float g, float b)
{
// Flatten the (possibly 2D) grid into one linear thread id.
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int targetPixels = targetWidth * targetHeight;
int texturePixels = textureWidth * textureHeight;
// Decompose id into (channel, pixel, x, y) within the texture.
int idTextureRgb = id / texturePixels;
int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster
int idTextureY = idTexturePixel / textureWidth;
int idTextureX = (idTexturePixel - idTextureY * textureWidth); // same as (id % textureWidth), but the kernel runs another 10% faster
if (idTextureRgb < 3) // only RGB channels are interesting
{
// if the texture pixel offset by inputX, inputY, lies inside the target
if (idTextureX + inputX < targetWidth &&
idTextureX + inputX >= 0 &&
idTextureY + inputY < targetHeight &&
idTextureY + inputY >= 0)
{
// Destination inside the matching target plane, and the mask's
// alpha sample for this pixel (plane 3 of the texture).
int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX);
int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture
float a = textureMask[aIndex];
if (a > 0) // mask allows color here
{
// Write the channel of the requested colour that this thread owns.
switch (idTextureRgb)
{
case 0:
target[tIndex] = r;
break;
case 1:
target[tIndex] = g;
break;
case 2:
default:
target[tIndex] = b;
break;
}
}
}
}
} |
4,190 | #include <stdio.h>
#include <time.h>
/*
Measure Time
Maximum Matrix Size
*/
const int TILE_DIM = 32;
inline cudaError_t checkCuda(cudaError_t result) {
	// Terminate the process on any CUDA runtime failure; pass the status
	// through unchanged on success so calls can be wrapped inline.
	if (result == cudaSuccess)
		return result;
	printf("CUDA Runtime Error: %s\n", cudaGetErrorString(result));
	exit(1);
}
__global__ void transposeMatrix(float *B, float *A, int width)
{
	// Tiled transpose: B = A^T for a width x width matrix.
	// Launch with gridDim = (width/TILE_DIM, width/TILE_DIM) and
	// blockDim = (TILE_DIM, TILE_DIM); width must be a multiple of
	// TILE_DIM (enforced by the host code).
	// PERF: +1 column of padding removes shared-memory bank conflicts on
	// the column-wise (transposed) reads below; results are unchanged.
	__shared__ float tile[TILE_DIM][TILE_DIM + 1];
	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	// Coalesced read of one tile of A.
	tile[threadIdx.y][threadIdx.x] = A[y*width + x];
	__syncthreads();
	// Swap block coordinates and write the transposed tile, again with
	// coalesced global stores.
	x = blockIdx.y * TILE_DIM + threadIdx.x;
	y = blockIdx.x * TILE_DIM + threadIdx.y;
	B[y*width + x] = tile[threadIdx.x][threadIdx.y];
}
int main(int argc, char **argv)
{
	// Transposes an N x N matrix on CPU and GPU, times both, and compares
	// the results element-wise.
	// BUGFIX: the size lives in argv[1], so argc must be at least 2; the
	// original tested `argc < 1`, which is never true, and then crashed
	// reading a missing argument.
	if (argc < 2) {
		printf("matrix size is a mandatory parameter");
		exit(1);
	}
	// Number of rows and columns must be a multiple of 32
	int rows = atoi(argv[1]);
	if (rows % TILE_DIM ) {
		printf("number of rows must be multiple of 32\n");
		exit(1);
	}
	const int columns = rows;
	const int width = rows;
	const int size = rows*columns*sizeof(float);
	dim3 dimGrid(width/TILE_DIM, width/TILE_DIM);
	dim3 dimBlock(TILE_DIM, TILE_DIM);
	float *A, *B, *C;
	// Pinned host allocations for faster H2D/D2H transfers.
	checkCuda( cudaMallocHost(&A, size) );
	checkCuda( cudaMallocHost(&B, size) );
	checkCuda( cudaMallocHost(&C, size) );
	float *dA, *dB;
	checkCuda( cudaMalloc(&dA, size) );
	checkCuda( cudaMalloc(&dB, size) );
	for (int j = 0; j < rows; j++)
		for (int i = 0; i < columns; i++)
			A[j*width + i] = 0.15*i + 0.1*j;
	// CPU reference transpose (C = A^T), timed with wall clock.
	clock_t tStart = clock();
	for (int j = 0; j < rows; j++)
		for (int i = 0; i < columns; i++)
			C[j*width + i] = A[i*width + j];
	printf("Time taken by Host: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
	checkCuda( cudaMemcpy(dA, A, size, cudaMemcpyHostToDevice) );
	// GPU transpose, timed around launch + sync (excludes transfers).
	tStart = clock();
	transposeMatrix<<<dimGrid, dimBlock>>>(dB, dA, width);
	checkCuda( cudaDeviceSynchronize() );
	printf("Time taken by GPU: %.6fs\n", (double)(clock() - tStart) / CLOCKS_PER_SEC);
	checkCuda( cudaMemcpy(B, dB, size, cudaMemcpyDeviceToHost) );
	// Exact comparison is valid here: a transpose only moves values.
	for (int i = 0; i < rows * columns; i++)
	{
		if (B[i] != C[i]) {
			printf("%d %f %f INVALID RESULTS \n", i, B[i], C[i]);
			goto finished;
		}
	}
	printf("Matrix Transpose Successful");
finished:
	checkCuda( cudaFree(dA) );
	checkCuda( cudaFree(dB) );
	checkCuda(cudaFreeHost(A));
	checkCuda(cudaFreeHost(B));
	checkCuda(cudaFreeHost(C));
}
4,191 | extern "C"
{
__global__ void blur(const long *IN, long *OUT, const int n) {
	// 3x3 box blur on an n x n image: interior pixels become the integer
	// mean of their 3x3 neighbourhood; border pixels are copied unchanged.
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	int idy = blockIdx.y * blockDim.y + threadIdx.y;
	// BUGFIX: guard against grids that overshoot the image — the original
	// read and wrote out of bounds for idx/idy >= n.
	if (idx >= n || idy >= n)
		return;
	long v = 0;
	if (!(idx==0 || idx==n-1 || idy == 0 || idy==n-1) ) {
		// Sum the 3x3 window around (idx, idy).
		for(int i=-1; i<2; i++) {
			for (int j=-1; j<2; j++) {
				v = v + IN[(idx+i) + (idy+j)*n];
			}
		}
		v = v / 9;
	} else {
		// Border: pass the pixel through.
		v = IN[idx + idy*n];
	}
	OUT[idx + idy*n] = v;
}
} |
4,192 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
|
4,193 | //
//This is a code for the kernel basics and also the error handling
//Author: Zhaoyuan "Maxwell" Cui
#include<cuda_runtime.h>
#include<stdio.h>
#define CHECK(call)\
{\
const cudaError_t error=call;\
if(error!=cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code:%d, reason: %s\n",error,cudaGetErrorString(error));\
exit(1);\
}\
}\
// Element-wise vector add: C[i] = A[i] + B[i], one thread per element.
// NOTE(review): no bounds guard — this is only safe because main() launches
// exactly nElm threads (nElm = 100 divides evenly by the block size of 10);
// any other configuration would read/write past the arrays. Consider adding
// an `n` parameter and an `if (i < n)` guard.
__global__ void kernel(float *A, float *B, float *C)
{
int i;
i=blockIdx.x*blockDim.x+threadIdx.x;
C[i]=A[i]+B[i];
}
// Driver: add two constant 100-element vectors on the GPU and verify
// every element equals 3 on the host.
int main()
{
int nElm=100;
float *h_A, *h_B, *h_C;
int size=nElm*sizeof(float);
h_A=(float*)malloc(size);
h_B=(float*)malloc(size);
h_C=(float*)malloc(size);
// Known inputs so the expected result (1 + 2 == 3) is trivial.
for(int i=0;i<nElm;i++)
{
h_A[i]=1;
h_B[i]=2;
h_C[i]=0;
}
float *d_A,*d_B,*d_C;
CHECK(cudaMalloc(&d_A,size));
CHECK(cudaMalloc(&d_B,size));
CHECK(cudaMalloc(&d_C,size));
CHECK(cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice));
dim3 block (10);
dim3 grid ((nElm+block.x-1)/block.x);
kernel<<<grid,block>>>(d_A,d_B,d_C);
// Fix: a kernel launch returns no status; fetch launch-configuration
// errors explicitly (the original never checked the launch).
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost));
// Fix: verify all elements, then report success once — the original
// printed "Success!" for every element (nElm times).
for(int i=0;i<nElm;i++)
{
if(h_C[i]!=3)
{
printf("Calculation error!\n");
exit(1);
}
}
printf("Success!\n");
free(h_A);
free(h_B);
free(h_C);
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
return 0;
}
|
4,194 | #include "includes.h"
// Naive histogram: each thread reads one input element and bumps the
// bin selected by (value % BIN_COUNT).
// NOTE(review): the increment below is a plain read-modify-write, so
// threads hitting the same bin race and counts are lost; a correct
// version would use atomicAdd(&d_bins[myBin], 1). The "naive" name
// suggests the race is intentional (e.g. teaching material) — confirm.
// NOTE(review): there is no bounds guard, so the launch must cover
// exactly the input length — verify at the call site.
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
d_bins[myBin]++;
}
4,195 | //http://www.bu.edu/pasi/files/2011/07/Lab5.pdf
//http://fgiesen.wordpress.com/2009/12/13/decoding-morton-codes/
/*
Sort Voronoi using Morton Code
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <cmath>
#include <thrust/sort.h>
const int n = 4;
// One Voronoi site/sample: a BGR color, a distance value, the site's
// original linear index, and the Morton (Z-order) code of its grid
// position, which serves as the sort key (see operator< below).
struct Color{
int blue, green, red;
int dist;   // distance value (filled with rand() in this demo)
int index;  // original row-major index, recorded before sorting
int morton; // Z-order code of the site's (x, y) grid position
// Print one tab-separated row matching the table header in main().
void print()
{
printf("%d\t%d\t%d\t%d\t%d\t%d\n", index, morton, blue, green, red, dist);
}
};
// Order Colors by Morton code, so thrust::sort arranges sites in
// Z-order (preserving 2D spatial locality). Marked __host__
// __device__ so it is usable inside thrust's device-side sort.
__host__ __device__ bool operator<(const Color &lhs, const Color &rhs)
{
return lhs.morton < rhs.morton;
}
// "Insert" a 0 bit after each of the 16 low bits of x
// Spread the low 16 bits of x so that input bit k lands at result bit
// 2k (one zero inserted after every input bit). Used to build 2D
// Morton codes. (OR is equivalent to the classic XOR form here: at
// every stage the overlapping bits are discarded by the mask.)
int Part1By1(int x)
{
    int v = x & 0x0000ffff;           // keep only the 16 bits to spread
    v = (v | (v << 8)) & 0x00ff00ff;  // separate into two byte groups
    v = (v | (v << 4)) & 0x0f0f0f0f;  // bytes -> nibbles
    v = (v | (v << 2)) & 0x33333333;  // nibbles -> bit pairs
    v = (v | (v << 1)) & 0x55555555;  // one data bit per even position
    return v;
}
// "Insert" two 0 bits after each of the 10 low bits of x
// Spread the low 10 bits of x so that input bit k lands at result bit
// 3k (two zeros inserted after every input bit). Used to build 3D
// Morton codes. (OR equals the classic XOR form: shifted copies never
// overlap surviving mask positions at any stage.)
int Part1By2(int x)
{
    int v = x & 0x000003ff;            // keep only the 10 bits to spread
    v = (v | (v << 16)) & 0xff0000ff;  // split into two groups
    v = (v | (v << 8)) & 0x0300f00f;
    v = (v | (v << 4)) & 0x030c30c3;
    v = (v | (v << 2)) & 0x09249249;   // one data bit every 3rd position
    return v;
}
// Interleave the bits of (x, y) into a 2D Morton (Z-order) code:
// x's bits occupy the even positions, y's bits the odd positions.
int EncodeMorton2(int x, int y)
{
    const int xSpread = Part1By1(x);
    const int ySpread = Part1By1(y);
    return (ySpread << 1) + xSpread;
}
// Interleave the bits of (x, y, z) into a 3D Morton code: input bit k
// of x, y, z lands at result position 3k, 3k+1, 3k+2 respectively.
int EncodeMorton3(int x, int y, int z)
{
    const int xSpread = Part1By2(x);
    const int ySpread = Part1By2(y);
    const int zSpread = Part1By2(z);
    return (zSpread << 2) + (ySpread << 1) + xSpread;
}
// Demo driver: build an n x n grid of random Colors, tag each with the
// Morton code of its (x, y) position, sort them on the device by
// Morton code via thrust, and print the table before and after.
int main(void)
{
thrust::device_vector<Color> cd;
thrust::host_vector<Color> ch;
printf("Unsorted\n");
printf("index\tmorton\tblue\tgreen\tred\tdist\n");
int idx = 0;
for (int y = 0; y < n; y++) //row major
{
for(int x = 0; x < n; x++)
{
Color c;
//std::cout << "x " << x << " y " << y << std::endl;
//std::cout << "ID: " << idx <<" Morton " << EncodeMorton2(x, y) << std::endl;
// Pseudo-random channel values; rand() is never seeded, so every
// run produces the same sequence (reproducible output).
c.blue = 25 + 204 * (rand()%256)/255;
c.green = 25 + 204 * (rand()%256)/255;
c.red = 25 + 204 * (rand()%256)/255;
c.dist = rand();
c.index = idx;
c.morton = EncodeMorton2(x, y);
c.print();
ch.push_back(c);
idx++;
}
//std::cout << std::endl;
}
// Copy to device, sort there using operator< (Morton order), copy back.
cd = ch;
thrust::sort(cd.begin(), cd.end());
ch = cd;
printf("\nSorted Morton Codes(Second Columm)\n");
printf("index\tmorton\tblue\tgreen\tred\tdist\n");
for(unsigned int i = 0; i < ch.size(); i++)
ch[i].print();
return 0;
}
|
4,196 | #include <cstdio>
using namespace std;
// Tiled matrix multiply C = A * B for n x n row-major matrices.
// Launch contract (see matmul below): a 2D grid of square blocks
// (blockDim.x == blockDim.y == tile edge) and dynamic shared memory of
// 2 * blockDim.x * blockDim.y * sizeof(float) for one tile of A plus
// one tile of B. Out-of-range tile entries are loaded as zero, so n
// need not be a multiple of the tile size.
__global__ void matmul_kernel(const float* A, const float* B, float* C,
unsigned int n) {
// Dynamic shared memory, split into the A tile followed by the B tile.
extern __shared__ float arr[];
float* sA = &arr[0];
float* sB = &arr[blockDim.x * blockDim.y];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// First/last linear index of the row-band of A this block consumes,
// and the steps that walk A rightwards and B downwards tile by tile.
int aBegin = n * blockDim.x * by;
int aEnd = aBegin + n - 1;
int aStep = blockDim.x;
int bBegin = blockDim.x * bx;
int bStep = blockDim.x * n;
float Cval = 0;
for (int a = aBegin, b = bBegin, brow = 0; a <= aEnd;
a += aStep, b += bStep, brow += blockDim.y) {
// Stage one tile of A into shared memory (zero past the edge).
int rowA = blockDim.y * by + ty;
int colA = a + tx - blockDim.y * by * n;
if (rowA < n && colA < n)
sA[ty * blockDim.x + tx] = A[a + n * ty + tx];
else
sA[ty * blockDim.x + tx] = 0;
// Stage the matching tile of B (zero past the edge).
int colB = blockDim.x * bx + tx;
int rowB = brow + ty;
if (rowB < n && colB < n)
sB[ty * blockDim.x + tx] = B[b + n * ty + tx];
else
sB[ty * blockDim.x + tx] = 0;
// All threads must finish writing the tiles before anyone reads.
__syncthreads();
// if (tx == 0 && ty == 0 && bx == 0 && by == 0) {
// for (int i = 0; i < blockDim.y; i++) {
// for (int j = 0; j < blockDim.x; j++) {
// printf("%f ", sA[i*blockDim.x + j]);
// }
// printf("\n");
// }
// printf("---------\n");
// for (int i = 0; i < blockDim.y; i++) {
// for (int j = 0; j < blockDim.x; j++) {
// printf("%f ", sB[i*blockDim.x + j]);
// }
// printf("\n");
// }
// }
// safe because out-of-bounds entries are 0
for (int k = 0; k < blockDim.x; k++) {
Cval += sA[ty * blockDim.x + k] * sB[k * blockDim.x + tx];
}
// if (tx == 1 && ty == 1)
// printf("%f\n", Cval);
// Keep threads from overwriting the tiles for the next iteration
// while slower threads are still reading them.
__syncthreads();
}
// Write the accumulated element, guarding the ragged edge of C.
int idx = n * blockDim.x * by + blockDim.x * bx;
int rowC = blockDim.y * by + ty;
int colC = blockDim.x * bx + tx;
if (rowC < n && colC < n) C[idx + n * ty + tx] = Cval;
}
// Host wrapper: launch the tiled matmul kernel for n x n device
// matrices A, B -> C. block_dim is the square tile edge; the dynamic
// shared memory holds one tile of A and one tile of B.
__host__ void matmul(const float* A, const float* B, float* C, unsigned int n,
                     unsigned int block_dim) {
  dim3 block(block_dim, block_dim);
  dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
  matmul_kernel<<<grid, block, 2 * sizeof(float) * block_dim * block_dim>>>(
      A, B, C, n);
  // Fix: the launch returns no status and the original dropped all
  // errors. Surface configuration errors (block_dim too large, shared
  // memory overflow) and asynchronous execution errors explicitly.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    fprintf(stderr, "matmul launch failed: %s\n", cudaGetErrorString(err));
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
    fprintf(stderr, "matmul execution failed: %s\n", cudaGetErrorString(err));
}
4,197 | #include <cuda.h>
#include <stdio.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <time.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <thrust/inner_product.h>
#include <thrust/binary_search.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
// Seed one curandState per thread. Each thread uses its global id as
// the curand subsequence number, so the per-thread streams are
// statistically independent.
__global__ void initPRNG(int seed, curandState *rngState)
{
const unsigned int gid = threadIdx.x + blockIdx.x*blockDim.x;
curand_init(seed, gid, 0, &rngState[gid]);
}
// Fill data[0..n) with integers in [0, q) via a grid-stride loop.
// Each thread pulls its PRNG state into a local copy, draws from it,
// and writes the advanced state back so later launches continue the
// same stream. (curand() % q carries the usual slight modulo bias.)
__global__ void generate_uniform_int(int n, unsigned int *data, int q, curandState *rngState)
{
const unsigned int gid = threadIdx.x + blockIdx.x*blockDim.x;
const unsigned gridSize = blockDim.x*gridDim.x;
curandState state = rngState[gid];
for (int idx = gid; idx < n; idx += gridSize)
data[idx] = curand(&state)%q;
rngState[gid] = state;
}
// Driver: generate 2^28 uniform integers in [0, q) on the GPU, then
// histogram them twice — once on the CPU and once with thrust
// (sort + upper_bound + adjacent_difference) — and print both.
int main()
{
int nBlocks = 128, nThreads = 128;
int seed = 1234;
// Fix: q was a non-const int, making `hist[q]` a variable-length
// array — a compiler extension, not standard C++.
const int q=4;
int hist[q];
// default PRNG: one state per launched thread
curandState *rngState_dev;
cudaMalloc(&rngState_dev, sizeof(curandState)*nBlocks*nThreads);
initPRNG<<<nBlocks, nThreads>>>(seed, rngState_dev);
int n=1<<28;
unsigned int *hostData, *devData;
size_t memSize=sizeof(unsigned int)*n;
// host and device memory allocation
hostData = (unsigned int *)malloc(memSize);
cudaMalloc(&devData, memSize);
// regenerate several times; each launch continues the PRNG streams
for(int i=0; i<10; i++)
generate_uniform_int<<<nBlocks, nThreads>>>(n, devData, q, rngState_dev);
cudaMemcpy(hostData, devData, memSize, cudaMemcpyDeviceToHost);
// CPU histogram of the q possible values
for(int i=0; i<q; i++) hist[i] = 0;
for(int i=0; i<n; i++) {
hist[hostData[i]] ++;
}
for(int i=0; i<q; i++) printf("%d %d\n", i, hist[i]);
// GPU histogram: sort, find each bucket's end with upper_bound,
// then adjacent_difference turns cumulative ends into counts.
thrust::sort(thrust::device_ptr<unsigned int>(devData),
thrust::device_ptr<unsigned int>(devData)+n);
thrust::device_vector<unsigned int> histogram(q,0);
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::upper_bound(thrust::device_ptr<unsigned int>(devData),
thrust::device_ptr<unsigned int>(devData)+n,
search_begin, search_begin+q, histogram.begin());
thrust::adjacent_difference(histogram.begin(), histogram.end(),
histogram.begin());
for(int i=0; i<histogram.size(); i++)
std::cout << i << " " << histogram[i] << "\n";
cudaFree(rngState_dev);
cudaFree(devData); free(hostData);
return 0;
}
|
4,198 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define EXPO 3
// One recursive-doubling step for the scalar recurrence
// y(i+1) = m*y(i) + b. Y and M are double-length buffers: one half
// holds the previous iteration's values, the other receives the
// updated ones; evenOrOddFlag selects which half is which (ping-pong).
// NOTE(review): blockRow/blockColumn and deviceA..deviceD are accepted
// but never used here; the only launch of this kernel in this file is
// commented out and passes a shorter argument list — confirm intent.
__global__ void RecursiveDoublingKernel(int variableSize, int step,int blockRow, int blockColumn,float* deviceY,float* deviceM,int evenOrOddFlag,float deviceA,float* deviceB,float* deviceC, float *deviceD)
{
// we will do something like y(i+1) = m*y(i) + b
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int processIndex=tx;
// debug tracing (device printf — serialized and slow)
printf("%d ",tx);
printf("%f,%f,%f \n",deviceY[0],deviceY[1],deviceY[2]);
printf("%f,%f,%f \n",deviceM[0],deviceM[1],deviceM[2]);
// M and Y are split into two parts: the first part stores the old
// values, the second half stores the updated values.
int halfSize=variableSize;
// The start index of the second part is halfSize.
// If evenOrOddFlag is odd, the new value is stored in the second half;
// otherwise it is stored in the first half.
int secondhalfHelper=halfSize+step+processIndex;
printf("second half helper is: %d \n",secondhalfHelper);
// Entries 0..step-1 are not recomputed this round: they are copied
// forward unchanged, since new values start at index step.
if(evenOrOddFlag%2==1)
{
printf("does this ever got run?");
deviceY[secondhalfHelper]=deviceY[secondhalfHelper-halfSize]+deviceM[secondhalfHelper-halfSize]*deviceY[processIndex];
deviceM[secondhalfHelper]=deviceM[secondhalfHelper-halfSize]*deviceM[processIndex];
// copy the carried-forward prefix once (thread (0,0) only)
if(tx==0&&ty==0)
{
for(int i=0;i<step;i++)
{
deviceY[i+halfSize]=deviceY[i];
deviceM[i+halfSize]=deviceM[i];
}
}
}
else
{
printf("this should not run \n");//so will store the new value in the first part
deviceY[secondhalfHelper-halfSize]=deviceY[secondhalfHelper]+deviceM[secondhalfHelper]*deviceY[halfSize+processIndex];
deviceM[secondhalfHelper-halfSize]=deviceM[secondhalfHelper]*deviceM[halfSize+processIndex];
if(tx==0&&ty==0) //just need to copy once, so the other processors are allowed to idle at this time
{
for(int i=0;i<step;i++)
{
deviceY[i]=deviceY[i+halfSize];
// NOTE(review): this writes M's second half (same direction as the
// odd branch) while Y above copies the opposite way — looks
// asymmetric, possibly a bug; confirm against the algorithm.
deviceM[i+halfSize]=deviceM[i];
}
}
}
__syncthreads();
}
// Matrix (2x2) version of one recursive-doubling step: each "value" is
// a 2-vector in deviceYForW (2 floats per entry) and a 2x2 matrix in
// deviceMForW (4 floats per entry, row-major). As in the scalar
// kernel, both buffers are double-length ping-pong buffers and
// evenOrOddFlag selects which half holds the previous iteration.
// variableSize is the length of one Y half (YforWLength in main).
// NOTE(review): deviceA..deviceD are accepted but never used here.
__global__ void MatrixVersionRecursiveDoubling(int variableSize, int step,int blockRow, int blockColumn,float* deviceYForW,float* deviceMForW,int evenOrOddFlag,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
// currently assumes grid (1,1); other grid sizes not implemented
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int processId=tx; //valid only for this particular grid/block setup
// halves: Y entries are 2 floats, M entries are 4 floats
int halfSizeY=variableSize;
int halfSizeM=2*variableSize;
/*
int secondhalfHelper=halfSize+step+2*processIndex; //this need to multiply 2, different from non-matrix version
int secondhalfHelper1=halfSize+step+4*processIndex;*/
// destination indices in the "other" half for this thread's entry
int indexHelperY=halfSizeY+2*step+2*processId;
int indexHelperM=halfSizeM+4*step+4*processId;
if(evenOrOddFlag%2==1)
{
// update Y (2-vector) and M (2x2 product) into the second half
deviceYForW[indexHelperY]=deviceYForW[indexHelperY-halfSizeY]+deviceMForW[indexHelperM-halfSizeM]*deviceYForW[2*processId]+deviceMForW[indexHelperM-halfSizeM+1]*deviceYForW[2*processId+1];
deviceYForW[indexHelperY+1]=deviceYForW[indexHelperY-halfSizeY+1]+deviceMForW[indexHelperM-halfSizeM+2]*deviceYForW[2*processId]+deviceMForW[indexHelperM-halfSizeM+3]*deviceYForW[2*processId+1];
deviceMForW[indexHelperM]=deviceMForW[4*step+4*processId]*deviceMForW[4*processId]+deviceMForW[4*step+4*processId+1]*deviceMForW[4*processId+2];
deviceMForW[indexHelperM+1]=deviceMForW[4*step+4*processId]*deviceMForW[4*processId+1]+deviceMForW[4*step+4*processId+1]*deviceMForW[4*processId+3];
deviceMForW[indexHelperM+2]=deviceMForW[4*step+4*processId+2]*deviceMForW[4*processId]+deviceMForW[4*step+4*processId+3]*deviceMForW[4*processId+2];
deviceMForW[indexHelperM+3]=deviceMForW[4*step+4*processId+2]*deviceMForW[4*processId+1]+deviceMForW[4*step+4*processId+3]*deviceMForW[4*processId+3];
// carry the first `step` entries forward unchanged
// NOTE(review): every thread runs this copy loop (no tx==0 guard as
// in the scalar kernel) — redundant concurrent writes; confirm.
for(int i=0;i<step;i++)
{
deviceYForW[halfSizeY+2*i]=deviceYForW[2*i];
deviceYForW[halfSizeY+2*i+1]=deviceYForW[2*i+1];
deviceMForW[halfSizeM+4*i]=deviceMForW[4*i];
deviceMForW[halfSizeM+4*i+1]=deviceMForW[4*i+1];
deviceMForW[halfSizeM+4*i+2]=deviceMForW[4*i+2];
deviceMForW[halfSizeM+4*i+3]=deviceMForW[4*i+3];
}
}
else
{
// mirror image: read from the second half, write into the first
deviceYForW[indexHelperY-halfSizeY]=deviceYForW[indexHelperY]+deviceMForW[indexHelperM]*deviceYForW[2*processId+halfSizeY]+deviceMForW[indexHelperM+1]*deviceYForW[2*processId+1+halfSizeY];
deviceYForW[indexHelperY-halfSizeY+1]=deviceYForW[indexHelperY+1]+deviceMForW[indexHelperM+2]*deviceYForW[2*processId+halfSizeY]+deviceMForW[indexHelperM+3]*deviceYForW[2*processId+1+halfSizeY];
deviceMForW[indexHelperM-halfSizeM]=deviceMForW[4*step+4*processId+halfSizeM]*deviceMForW[4*processId+halfSizeM]+deviceMForW[4*step+4*processId+1+halfSizeM]*deviceMForW[4*processId+2+halfSizeM];
deviceMForW[indexHelperM+1-halfSizeM]=deviceMForW[4*step+4*processId+halfSizeM]*deviceMForW[4*processId+1+halfSizeM]+deviceMForW[4*step+4*processId+1+halfSizeM]*deviceMForW[4*processId+3+halfSizeM];
deviceMForW[indexHelperM+2-halfSizeM]=deviceMForW[4*step+4*processId+2+halfSizeM]*deviceMForW[4*processId+halfSizeM]+deviceMForW[4*step+4*processId+3+halfSizeM]*deviceMForW[4*processId+2+halfSizeM];
deviceMForW[indexHelperM+3-halfSizeM]=deviceMForW[4*step+4*processId+2+halfSizeM]*deviceMForW[4*processId+1+halfSizeM]+deviceMForW[4*step+4*processId+3+halfSizeM]*deviceMForW[4*processId+3+halfSizeM];
// carry the first `step` entries back to the first half
for(int i=0;i<step;i++)
{
deviceYForW[2*i]=deviceYForW[2*i+halfSizeY];
deviceYForW[2*i+1]=deviceYForW[2*i+1+halfSizeY];
deviceMForW[4*i]=deviceMForW[4*i+halfSizeM];
deviceMForW[4*i+1]=deviceMForW[4*i+1+halfSizeM];
deviceMForW[4*i+2]=deviceMForW[4*i+2+halfSizeM];
deviceMForW[4*i+3]=deviceMForW[4*i+3+halfSizeM];
}
}
}
// Driver: set up a tridiagonal system (A sub-, B main-, C super-
// diagonal, D right-hand side) discretizing a boundary-value problem
// with m = 2^EXPO - 1 interior points, then run the matrix-form
// recursive-doubling kernel log2 times to compute the W recurrence,
// time it, print the result, and release all resources. Large
// commented-out regions are earlier scalar-version experiments.
int main()
{
/* float* M;
float* Y;
int variableSize=10;
int variableSpace=2*variableSize*sizeof(float);*/
//make it double size since it run in parallel so you want to keep all the previous version
/* M=(float*)malloc(variableSpace);
Y=(float*)malloc(variableSpace); */
/* M[0]=1;
Y[0]=1;*/
// problem size and mesh spacing on [a, b]
int m=pow(2,EXPO)-1;
int b=1;
int a=0;
float delta=(b-a)*1.0/(m+1.0);
// store the matrix that is to be LU decomposed
float *A;
float *B;
float *C;
float *D;
int chunkLength=m;
int chunkSize=chunkLength*sizeof(float);
A=(float*)malloc(chunkSize);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
// sub-diagonal (A[0] unused, set to 0)
A[0]=0;
//int vectorLength=EXPO*m;
for(int i=1;i<m;i++)
{
A[i]=1-delta*delta*0.5*(i+1);
}
//else will be 0
/* for(int i=m;i<chunkLength;i++)
{
A[i]=0;
}*/
// main diagonal
for(int i=0;i<m;i++)
{
B[i]=-2+delta*delta*1.0;
}
/* for(int i=m;i<chunkLength;i++)
{
B[i]=0;
}*/
// super-diagonal (last entry unused, set to 0)
C[m-1]=0;
for(int i=0;i<m-1;i++)
{
C[i]=1+0.5*delta*delta*(i+1);
}
/* for(int i=m;i<chunkLength;i++)
{
C[i]=0;
}*/
/* D[0]=2*delta*delta*delta+0.5*delta*delta-1;*/
// right-hand side
for(int i=0;i<m-1;i++)
{
D[i]=2*(i+1)*pow(delta,3);
}
D[m-1]=2*m*delta*delta*delta-1+3.5*delta*delta;
/* for(int i=m;i<chunkLength;i++)
{
D[i]=0;
}*/
// device copies of the four diagonals
float *deviceA, *deviceB, *deviceC, *deviceD;
cudaMalloc((void**)&deviceA,chunkSize);
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,chunkSize);
//copy the host vector to device.
cudaMemcpy(deviceA,A,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,chunkSize,cudaMemcpyHostToDevice);
// wall-clock timing of the whole GPU section (CPU clock; note this
// measures host time, not a CUDA event timing)
clock_t begin,end;
begin=clock();
//start the code to calculate the w with recursive doubling applied to matrix
//so we need 2*2*(N-1) for both YforW and 2*4*(N-1) for MforW , the size N should be equal to m here
float *MforW, *YforW;
int MforWLength=4*(m-1);
int YforWLength=2*(m-1);
// double-sized ping-pong buffers (old half + new half)
int MforWSize=2*MforWLength*sizeof(float);
int YforWSize=2*YforWLength*sizeof(float);
MforW=(float*)malloc(MforWSize);
YforW=(float*)malloc(YforWSize);
//the first step of recursive doubling, initialize Y and M;
YforW[0]=1;
YforW[1]=B[0]/(C[0]*1.0);
//the others should be 0 since V(I)=A[I]V[I-1]+0
for(int i=2;i<YforWLength;i++)
{
YforW[i]=0;
}
//the first M should be the identity [1,0,0,1]
MforW[0]=1;
MforW[1]=0;
MforW[2]=0;
MforW[3]=1;
// remaining 2x2 companion matrices built from the diagonals
for(int i=4;i<MforWLength;i=i+4)
{
MforW[i]=0;
MforW[i+1]=1;
MforW[i+2]=-1.0*A[i/4]/C[i/4];
MforW[i+3]=1.0*B[i/4]/C[i/4];
}
float *deviceMforW, *deviceYforW;
cudaMalloc((void**)&deviceMforW,MforWSize);
cudaMalloc((void**)&deviceYforW,YforWSize);
cudaMemcpy(deviceMforW,MforW,MforWSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceYforW,YforW,YforWSize,cudaMemcpyHostToDevice);
// doubling loop: step = 1, 2, 4, ... ; evenOrOddFlag tells the kernel
// which half of the ping-pong buffers holds the current values
int step=1;
int evenOrOddFlag=0;
do {
//each time needs N-Step processors
evenOrOddFlag=evenOrOddFlag+1;
dim3 dimGrid(1,1);
int blockRow=1;
int blockColumn=(m-1)-step;
dim3 dimBlock(blockColumn,blockRow);
//variableSIZE should be half size y
MatrixVersionRecursiveDoubling<<<dimGrid,dimBlock>>>(YforWLength,step,blockRow,blockColumn,deviceYforW,deviceMforW,evenOrOddFlag,deviceA,deviceB,deviceC,deviceD);
step=step+step;
}while( step <= YforWLength/2);
//so if evenOrOddFlag is odd, it means that the latest value will be second half,
//otherwise it will be in the first half
// NOTE(review): there is no cudaDeviceSynchronize/cudaGetLastError
// here; cudaMemcpy below synchronizes implicitly, but launch errors
// are never checked — confirm acceptable for this demo.
cudaMemcpy(MforW,deviceMforW,MforWSize,cudaMemcpyDeviceToHost);
cudaMemcpy(YforW,deviceYforW,YforWSize,cudaMemcpyDeviceToHost);
/*M[0]=1;
Y[0]=1;
for(int i=1;i<variableSize;i++)
{
M[i]=2;
Y[i]=3;
}
float *deviceM, *deviceY;
cudaMalloc((void**)&deviceM,variableSpace);
cudaMalloc((void**)&deviceY,variableSpace);
cudaMemcpy(deviceM,M,variableSpace,cudaMemcpyHostToDevice);
cudaMemcpy(deviceY,Y,variableSpace,cudaMemcpyHostToDevice);
int step=1;
int evenOrOddFlag=0;
do {
//each time needs N-Step processors
evenOrOddFlag=evenOrOddFlag+1;
dim3 dimGrid(1,1);
int blockRow=1;
int blockColumn=variableSize-step;
dim3 dimBlock(blockColumn,blockRow);
RecursiveDoublingKernel<<<dimGrid,dimBlock>>>(variableSize,step,blockRow,blockColumn,deviceY,deviceM,evenOrOddFlag);
step=step+step;
}while( step <= variableSize);
//so if evenOrOddFlag is odd, it means that the latest value will be second half,
//otherwise it will be in the first half
cudaMemcpy(M,deviceM,variableSpace,cudaMemcpyDeviceToHost);
cudaMemcpy(Y,deviceY,variableSpace,cudaMemcpyDeviceToHost);*/
/* printf("solution is here: \n");
if(evenOrOddFlag%2==0)
{
for(int i=0;i<variableSize;i++)
{
printf("%f \n",Y[i]);
}
}
else
{
for(int i=0;i<variableSize;i++)
{
printf("%f \n",Y[i+variableSize]);
}
}*/
// print whichever half holds the final values (see flag note above)
printf("solution is here: \n");
if(evenOrOddFlag%2==0)
{
for(int i=0;i<YforWLength;i++)
{
printf("%f \n",YforW[i]);
}
}
else
{
for(int i=0;i<YforWLength;i++)
{
printf("%f \n",YforW[i+YforWLength]);
}
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n time used to calculate this is :%f seconds \n",time_spent);
// release device and host resources
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
cudaFree(deviceD);
cudaFree(deviceMforW);
cudaFree(deviceYforW);
free(A);
free(B);
free(C);
free(D);
free(MforW);
free(YforW);
return 0;
}
|
4,199 | #include <cstdio>
// Check a CUDA runtime call: on failure, report the numeric code, its
// message, and the call site; optionally terminate the process.
#define gpuErrchk(ans) \
  { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  fprintf(stderr, "GPUassert: %d %s %s %d\n", code, cudaGetErrorString(code),
          file, line);
  if (abort)
    exit(code);
}
// Element-wise add of two size x size matrices using a 2D
// grid-stride loop, so any launch geometry covers the whole matrix.
__global__ void matrixAddKernel(float *matA, float *matB, float *matC,
                                int size) {
  const size_t row0 = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t rowStride = blockDim.x * gridDim.x;
  const size_t col0 = blockIdx.y * blockDim.y + threadIdx.y;
  const size_t colStride = blockDim.y * gridDim.y;
  for (size_t r = row0; r < size; r += rowStride)
    for (size_t c = col0; c < size; c += colStride)
      matC[r * size + c] = matA[r * size + c] + matB[r * size + c];
}
// 1.B Un thread por elemento
// 1.B: one thread per element; the guard handles grids that
// over-cover the size x size matrices.
__global__ void matrixAddKernel_B(float *matA, float *matB, float *matC,
                                  int size) {
  const int row = blockDim.x * blockIdx.x + threadIdx.x;
  const int col = blockDim.y * blockIdx.y + threadIdx.y;
  if (row < size && col < size)
    matC[row * size + col] = matA[row * size + col] + matB[row * size + col];
}
// 1.C Un thread por fila
// 1.C: one thread per row; each in-range thread walks its whole row.
__global__ void matrixAddKernel_C(float *matA, float *matB, float *matC,
                                  int size) {
  const int row = blockDim.x * blockIdx.x + threadIdx.x;
  if (row >= size)
    return;
  for (size_t col = 0; col < size; col++)
    matC[row * size + col] = matA[row * size + col] + matB[row * size + col];
}
// 1.D Un thread por columna
// 1.D: one thread per column; each in-range thread walks its whole
// column (stride-size accesses, so loads are uncoalesced by design).
__global__ void matrixAddKernel_D(float *matA, float *matB, float *matC,
                                  int size) {
  const int col = blockDim.x * blockIdx.x + threadIdx.x;
  if (col >= size)
    return;
  for (size_t row = 0; row < size; row++)
    matC[row * size + col] = matA[row * size + col] + matB[row * size + col];
}
// 1.A
// 1.A host wrapper: copies the size x size inputs to the device, runs
// the three kernel variants (per-element, per-row, per-column — each
// overwrites d_matC with the same sums), and copies the result back.
void matrixAdd(float *matA, float *matB, float *matC, int size) {
  size_t sizeM = size * size * sizeof(float);
  float *d_matA, *d_matB, *d_matC;
  gpuErrchk(cudaMalloc(&d_matA, sizeM));
  gpuErrchk(cudaMemcpy(d_matA, matA, sizeM, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMalloc(&d_matB, sizeM));
  gpuErrchk(cudaMemcpy(d_matB, matB, sizeM, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMalloc(&d_matC, sizeM));
  // Execute the kernels.
  // Fix: the original used ceil(size / threads.x), which divides two
  // integers (truncating) BEFORE ceil(), so it under-provisioned
  // blocks whenever size was not a multiple of 16 — and launched zero
  // blocks for size < 16. Use integer ceil-division instead.
  // 1.B
  dim3 threads(16, 16);
  dim3 blocks((size + threads.x - 1) / threads.x,
              (size + threads.y - 1) / threads.y);
  matrixAddKernel_B<<<blocks, threads>>>(d_matA, d_matB, d_matC, size);
  gpuErrchk(cudaGetLastError());
  // 1.C
  threads = dim3(16);
  blocks = dim3((size + threads.x - 1) / threads.x);
  matrixAddKernel_C<<<blocks, threads>>>(d_matA, d_matB, d_matC, size);
  gpuErrchk(cudaGetLastError());
  // 1.D
  threads = dim3(16);
  blocks = dim3((size + threads.x - 1) / threads.x);
  matrixAddKernel_D<<<blocks, threads>>>(d_matA, d_matB, d_matC, size);
  gpuErrchk(cudaGetLastError());
  gpuErrchk(cudaMemcpy(matC, d_matC, sizeM, cudaMemcpyDeviceToHost));
  cudaFree(d_matC);
  cudaFree(d_matA);
  cudaFree(d_matB);
}
// Driver: add two constant 16x16 matrices on the GPU and verify the
// result (1 + 2 == 3 everywhere) on the host.
int main() {
  size_t size = 16;
  float *matA = new float[size * size];
  float *matB = new float[size * size];
  float *matC = new float[size * size];
  for (size_t i = 0; i < size * size; i++) {
    matA[i] = 1.0;
    matB[i] = 2.0;
  }
  matrixAdd(matA, matB, matC, size);
  // Fix: the original never checked the result.
  for (size_t i = 0; i < size * size; i++) {
    if (matC[i] != 3.0f) {
      fprintf(stderr, "Mismatch at %zu: %f\n", i, matC[i]);
      break;
    }
  }
  // Fix: the original leaked all three new[] allocations.
  delete[] matA;
  delete[] matB;
  delete[] matC;
  return 0;
}
|
4,200 | // fermi
// Avoid mangling of function names
extern "C" {
__global__ void vectoraddKernel(const int n, float* c, const float* a, const float* b);
}
// Generated-style vector add: c[i] = a[i] + b[i] for i in [0, n).
// The kernel re-derives its own launch-geometry bookkeeping from n
// instead of trusting the actual launch configuration; it expects a
// 2D block shape of (x = threads-per-warp, y = warps-per-block).
__global__ void vectoraddKernel(const int n, float* c, const float* a, const float* b) {
const int bi = blockIdx.x;
const int wti = threadIdx.y;
const int tti = threadIdx.x;
// Assumed threads per block (capped at 1024, the hardware maximum).
const int nrThreadsN = min(1024, n);
// NOTE(review): nrBlocksN (ceil(n / nrThreadsN)) is computed but never
// used below — dead code, presumably a code-generator artifact.
const int nrBlocksN = n == 1 * nrThreadsN ?
1 :
n % (1 * nrThreadsN) == 0 ?
n / (1 * nrThreadsN) :
n / (1 * nrThreadsN) + 1
;
// Block is decomposed into warps of up to 32 x-threads.
const int nrThreadsNrThreadsN = min(32, nrThreadsN);
// NOTE(review): nrWarpsNrThreadsN is likewise unused dead code.
const int nrWarpsNrThreadsN = nrThreadsN == 1 * nrThreadsNrThreadsN ?
1 :
nrThreadsN % (1 * nrThreadsNrThreadsN) == 0 ?
nrThreadsN / (1 * nrThreadsNrThreadsN) :
nrThreadsN / (1 * nrThreadsNrThreadsN) + 1
;
// Flatten (warp, lane) into an in-block index, then guard both the
// block-local and the global bound before the add.
const int ti = wti * (1 * nrThreadsNrThreadsN) + tti;
if (ti < nrThreadsN) {
const int i = bi * (1 * nrThreadsN) + ti;
if (i < n) {
c[i] = a[i] + b[i];
}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.