serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,601 | // Memoria global
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 16
// Round-trip copy demo: host A -> device A -> device B -> host B,
// then print both host buffers. Frees ALL resources before exit
// (the original leaked the two host buffers).
int main(int argc, char** argv) {
    float *hst_A, *hst_B;   // host buffers
    float *dev_A, *dev_B;   // device buffers

    // Host allocations, checked so a failure doesn't segfault below.
    hst_A = (float*)malloc(N * sizeof(float));
    hst_B = (float*)malloc(N * sizeof(float));
    if (hst_A == NULL || hst_B == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // Device allocations.
    cudaMalloc((void**)&dev_A, N * sizeof(float));
    cudaMalloc((void**)&dev_B, N * sizeof(float));

    // Input gets uniform random values in [0, 1]; output cleared.
    for (int i = 0; i < N; i++) {
        hst_A[i] = (float)rand() / RAND_MAX;
        hst_B[i] = 0;
    }

    // Host -> device, device -> device, device -> host.
    cudaMemcpy(dev_A, hst_A, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, dev_A, N*sizeof(float), cudaMemcpyDeviceToDevice);
    cudaMemcpy(hst_B, dev_B, N*sizeof(float), cudaMemcpyDeviceToHost);

    // Show results.
    printf("ENTRADA (hst_A):\n");
    for (int i = 0; i < N; i++) {
        printf("%.2f ", hst_A[i]);
    }
    printf("\n");
    printf("SALIDA (hst_B):\n");
    for (int i = 0; i < N; i++) {
        printf("%.2f ", hst_B[i]);
    }
    printf("\n");

    // Release device AND host memory (original freed only the device side).
    cudaFree(dev_A);
    cudaFree(dev_B);
    free(hst_A);
    free(hst_B);
    return 0;
}
|
22,602 | // Vector addition: C = 1/A + 1/B.
// compile with the following command:
//
// (for GTX970)
// nvcc -arch=compute_52 -code=sm_52,sm_52 -O2 -m64 -o vecAdd vecAdd.cu
//
// (for GTX1060)
// nvcc -arch=compute_61 -code=sm_61,sm_61 -O2 -m64 -o vecAdd vecAdd.cu
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/** Variables 2D
float** h_A; // host vectors
float** h_B;
float** h_C;
float** h_D;
float** d_A; // device vectors
float** d_B;
float** d_C;
**/
// Functions
// Fill an n x n matrix (n*n contiguous floats) with uniform values in [0, 1].
void RandomInit(float* data, int n)
{
    const int total = n * n;
    int idx = 0;
    while (idx < total) {
        data[idx] = rand() / (float)RAND_MAX;
        ++idx;
    }
}
// Device code
// Element-wise matrix addition C = A + B on an N x N row-major matrix.
// Expects a 2D launch; each thread handles one element, with a guard for
// threads past the matrix edge.
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x; // column
    int row = threadIdx.y + blockIdx.y * blockDim.y; // row
    if (col < N && row < N) {
        int idx = col + row * N;
        C[idx] = A[idx] + B[idx];
    }
    // The original ended with __syncthreads(); threads share no data here,
    // so the barrier was pure overhead and has been removed.
}
// Host code
// Benchmark N x N matrix addition on GPU vs CPU and verify the result.
// Fixes over the original: the grid is sized from N instead of being
// hard-coded to 4x4 (which silently skipped elements for N > 4*tpb);
// the threads-per-block check accounts for the 2D block (tpb*tpb <= 1024);
// buffer sizes use size_t so N*N*sizeof(float) cannot overflow int;
// the 4 GB capacity check uses the real byte count; the error norm sums
// squared differences so sqrt() yields a true Euclidean norm; host
// buffers are freed.
int main( )
{
    float *h_A, *h_B, *h_C, *h_D;
    float *d_A, *d_B, *d_C;
    int gid;
    cudaError_t err = cudaSuccess;

    // Select GPU by device id read from stdin.
    scanf("%d", &gid);
    err = cudaSetDevice(gid);
    if (err != cudaSuccess) {
        printf("!!! Cannot select GPU with device ID = %d\n", gid);
        exit(1);
    }
    printf("Set GPU with device ID = %d\n", gid);

    printf("Vector Addition: C = A + B\n");
    int N;
    printf("Enter the size of the vectors: ");
    scanf("%d", &N);
    printf("%d\n", N);

    // Three N x N float matrices must fit in 4 Gbyte.
    double bytesNeeded = 3.0 * (double)N * (double)N * sizeof(float);
    if (bytesNeeded > 4.0 * 1024.0 * 1024.0 * 1024.0) {
        printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
        exit(2);
    }
    // size_t avoids 32-bit overflow of N*N*sizeof(float).
    size_t size = (size_t)N * (size_t)N * sizeof(float);

    // Host allocations and random input.
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    RandomInit(h_A, N);
    RandomInit(h_B, N);

    // Block edge length: the 2D block holds tpb*tpb threads, so tpb <= 32.
    int threadsPerBlock = 0;
    while (1) {
        printf("Enter the number of threads per block: ");
        scanf("%d", &threadsPerBlock);
        printf("%d\n", threadsPerBlock);
        if (threadsPerBlock < 1 || threadsPerBlock * threadsPerBlock > 1024) {
            printf("%d, The number of threads per block must be 1024!\n",
                   threadsPerBlock * threadsPerBlock);
            continue;
        }
        break;
    }
    // Enough blocks per dimension to cover all N rows/columns (ceil-divide).
    int blocksPerDim = (N + threadsPerBlock - 1) / threadsPerBlock;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // --- Time device allocation + input transfers -------------------------
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float Intime;
    cudaEventElapsedTime(&Intime, start, stop);
    printf("Input time for GPU: %f (ms) \n", Intime);

    // --- Time the kernel --------------------------------------------------
    cudaEventRecord(start, 0);
    dim3 blocksPerGrid2D(blocksPerDim, blocksPerDim);
    dim3 threadsPerBlock2D(threadsPerBlock, threadsPerBlock);
    VecAdd<<<blocksPerGrid2D, threadsPerBlock2D>>>(d_A, d_B, d_C, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float gputime;
    cudaEventElapsedTime(&gputime, start, stop);
    printf("Processing time for GPU: %f (ms) \n", gputime);
    // N*N additions performed in gputime milliseconds.
    printf("GPU Gflops: %f\n", (double)N * N / (1000000.0 * gputime));

    // --- Time output transfer + device cleanup ----------------------------
    cudaEventRecord(start, 0);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float Outime;
    cudaEventElapsedTime(&Outime, start, stop);
    printf("Output time for GPU: %f (ms) \n", Outime);

    float gputime_tot = Intime + gputime + Outime;
    printf("Total time for GPU: %f (ms) \n", gputime_tot);

    // --- CPU reference ----------------------------------------------------
    cudaEventRecord(start, 0);
    h_D = (float*)malloc(size);
    for (size_t i = 0; i < (size_t)N * N; i++)
        h_D[i] = h_A[i] + h_B[i];
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float cputime;
    cudaEventElapsedTime(&cputime, start, stop);
    printf("Processing time for CPU: %f (ms) \n", cputime);
    printf("CPU Gflops: %f\n", (double)N * N / (1000000.0 * cputime));
    printf("Speed up of GPU = %f\n", cputime / (gputime_tot));

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // --- Verify: Euclidean norm of (GPU result - CPU reference) -----------
    printf("Check result:\n");
    double sum = 0.;
    for (size_t i = 0; i < (size_t)N * N; i++) {
        double diff = fabs((double)h_D[i] - (double)h_C[i]);
        sum += diff * diff;
    }
    sum = sqrt(sum);
    printf("norm(h_C - h_D)=%20.15e\n\n", sum);

    // Release host memory (original leaked all four buffers).
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_D);
    cudaDeviceReset();
}
|
22,603 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* DESCRIPTION :
*
Serial Concurrent Wave Equation - C Version
*
This program implements the concurrent wave equation
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
# include <stdio.h>
# include <stdlib.h>
# include <math.h>
# include <time.h>
# include <cuda_runtime.h>
# define MAXPOINTS 1000000
# define MAXSTEPS 1000000
# define MINPOINTS 20
# define PI 3.14159265
# define DIM_GRID 1
# define BLOCK_SIZE 256
void check_param ( void ) ;
void init_line ( void ) ;
void update ( void ) ;
void printfinal ( void ) ;
int nsteps , /*number of time steps */
tpoints , /*total points along string */
rcode ; /*generic return code */
float values [ MAXPOINTS +2], /*values at time t */
oldval [ MAXPOINTS +2], /*values at time (t - dt ) */
newval [ MAXPOINTS +2]; /*values at time ( t + dt ) */
float *d_values, *d_oldval, *d_newval;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Checks input values from parameters
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Prompt on stdin until the globals tpoints and nsteps hold values inside
   their allowed ranges, then echo the accepted configuration. */
void check_param ( void )
{
    char tchar [20];

    /* Number of points along the vibrating string. */
    while ( tpoints < MINPOINTS || tpoints > MAXPOINTS ) {
        printf ( "Enter number of points along vibrating string [%d-%d]: "
                 , MINPOINTS , MAXPOINTS ) ;
        scanf ( "%s" , tchar ) ;
        tpoints = atoi ( tchar ) ;
        if ( tpoints < MINPOINTS || tpoints > MAXPOINTS )
            printf ( "Invalid. Please enter value between %d and %d\n" ,
                     MINPOINTS , MAXPOINTS ) ;
    }

    /* Number of time steps. */
    while ( nsteps < 1 || nsteps > MAXSTEPS ) {
        printf ( "Enter number of time steps [1-%d]: " , MAXSTEPS ) ;
        scanf ( "%s" , tchar ) ;
        nsteps = atoi ( tchar ) ;
        if ( nsteps < 1 || nsteps > MAXSTEPS )
            printf ( "Invalid. Please enter value between 1 and %d\n" ,
                     MAXSTEPS ) ;
    }

    printf ( "Using points = %d, steps = %d\n", tpoints, nsteps) ;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Initialize points on line
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Grid-stride initialization: set values_d[i] = sin(2*pi*x) for the
   1-based points 1..tpoints, and copy the same values into oldvalue_d
   (previous time step equals current at t = 0). */
__global__ void init_line (float* values_d, float* oldvalue_d, int tpoints)
{
// Flat global thread id and total thread count; striding lets any launch
// size cover all points.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
float fac = 2.0 * PI;
float x;
// NOTE(review): the thread with idx == 0 also writes slot 0 using
// x = -1/(tpoints-1); slot 0 appears unused elsewhere (printing starts at
// index 1) but confirm before relying on it.
for ( int i = idx; i <= tpoints; i+=stride) {
x = (float)(i-1)/(tpoints-1); // integer-derived fraction; limited precision
values_d[i] = __sinf(fac * x); // fast-math sine (reduced accuracy vs sinf)
oldvalue_d [i] = values_d[i];
}
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Calculate new values using wave equation
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Update all values along line a specified number of times
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Advance the string nsteps time steps. Each point j is updated from only
   its own current and previous value, so every thread can iterate its
   grid-stride set of points independently, with no inter-thread
   synchronization. Endpoints (j == 1 and j == tpoints) are pinned to zero. */
__global__ void update(float* values_d, float* oldvalue_d, int tpoints, int nsteps)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;

    for (int t = 1; t <= nsteps; t++) {
        for (int j = first; j <= tpoints; j += step) {
            float nv;
            if (j == 1 || j == tpoints)
                nv = 0.0;
            else
                nv = (1.82) * values_d[j] - oldvalue_d[j];
            oldvalue_d[j] = values_d[j]; /* current becomes previous */
            values_d[j] = nv;
        }
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Print final results
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Print the final amplitudes (globals values[1..tpoints]), ten per line. */
void printfinal()
{
    for (int i = 1; i <= tpoints; i++) {
        printf("%6.4f ", values[i]);
        if (i % 10 == 0)
            printf("\n");
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Main program
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Driver: read tpoints/nsteps from argv, validate, run init and update
// kernels, and print the final string state.
// Fixes: argc is checked before dereferencing argv; the launch size is
// computed AFTER check_param (which may replace an out-of-range tpoints);
// device buffers are freed before exit.
int main (int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s tpoints nsteps\n", argv[0]);
        return 1;
    }
    sscanf (argv[1],"%d" ,&tpoints);
    sscanf (argv[2],"%d" ,&nsteps);

    check_param();   // may prompt and overwrite tpoints/nsteps

    // Grid sized from the validated tpoints (original used the raw argv
    // value, so a corrected tpoints could be under-covered).
    int numBlock = (tpoints+1+BLOCK_SIZE)/BLOCK_SIZE;

    cudaMalloc(&d_values, (MAXPOINTS+2) * sizeof(float));
    cudaMalloc(&d_oldval, (MAXPOINTS+2) * sizeof(float));

    printf("Initializing points on the line...\n");
    init_line<<<numBlock, BLOCK_SIZE>>>(d_values, d_oldval, tpoints);
    cudaMemcpy(values, d_values, (MAXPOINTS+2) * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 1; i <= tpoints; i++) {
        printf("%f\n", *(values+i));
    }

    printf("Updating all points for all time steps...\n");
    update<<<numBlock, BLOCK_SIZE>>>(d_values, d_oldval, tpoints, nsteps);

    printf("Printing final results...\n");
    cudaMemcpy(values, d_values, (MAXPOINTS+2) * sizeof(float), cudaMemcpyDeviceToHost);
    printfinal();
    printf("\nDone.\n\n");

    // Release device buffers (original leaked both).
    cudaFree(d_values);
    cudaFree(d_oldval);
    return 0;
}
22,604 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Query and print basic CUDA platform information: device count,
// driver/runtime versions, and each device's name.
int main() {
    // How many CUDA devices are visible.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Number of GPU devices: %i\n", deviceCount);

    // Versions are encoded as 1000*major + 10*minor.
    int driverVersion = 0;
    int runtimeVersion = 0;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf("CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);

    // Name of every device.
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Name: %s\n", props.name);
    }
    return 0;
}
|
22,605 | #include "includes.h"
// 2x upscale of a pitched single-channel image: each output pixel (x, y)
// is a weighted average of a 2x2 neighbourhood of the half-resolution
// input, staged in shared memory. Assumes blockDim covers at least
// BW x BH threads for the staging phase and a grid that tiles the output
// in SCALEUP_W x SCALEUP_H tiles (SCALEUP_W/SCALEUP_H from includes.h —
// TODO confirm their values match the launch).
__global__ void ScaleUp(float *d_Result, float *d_Data, int width, int pitch, int height, int newpitch)
{
// Staging tile: the input block plus a one-pixel apron on each side.
#define BW (SCALEUP_W/2 + 2)
#define BH (SCALEUP_H/2 + 2)
__shared__ float buffer[BW*BH];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Phase 1: the first BW x BH threads load the input tile, clamping
// coordinates at the image border (edge replication).
if (tx<BW && ty<BH) {
int x = min(max(blockIdx.x*(SCALEUP_W/2) + tx - 1, 0), width-1);
int y = min(max(blockIdx.y*(SCALEUP_H/2) + ty - 1, 0), height-1);
buffer[ty*BW + tx] = d_Data[y*pitch + x];
}
// All staged loads must be visible before any thread interpolates.
__syncthreads();
// Phase 2: every thread writes one output pixel.
int x = blockIdx.x*SCALEUP_W + tx;
int y = blockIdx.y*SCALEUP_H + ty;
if (x<2*width && y<2*height) {
// Top-left corner of this pixel's 2x2 source neighbourhood in the tile.
int bx = (tx + 1)/2;
int by = (ty + 1)/2;
int bp = by*BW + bx;
// 0.25/0.75 weights chosen by output-pixel parity.
float wx = 0.25f + (tx&1)*0.50f;
float wy = 0.25f + (ty&1)*0.50f;
d_Result[y*newpitch + x] = wy*(wx*buffer[bp] + (1.0f-wx)*buffer[bp+1]) +
(1.0f-wy)*(wx*buffer[bp+BW] + (1.0f-wx)*buffer[bp+BW+1]);
}
}
22,606 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
// Intentionally empty: a placeholder that only verifies the Thrust headers
// above compile and link.
int main()
{
}
|
22,607 | #include "includes.h"
/* Scan post-pass: add this block's offset inter[blockIdx.x] to the two
   elements the thread owns. Each block covers 2*blockDim.x elements of
   data; indices past n are skipped. */
__global__ void uniformAdd(unsigned int n, unsigned int *data, unsigned int *inter)
{
    // One thread loads the offset; the barrier publishes it block-wide.
    __shared__ unsigned int blockOffset;
    if (threadIdx.x == 0) {
        blockOffset = inter[blockIdx.x];
    }
    __syncthreads();

    const unsigned int first = blockIdx.x*2*blockDim.x + threadIdx.x;
    const unsigned int second = first + blockDim.x;
    if (first < n) {
        data[first] += blockOffset;
    }
    if (second < n) {
        data[second] += blockOffset;
    }
}
22,608 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <iostream>
#define epsilon 0.000001
using namespace std;
void Gaussian(float* data, int size, FILE* file);
void ForwardElim(float* data, int size);
void BackSub(float* data, int size);
void SwapRows(float* data, int size, int upperRow, int lowerRow);
bool CompareResults(float* data, float* data2, int size);
void GPUGaussian(float* &data, int size, int blocks, int rowsPerThread, FILE* file);
void FillMatrix(float* data, float* data2, float* backup, int size);
void CopyMatrix(float* src, float* dest, int size);
__global__ void KernelForwardElim(float* _matrix, int _size, int _upperRowIdx, int _rowsPerThread);
// Benchmark driver: for matrix sizes 128..2048 (doubling), solve the same
// random augmented system with the CPU path (Gaussian) and the GPU path
// (GPUGaussian) at several rows-per-thread settings, compare results, and
// append timings to data.csv.
// Buffers: data = CPU working copy, data2 = GPU working copy,
// backup = pristine input restored before each GPU run via CopyMatrix.
int main()
{
srand(time(NULL));
// Add vectors in parallel.
cudaError_t cudaStatus = cudaSuccess;
//int size = 256;
//int colsPerThread = 1;
//vector<vector<float>> data, data2, backup;
float* data, *data2, *backup;
FILE* file = fopen("data.csv", "w+");
// NOTE(review): fopen result is not checked; a missing/locked file makes
// the fprintf calls below undefined behavior.
for (int size = 128; size < 2049; size *= 2)
{
std::cout << "---------------------------------------------------------------------" << endl;
std::cout << "Working on size: " << size << endl;
// Each matrix is size rows of (size+1) floats (augmented column included).
data = (float*)malloc((size + 1) * size * sizeof(float));
backup = (float*)malloc((size + 1) * size * sizeof(float));
data2 = (float*)malloc((size + 1) * size * sizeof(float));
FillMatrix(data, data2, backup, size);
Gaussian(data, size, file);
for (int rowsPerThread = 1; rowsPerThread < 9; rowsPerThread *= 2)
{
std::cout << "Working on rowsPerThread: " << rowsPerThread << endl;
CopyMatrix(backup, data2, size);
int threads = (size + 1) / rowsPerThread;
int blocks = (threads - 1) / 1024 + 1; /*1024 max for current graphics card used*/
// NOTE(review): GPUGaussian takes data2 by reference and reassigns it to
// a freshly malloc'ed buffer, so each iteration leaks the previous data2
// allocation; only the last one is freed below.
GPUGaussian(data2, size, blocks, rowsPerThread, file);
if (!CompareResults(data, data2, size))
{
break;
}
}
fprintf(file, "\n");
free(data);
free(data2);
free(backup);
}
fclose(file);
std::cout << "---------------------------------------------------------------------" << endl;
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
std::cout << "Press any key to continue . . .";
std::cin.get();
return 0;
}
// CPU reference solve: timed forward elimination (printed and appended to
// the CSV file) followed by back substitution, all in place on data.
void Gaussian(float* data, int size, FILE* file)
{
    clock_t elapsed = clock();
    ForwardElim(data, size);
    elapsed = clock() - elapsed;
    std::cout << "CPU Forward Substituion took: " << elapsed << "clicks ("<< ((float)elapsed)/CLOCKS_PER_SEC << " seconds.)" << endl;
    fprintf(file, "%f.2,", ((float)elapsed) / CLOCKS_PER_SEC);
    BackSub(data, size);
}
// Forward elimination on an augmented size x (size+1) row-major matrix.
// For each pivot row i, every lower row j is scaled by (pivot/lower) and
// row i subtracted, which zeroes column i below the diagonal. Entries at
// and left of column i in lower rows are never read again, so they are
// left untouched (the matrix is only logically upper-triangular).
// NOTE(review): no pivoting — divides by data[j*(size+1)+i]; a zero there
// produces inf/nan.
void ForwardElim(float* data, int size)
{
    const int cols = size + 1;
    // int (not unsigned) indices: the original's "unsigned i < size - 1"
    // wrapped to a huge bound when size == 0.
    for (int i = 0; i < size - 1; ++i)
    {
        float upper = data[i * cols + i];
        for (int j = i + 1; j < size; ++j)
        {
            float lower = data[j * cols + i];
            float multiplier = upper / lower;
            for (int k = i + 1; k < cols; ++k)
            {
                data[j * cols + k] *= multiplier;
                data[j * cols + k] -= data[i * cols + k];
            }
        }
    }
}
// Back substitution on a (logically) upper-triangular augmented matrix:
// leaves the solution vector in the last column, data[row*(size+1)+size].
void BackSub(float* data, int size)
{
    const int cols = size + 1;
    for (int row = size - 1; row >= 0; --row)
    {
        // Normalize this row's RHS by its diagonal coefficient.
        data[row * cols + size] /= data[row * cols + row];
        // Eliminate the solved variable from every row above.
        for (int above = row - 1; above >= 0; --above)
        {
            data[above * cols + size] -=
                data[above * cols + row] * data[row * cols + size];
        }
    }
}
// GPU forward elimination (one kernel launch per pivot row, ordered by the
// launch queue) followed by CPU back substitution. The eliminated matrix
// is copied straight back into the caller's buffer `data`.
// Fixes over the original: it allocated a fresh host buffer, reassigned
// `data` to it (leaking the caller's old buffer on every call) and then
// called cudaFree() on that HOST pointer, which is an invalid API use;
// it also leaked devMatrix on the memcpy error path.
void GPUGaussian(float* &data, int size, int blocks, int rowsPerThread, FILE* file)
{
    float* devMatrix = 0;
    const size_t bytes = (size_t)(size + 1) * size * sizeof(float);
    clock_t t;
    t = clock();
    cudaError_t cudaStatus = cudaMalloc((void**)&devMatrix, bytes);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed for matrix\n");
        return;
    }
    cudaStatus = cudaMemcpy(devMatrix, data, bytes, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed for multiplier\n");
        cudaFree(devMatrix);   // don't leak on the error path
        return;
    }
    // One elimination step per pivot row; launches on the same stream run
    // in order, so no explicit synchronization is needed between them.
    for (int i = 0; i < (size - 1); ++i)
    {
        KernelForwardElim<<<blocks, 1024>>>(devMatrix, size, i, rowsPerThread);
    }
    // Blocking copy of the result back into the caller's own buffer.
    cudaStatus = cudaMemcpy((void*)data, (void*)devMatrix, bytes, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed for lowerRow DtH\n");
        cudaFree(devMatrix);
        return;
    }
    t = clock() - t;
    std::cout << "GPU Forward Substituion took: " << t << "clicks (" << ((float)t) / CLOCKS_PER_SEC << " seconds.)" << endl;
    fprintf(file, "%f.2,", ((float)t) / CLOCKS_PER_SEC);
    BackSub(data, size);
    cudaFree(devMatrix);
}
// Exchange rows upperRow and lowerRow of the augmented size x (size+1)
// matrix, column by column.
void SwapRows(float* data, int size, int upperRow, int lowerRow)
{
    for (int i = 0; i < size + 1; ++i)
    {
        float temp = data[upperRow * (size + 1) + i];
        // BUG FIX: the original indexed the lower row with "* i" instead of
        // "+ i", so every column read the wrong element.
        data[upperRow * (size + 1) + i] = data[lowerRow * (size + 1) + i];
        data[lowerRow * (size + 1) + i] = temp;
    }
}
// Compare CPU (data) and GPU (data2) matrices cell by cell. A cell only
// counts as a mismatch when the difference exceeds epsilon AND both values
// are themselves larger than epsilon (near-zero entries are ignored).
// Prints each offending pair; returns true when everything matched.
bool CompareResults(float* data, float* data2, int size)
{
    bool matched = true;
    const int cols = size + 1;
    for (int i = 0; i < size; ++i)
    {
        for (int j = 0; j < cols; ++j)
        {
            const float a = data[i * cols + j];
            const float b = data2[i * cols + j];
            if (abs(a - b) > epsilon && abs(a) > epsilon && abs(b) > epsilon)
            {
                std::cout << "Something went wrong" << std::endl;
                std::cout << "CPU: " << a << "|\tGPU:" << b << std::endl;
                matched = false;
            }
        }
    }
    if (matched)
    {
        std::cout << "CPU and GPU results match!" << std::endl;
    }
    return matched;
}
// One GPU elimination step: each thread scales-and-subtracts its
// _rowsPerThread consecutive rows below pivot row _upperRowIdx, mirroring
// the CPU ForwardElim update (row_j = (pivot/row_j[i]) * row_j - row_i).
__global__ void KernelForwardElim(float* _matrix, int _size, int _upperRowIdx, int _rowsPerThread)
{
// First row owned by this thread; rows start just below the pivot row.
int startRow = (threadIdx.x + blockIdx.x * blockDim.x) * _rowsPerThread + 1 + _upperRowIdx;
// Always true by construction (startRow >= _upperRowIdx + 1 since
// _rowsPerThread >= 1); kept as a harmless guard.
if (startRow > _upperRowIdx)
{
for (int row = startRow; row < _rowsPerThread + startRow; ++row)
{
if (row < _size)
{
// Scale factor chosen so column _upperRowIdx cancels after subtraction.
// NOTE(review): divides by the row's leading entry — no pivoting, so a
// zero there yields inf/nan.
float multiplier = _matrix[_upperRowIdx * (_size + 1) + _upperRowIdx] / _matrix[row * (_size + 1) + _upperRowIdx];
for (int i = _upperRowIdx + 1; i < _size + 1; ++i)
{
_matrix[row * (_size + 1) + i] *= multiplier;
_matrix[row * (_size + 1) + i] -= _matrix[_upperRowIdx * (_size + 1) + i];
}
}
}
}
}
// Fill all three size x (size+1) matrices with identical random integers
// in [1, 10], so the CPU and GPU paths start from the same input.
void FillMatrix(float* data, float* data2, float* backup, int size)
{
    const int total = size * (size + 1);
    for (int idx = 0; idx < total; ++idx)
    {
        const float value = rand() % 10 + 1;
        data[idx] = value;
        data2[idx] = value;
        backup[idx] = value;
    }
}
// Copy a size x (size+1) matrix from src into dest.
void CopyMatrix(float* src, float* dest, int size)
{
    const int total = size * (size + 1);
    for (int idx = 0; idx < total; ++idx)
    {
        dest[idx] = src[idx];
    }
}
22,609 | #include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Element-wise sum res = inA + inB over n doubles; one thread per element,
// with a guard for threads launched past the end.
__global__ void vecAdd(double* res, double* inA, double* inB, size_t n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        res[idx] = inA[idx] + inB[idx];
    }
}
// Allocate three managed vectors, fill A and B on the host, add them on
// the GPU, and print every "A + B = C" line.
// Fixes over the original: the three buffers are freed (they leaked);
// the launch uses 256-thread blocks instead of N single-thread blocks
// (which wasted 31/32 lanes of every warp); launch-configuration errors
// are surfaced via cudaGetLastError.
void add() {
    const size_t N = 1000;
    double *A = nullptr;
    double *B = nullptr;
    double *C = nullptr;
    cudaError_t err;
    const size_t alloc_size = N * sizeof(double);

    err = cudaMallocManaged(&A, alloc_size);
    if (err != cudaSuccess) {
        printf("ERROR: unable to allocate!\n");
        std::cerr << "Err is " << cudaGetErrorString(err) << std::endl;
    }
    err = cudaMallocManaged(&B, alloc_size);
    if (err != cudaSuccess) {
        printf("ERROR: unable to allocate!\n");
        std::cerr << "Err is " << cudaGetErrorString(err) << std::endl;
    }
    err = cudaMallocManaged(&C, alloc_size);
    if (err != cudaSuccess) {
        printf("ERROR: unable to allocate!\n");
        std::cerr << "Err is " << cudaGetErrorString(err) << std::endl;
    }

    // Host-side initialization of the managed buffers.
    for (size_t i = 0; i < N; i++) {
        A[i] = i;
        B[i] = N * i;
    }

    // Ceil-divide so the tail partial block is covered; the kernel guards
    // indices past N.
    const int threads = 256;
    const int blocks = (int)((N + threads - 1) / threads);
    vecAdd<<<blocks, threads>>>(C, A, B, N);
    err = cudaGetLastError();   // kernel launches don't return errors directly
    if (err != cudaSuccess) {
        std::cerr << "Launch error: " << cudaGetErrorString(err) << std::endl;
    }
    cudaDeviceSynchronize();    // wait before reading C on the host

    for (size_t i = 0; i < N; i++) {
        std::cout << A[i] << " + " << B[i] << " = " << C[i] << std::endl;
    }

    // Release managed memory (original leaked all three buffers).
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
}
// Entry point: run the managed-memory vector-addition demo.
int main () {
add();
return 0;
}
|
22,610 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Reduction kernel skeleton: intended to sum g_idata into g_odata (one
// partial result per block). Body intentionally left empty for the
// exercise — the TODOs in main below must be completed first.
__global__ void reduce(int *g_idata, int *g_odata)
{
}
// Exercise scaffold: time three chained launches of the (empty) reduce
// kernel over 2^22 ints read from a text file, then report the sum and
// effective bandwidth. Several TODOs must be filled in before this works.
int main(int argc, char *argv[])
{
// We assume that the element number is the power of 2 for simplification.
const int elemNum = 1 << 22;
int arraySize = elemNum * sizeof(int);
// host memory
int *h_idata;
int sum; // final output value
// NOTE(review): sum is never assigned before the printf at the bottom —
// it prints an indeterminate value until the result-copy TODO is done.
// device memory
int *d_idata; // input data ptr
int *d_odata; // output data ptr
// NOTE(review): d_idata/d_odata are launched with below but never
// cudaMalloc'ed — the TODOs must allocate them first.
// initialize input data from file
// use the first argument as the file name
h_idata = (int *) malloc(arraySize);
FILE *fp;
// NOTE(review): file is opened in binary mode ("rb") but parsed with text
// fscanf; also argv[1] is dereferenced without checking argc.
if((fp = fopen(argv[1], "rb")) == NULL)
{
printf("Can not open input file!\n");
exit(0);
}
for (int i = 0; i < elemNum; ++i)
{
fscanf(fp, "%d", &h_idata[i]);
}
fclose(fp);
// TODO: copy input data from CPU to GPU
// Hint: use cudaMalloc and cudaMemcpy
int threadNum = 0;
int blockNum = 0;
// TODO: malloc GPU output memory to store the outcome from each kernel.
// Hint: use cudaMalloc
cudaEvent_t start, stop;
float stepTime;
float totalTime = 0;
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the first kernel and set the GPU timer
cudaEventRecord(start, 0);
// TODO: set grid and block size
// threadNum = ?
// blockNum = ?
// parameters for the first kernel
int sMemSize = 1024 * sizeof(int);
// NOTE(review): launch syntax is <<<gridDim, blockDim, sharedBytes>>> —
// passing threadNum first and blockNum second looks swapped; confirm when
// filling in the TODOs above.
reduce<<<threadNum, blockNum, sMemSize>>>(d_idata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the first kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the second kernel and set the GPU timer
cudaEventRecord(start, 0);
// TODO: set grid and block size
// threadNum = ?
// blockNum = ?
sMemSize = threadNum * sizeof(int);
reduce<<<threadNum, blockNum, sMemSize>>>(d_odata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the current kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
/*
* Hint: for "first add during global load" optimization, the third kernel is unnecessary.
*/
// create event for recording GPU execution time
cudaEventCreate(&start);
cudaEventCreate(&stop);
// execute the third kernel and set the GPU timer
cudaEventRecord(start, 0);
// TODO: set grid and block size
// threadNum = ?
// blockNum = ?
sMemSize = threadNum * sizeof(int);
reduce<<<threadNum, blockNum, sMemSize>>>(d_odata, d_odata);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculate the execution time of the current kernel
cudaEventElapsedTime(&stepTime, start, stop);
totalTime += stepTime;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// TODO: copy result and free device memory
// Hint: use cudaMemcpy and cudaFree
// Bandwidth: bytes moved over total kernel time (ms -> s), in GiB/s.
float bandwidth = elemNum * sizeof(int) / (totalTime / 1000) / 1024 / 1024 / 1024;
printf("%d %fms %fGB/s\n", sum, totalTime, bandwidth);
return 0;
}
|
22,611 | /**
* Add 2 vectors using CUDA.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <string.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t err = value; \
if( err != cudaSuccess ) { \
fprintf( stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(err), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
#define VECT_SIZE (7841u)
#define BLOCK_SIZE (128u)
/* Write i+1 into data[i] for every index covered by the grid; threads
   beyond VECT_SIZE do nothing. */
__global__ void vect_fill( int * data )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx >= VECT_SIZE ) return;
    data[ idx ] = idx + 1;
}
/* Element-wise sum vect_result = vect_1 + vect_2, guarded against the
   partial block at the tail of the grid. */
__global__ void vect_add( int * vect_1, int * vect_2, int * vect_result )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx >= VECT_SIZE ) return;
    vect_result[ idx ] = vect_1[ idx ] + vect_2[ idx ];
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
/**
 * Fill two device vectors on the GPU, add them, copy the sum back and
 * print it. Improvement over the original: kernel launches are followed
 * by cudaGetLastError() — launches don't return errors directly, so a bad
 * configuration would otherwise go unnoticed until a later call.
 */
int main(int argc, char **argv) {
    /* Host buffer for the result vector, zero-initialized. */
    int *h_result = (int*) malloc( VECT_SIZE * sizeof(int) );
    memset( h_result, 0, VECT_SIZE * sizeof(int) );

    /* Device buffers for the three vectors. */
    int *d_vector_1, *d_vector_2, *d_vector_result = NULL;
    CUDA_CHECK_RETURN( cudaMalloc( &d_vector_1, VECT_SIZE * sizeof(int) ) );
    CUDA_CHECK_RETURN( cudaMalloc( &d_vector_2, VECT_SIZE * sizeof(int) ) );
    CUDA_CHECK_RETURN( cudaMalloc( &d_vector_result, VECT_SIZE * sizeof(int) ) );

    /* Launch configuration: ceil-divide so the tail partial block is covered. */
    int blockSize = BLOCK_SIZE;
    int gridSize = (VECT_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;

    /* Fill both input vectors on the device. */
    vect_fill<<< gridSize, blockSize >>>( d_vector_1 );
    vect_fill<<< gridSize, blockSize >>>( d_vector_2 );
    CUDA_CHECK_RETURN( cudaGetLastError() );       /* catch bad launch config */
    CUDA_CHECK_RETURN( cudaDeviceSynchronize() );  /* catch async kernel faults */

    /* Add the 2 vectors. */
    vect_add<<< gridSize, blockSize >>>( d_vector_1, d_vector_2, d_vector_result );
    CUDA_CHECK_RETURN( cudaGetLastError() );
    CUDA_CHECK_RETURN( cudaDeviceSynchronize() );

    /* Copy back to host and print. */
    CUDA_CHECK_RETURN( cudaMemcpy( h_result, d_vector_result, VECT_SIZE * sizeof(int), cudaMemcpyDeviceToHost) );
    for( unsigned int i = 0; i < VECT_SIZE; ++i ) std::cout << h_result[i] << std::endl;

    CUDA_CHECK_RETURN( cudaFree(d_vector_1) );
    CUDA_CHECK_RETURN( cudaFree(d_vector_2) );
    CUDA_CHECK_RETURN( cudaFree(d_vector_result) );
    free( h_result );
    return 0;
}
|
// Plain-old-data pair consumed by sumStruct/mykernel below.
struct MyStruct {
float floatvalue;
int intvalue;
};
// Sum floatvalue + 3.5 * intvalue across N structs reached through an
// array of N struct pointers. Callable from both host and device code.
// Precondition: p_structs must point at (at least) N valid pointers.
__device__ __host__ float sumStruct(struct MyStruct **p_structs, int N) {
float sum = 0;
for(int i = 0; i < N; i++) {
struct MyStruct *mystruct = p_structs[i];
sum += mystruct->floatvalue + float(mystruct->intvalue) * 3.5f;
}
return sum;
}
// NOTE(review): sumStruct expects an array of N struct POINTERS, but
// &structs is the address of a single local pointer — the calls with
// N = 123 and N = 12300 read far past it (out of bounds). This looks like
// a deliberate fault-injection/sanitizer test case; confirm before
// "fixing". Note data[1] and data[2] are intentionally left unwritten.
__global__ void mykernel(float *data, MyStruct *structs, int N) {
data[0] = sumStruct(&structs, N);
data[3] = sumStruct(&structs, 123);
data[4] = sumStruct(&structs, 12300);
}
|
22,613 | /* cada hilo copia su parte */
/* Each thread copies its own element of layer into layer_copy.
   NOTE(review): no bounds guard — the launch must supply at most one thread
   per element, otherwise the trailing threads write out of bounds. */
__global__ void gpuCopiarLayer(float *layer, float *layer_copy) {
// Flatten the 2D grid and 2D block into a single global index.
int idBloque = blockIdx.x + blockIdx.y*gridDim.x;
int idGlobal = (idBloque*blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x;
layer_copy[idGlobal]=layer[idGlobal];
}
/* Deposit an energy impact into the layer: each cell receives `energy`
   attenuated by the square root of its distance to `posicion`. */
__global__ void gpuActualiza(float *layer, int posicion, float energy) {
// Flat global index from the 2D grid/block.
int idBloque = blockIdx.x + blockIdx.y * gridDim.x;
int idGlobal = (idBloque*blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x;
// 1. Absolute distance from the impact point.
int distancia = posicion - idGlobal;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point itself counts as distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
float atenuacion = sqrtf( (float)distancia );
/* 4. Attenuated energy for this cell */
float energia_k = energy / atenuacion;
/* 5. Skip the update when the magnitude is below the 0.001 threshold */
if ( energia_k >= 0.001f || energia_k <= -0.001f )
layer[idGlobal] = layer[idGlobal] + energia_k;
}
/* Smooth the layer with a 3-point moving average read from layer_copy,
   skipping the first element (and anything at/after layer_size). */
__global__ void gpuAtenuacion(float *layer, float *layer_copy, int layer_size) {
/* Flat global index from the 2D grid/block */
int idBloque = blockIdx.x + blockIdx.y * gridDim.x;
int idGlobal = (idBloque*blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x;
// NOTE(review): when idGlobal == layer_size - 1 this reads
// layer_copy[layer_size]; that is only safe if layer_size is the last
// valid index (arrays hold layer_size + 1 elements) — confirm with caller.
if(idGlobal > 0 && idGlobal < layer_size){
layer[idGlobal] = ( layer_copy[idGlobal-1] + layer_copy[idGlobal] + layer_copy[idGlobal+1] ) / 3;
}
}
/* Record in maximos[i]/posiciones[i] a local maximum of the layer; i is
   the index of the current reduction slot.
   NOTE(review): many threads can pass the comparison and update maximos[i]
   concurrently — this read-modify-write is NOT atomic, so the stored value
   may be any one of the competing local maxima, not necessarily the
   largest. Confirm whether a proper reduction (or atomics) is intended. */
__global__ void gpuMaximos(float *layer, int *posiciones, float *maximos, int layer_size, int i) {
/* Flat global index from the 2D grid/block */
int idBloque = blockIdx.x + blockIdx.y * gridDim.x;
int idGlobal = (idBloque*blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x;
if(idGlobal > 0 && idGlobal < layer_size){
// A cell is a local maximum when strictly above both neighbours.
if (layer[idGlobal] > layer[idGlobal-1] && layer[idGlobal] > layer[idGlobal+1]) {
if (layer[idGlobal] > maximos[i] ) {
maximos[i] = layer[idGlobal];
posiciones[i] = idGlobal;
}
}
}
}
|
22,614 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <time.h>
//#define VERIFY
//uncomment above to print difference between CPU and GPU calculations
// Matrix multiply M3 = M1 * M2, where M1 is m x n, M2 is n x p and M3 is
// m x p, all row-major. Launch with a 2D grid; thread (x, y) computes the
// single output entry M3[y][x], accumulating the dot product of one row of
// M1 with one column of M2 in a register-resident local before the single
// global store.
__global__ void matmul_kernel(
    const float* M1,
    const float* M2,
    float* M3,
    const int m,
    const int n,
    const int p
)
{
    // Output row (i) and column (j) owned by this thread, derived from the
    // block/thread coordinates supplied by the launch.
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard: the grid rarely divides the output exactly, so threads that
    // fall outside the m x p bounds simply exit.
    if (i >= m || j >= p)
    {
        return;
    }

    // Dot product of row i of M1 and column j of M2.
    float acc = 0.0;
    for (int k = 0; k < n; k++)
    {
        acc += M1[i*n + k] * M2[k*p + j];
    }
    M3[i*p + j] = acc;
}
int main(int argc, char* argv[])
{
    /*
    Create matrices
      A: M x N
      B: N x P
      C: M x P  <-- GPU result
      D: M x P  <-- CPU reference
    then compute C = A*B on the GPU and D = A*B on the CPU, timing both.
    Matrix sizes come from the command line: cu_mm.out M N P
    */
    if (argc != 4)
    {
        printf("Matrix multiplication example for A[MxN] and B[NxP]\nUsage: cu_mm.out M N P\n");
        exit(1);
    }
    int M=atoi(argv[1]); //2049;
    int N=atoi(argv[2]); //257;
    int P=atoi(argv[3]); //512;
    float *A, *B, *C, *D;
    /*
    Unified memory: cudaMallocManaged gives one allocation that is
    addressable from both CPU and GPU, so no explicit cudaMemcpy needed.
    */
    cudaMallocManaged(&A, M*N*sizeof(float));//input Mat1
    cudaMallocManaged(&B, N*P*sizeof(float));//input Mat2
    cudaMallocManaged(&C, M*P*sizeof(float));//output Mat for GPU
    cudaMallocManaged(&D, M*P*sizeof(float));//output Mat for CPU
    //deterministic, non-trivial initial values
    for (int i=0; i<M*N; i++)
    {
        A[i]=sin((float)i/100);
    }
    for (int i=0; i<N*P; i++)
    {
        B[i]=cos((float)i/100);
    }
    //C and D can be left uninitialized: every entry is overwritten below
    float elapsed_time_gpu=0.0;
    double elapsed_time_cpu=0.0;
    cudaEvent_t gpu_start, gpu_stop;
    struct timespec cpu_start, cpu_stop;
    //BEGIN GPU MATMUL
    dim3 threads_per_block(32, 32);
    /*
    Grid layout: the kernel maps blockIdx.x/threadIdx.x to COLUMNS
    (range P) and blockIdx.y/threadIdx.y to ROWS (range M), so the grid
    must be (ceil(P/32), ceil(M/32)) -- the original had the two axes
    swapped, under-covering the output whenever M != P.
    Also use integer ceiling division, (x+31)/32: the original
    ceil(M/32) truncated the int division BEFORE rounding (e.g. M=2049
    launched 64 blocks instead of 65), leaving the matrix tail
    uncomputed.
    */
    dim3 blocks_per_grid((P + 31) / 32, (M + 31) / 32);
    /*
    CUDA events accurately bracket the asynchronous kernel execution.
    Refer to the CUDA C++ Best Practices Guide:
    https://docs.nvidia.com/cuda/pdf/CUDA_C_Best_Practices_Guide.pdf
    */
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start, 0);
    matmul_kernel<<<blocks_per_grid, threads_per_block>>>(A, B, C, M, N, P);
    //kernel launches do not return errors directly -- query explicitly
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
    {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        exit(1);
    }
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    //END GPU MATMUL
    timespec_get(&cpu_start, TIME_UTC);
    //BEGIN CPU MATMUL (naive triple loop, reference result)
    for (int i=0; i<M; i++)
    {
        for (int j=0; j< P; j++)
        {
            float cout=0.0;
            for(int k=0; k<N; k++)
            {
                cout+=A[i*N+k]*B[k*P+j];
            }
            D[i*P+j]=cout;
        }
    }
    //END CPU MATMUL
    timespec_get(&cpu_stop, TIME_UTC);
    //Measure elapsed times
    cudaEventElapsedTime(&elapsed_time_gpu, gpu_start, gpu_stop);
    //CPU time in microseconds (tv_nsec is in nanoseconds)
    elapsed_time_cpu = ((double)(cpu_stop.tv_sec - cpu_start.tv_sec)) * 1000000 + ((double)(cpu_stop.tv_nsec - cpu_start.tv_nsec)) / 1000;
    /*
    Define VERIFY above to print C-D diffs for the first 100 entries;
    all values should be very close to zero.
    */
#ifdef VERIFY
    for (int i=0; i<100; i++)
    {
        float diff=C[i]-D[i];
        printf("%f, ", diff);
    }
    printf("\n");
#endif
    //convert microseconds to milliseconds
    printf("Elapsed time (CPU)= %f milliseconds\n", elapsed_time_cpu/1000);
    //cudaEventElapsedTime already reports milliseconds
    printf("Elapsed time (GPU)= %f milliseconds\n", elapsed_time_gpu);
    //release events and unified-memory allocations
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaFree(D);
    return 0;
}
22,615 | float h_A[]= {
0.5497571433874873, 0.8347494050502031, 0.8055736747507383, 0.8446806354421298, 0.9203646031866868, 0.75587394208173, 0.7271795302862971, 0.6541245401546809, 0.6474186907135968, 0.696932168348505, 0.9601942745787786, 0.5481004285262927, 0.7104979842273528, 0.8136085794451676, 0.7747238308303026, 0.9401550912920829, 0.5775778086718177, 0.5185775360254413, 0.5665697009091244, 0.8768684785476898, 0.5861600386006, 0.7517007207843288, 0.8261528201402633, 0.7299739121738679, 0.7647970284866126, 0.9878278189991735, 0.6596874401891246, 0.9891862275624084, 0.6314281212132351, 0.8626358245724133, 0.7711837462384944, 0.654409815703479, 0.8531694874539303, 0.5227185527298892, 0.8380161830016186, 0.6913947521140595, 0.7228560213128084, 0.7093806955394937, 0.5641970884122873, 0.7401826640779585, 0.9769447902585221, 0.8798476952287547, 0.8826071983978366, 0.5092857963602104, 0.7164889331253084, 0.5691855393649521, 0.693558548080896, 0.8959526758551744, 0.6136604296625554, 0.660579067529206, 0.8489626391906292, 0.9100493093967308, 0.6109174889072447, 0.6511118386433346, 0.9167199684684614, 0.9834384569583747, 0.6462378711845018, 0.7115622319571449, 0.8724691567551228, 0.9883008275092077, 0.9434912285676522, 0.9507690670297204, 0.927603592685633, 0.922526494510689, 0.91993126790074, 0.7039621163935301, 0.6118850363613095, 0.5153886636323323, 0.9216643295588656, 0.9167059496780798, 0.9951806059512531, 0.5998073015866516, 0.6069462522118769, 0.6297422501079807, 0.6200867907431244, 0.5340023158057714, 0.6972046505255443, 0.7811645219573331, 0.6698027783999485, 0.9709933955240369, 0.6364930679604923, 0.5587568925617192, 0.766055620565993, 0.7470420241505972, 0.6808066292296664, 0.7259306880593598, 0.933134005297924, 0.8569906759242762, 0.7303126601602719, 0.9194549259961722, 0.8684887240861955, 0.8566709827843, 0.9159390681061492, 0.5109162721531788, 0.5012535783106371, 0.6241457738968794, 0.9094398757883398, 0.8992365652779477, 0.8064508975557612, 0.9833319210804357, 
0.7286962857849539, 0.9147066110782547, 0.9984526007800787, 0.5670648664142259, 0.7271573690112332, 0.9951827859795639, 0.763217212611373, 0.8783527130472248, 0.8274476470541816, 0.8321446152620653, 0.6843054194686642, 0.9158327654457672, 0.5373806771901826, 0.6983492210024299, 0.7823774721297714, 0.8638159332977716, 0.8080412992780972, 0.9620287801551738, 0.7243746169354319, 0.6933703863276415, 0.6499033586252245, 0.7861750332213351, 0.6929842446433926, 0.5128704377969776, 0.5469663992721796, 0.6958516344218852, 0.9558400683489032, 0.7363374939496672, 0.8453857104551015, 0.6742108355918908, 0.708006721299778, 0.7277687321281302, 0.8141093340976099, 0.8064308407447347, 0.9720109200723229, 0.5144832540605186, 0.9667997237778858, 0.6730250564410236, 0.9051850116345053, 0.6280160165091189, 0.6281935182889442, 0.800164561985856, 0.8297046832670416, 0.9924217558628237, 0.8489511790913775, 0.85415015099981, 0.6797348580682598, 0.6931794990184785, 0.524212092086683, 0.7504699178080525, 0.9918800000659722, 0.766967808362311, 0.8485642161273546, 0.8848119092066997, 0.8896719506769168, 0.9099594915424607, 0.636259207893542, 0.9208607117070856, 0.5693919950740349, 0.6780793227833253, 0.6833796861790424, 0.5853230339921975, 0.8815329762377175, 0.5580825715319241, 0.5099570886833564, 0.6866350292985819, 0.8470124754781105, 0.9834838362085869, 0.8394202883257724, 0.8333621159477815, 0.789622950480038, 0.5081019268401898, 0.7084415874633794, 0.5390293099839726, 0.5863147485623869, 0.6630810557626434, 0.5148751392425166, 0.6089299896431905, 0.687956838230706, 0.5131532770887377, 0.6510842818832856, 0.9336379865695814, 0.8138070184575721, 0.5934355969380098, 0.5883075949107059, 0.9216662702951923, 0.7432262260655838, 0.8971282389074016, 0.8252095402493174, 0.806848539435413, 0.583099009511583, 0.7312357132281313, 0.5263393629581383, 0.90937812756961, 0.979475609032224, 0.5085026367024231, 0.8497057576372999, 0.6245145549607146, 0.554483652026917, 0.8362948927166272, 
0.6983834110432268, 0.5892391732717923, 0.9779027833814329, 0.8182054933209253, 0.9002922148630894, 0.6101561254411073, 0.9717966978568726, 0.6007525571147827, 0.8160334278232049, 0.8759450238552687, 0.8372293668832934, 0.729517354468888, 0.6959473784330887, 0.5553609511697557, 0.783262434431597, 0.8238097826290456, 0.6939183552625683, 0.7154529638975664, 0.9313270465199617, 0.9129613818459443, 0.8743054969616454, 0.687142664513259, 0.8471104314041131, 0.606434908304696, 0.7370970969673674, 0.5508484310576642, 0.550553525307429, 0.5513442178788666, 0.5327678067187046, 0.8206791988878719, 0.8040238804890278, 0.9579788671217644, 0.6103166254315786, 0.5692188100825537, 0.5902973851984221, 0.5234108879572539, 0.7981571624980477, 0.5692321178293402, 0.7715863513212284, 0.5644838838218915, 0.5416510912322015, 0.7977535529538637, 0.6942213555596708, 0.9097710005874061, 0.6054147472751181, 0.6744666663796146, 0.5200282252462533, 0.7596519184617238, 0.742765948135107, 0.6454761593335719, 0.6643205073294407, 0.7611113660553402, 0.6193691079356727, 0.5987999241385114, 0.5677027161594215, 0.9335699380755949, 0.9012391169718124, 0.9084213257231165, 0.8526776543177221, 0.6956606619788952, 0.6912539869898411, 0.523511102873585, 0.7169662016890846, 0.8931393247872652, 0.5266504402447537, 0.6895885019814796, 0.547731834584055, 0.7032155698418701, 0.7323282133300264, 0.51640866059344, 0.6272732292676224, 0.7434860146385533, 0.7663641992738617, 0.9836642624014429, 0.7422746871945896, 0.8835674991643085, 0.8248452819916767, 0.9828603924904503, 0.912337405670054, 0.5365880125977696, 0.84466307449335, 0.6316772372320976, 0.8575940066415236, 0.6698074402076413, 0.7813757710662843, 0.985460109247257, 0.685296943316311, 0.9534086908772373, 0.8919933787151246, 0.9622220841052705, 0.928201292487646, 0.5402398983417656, 0.6365330508729319, 0.574054202088264, 0.9312459039745355, 0.8778091708706552, 0.5422356185648306, 0.6266669881219107, 0.925496982699526, 0.7708894722499162, 0.70927672173884, 
0.6497992341307754, 0.8854471643815677, 0.7746889224820634, 0.5653958803545989, 0.9881514145421701, 0.7356323839678625, 0.7692226629371424, 0.8212980936422152, 0.930292916202066, 0.8061846569803437, 0.9889790972901162, 0.8916792364129708, 0.6113481689457916, 0.6564779042827693, 0.960407606254441, 0.6408783000263192, 0.9689516513154224, 0.5230600062405881, 0.7063387112988531, 0.9206067222075826, 0.5552448562501806, 0.5800835235130797, 0.6306010250875467, 0.5623744624310043, 0.9733641344385566, 0.8531953022822296, 0.7978766811172135, 0.5215438973214777, 0.8754998768180688, 0.8629697076859171, 0.6582972332008995, 0.5842370241746958, 0.5619949071623713, 0.8147404417426538, 0.7883808963196535, 0.8262383190696313, 0.5621062679845301, 0.677207727220621, 0.8558060518939825, 0.8223129566333136, 0.9890078838762804, 0.9626680105079415, 0.5450464618335389, 0.7939731668627164, 0.9320366622812566, 0.6367479584006652, 0.7724521898797956, 0.619857843156329, 0.5122428056086528, 0.7753047749738446, 0.687667594945507, 0.7926730992689388, 0.9481693549907584, 0.5850320751372757, 0.6797741581334116, 0.5632884016537647, 0.7720712107740395, 0.7199566292787261, 0.6393440871259073, 0.665909205429104, 0.7961203537334985, 0.516429299878914, 0.7322070215716686, 0.6710206163662735, 0.5740332070432261, 0.6466368079433233, 0.6497091645796407, 0.9956282831013583, 0.6823846338184942, 0.9816894972339028, 0.7771103709987719, 0.7864317119189917, 0.5888320035760581, 0.9268692013146181, 0.9085643743911991, 0.8010327145047662, 0.9274491321784035, 0.769201274152852, 0.9724145733765832, 0.5984182553975754, 0.8287069901017878, 0.6128298873034128, 0.680253893707764, 0.8911766366383234, 0.6089232160567011, 0.7424106969330982, 0.5083146141178783, 0.8958581237592079, 0.857268298785864, 0.6156206929890131, 0.897541647889353, 0.8655311327022279, 0.9035195709213256, 0.8824416392069625, 0.9660014603396678, 0.5831720549304096, 0.9177458564151717, 0.9493921752331026, 0.6279559408114086, 0.6949235276112213, 
0.7278153385144714, 0.8746605433658448, 0.607130681938477, 0.8768594349367134, 0.8615248493366147, 0.6274891873891228, 0.6259889351938817, 0.8139387571666746, 0.5280901069181454, 0.5095458712137714, 0.5525333750970995, 0.7081056380547135, 0.7452960020484176, 0.96114799989199, 0.9932222725885945, 0.6900858294071979, 0.9846591663430724, 0.8386301135205179, 0.6223791695356611, 0.6848354426863021, 0.5986312811935645, 0.7354793827216641, 0.5362252607707584, 0.6785853336012646, 0.9538160988169984, 0.5131689497666997, 0.5072637559213974, 0.8182974014701593, 0.6371535793843102, 0.8637643454086232, 0.9395189506204376, 0.8317118656248639, 0.7850688318000792, 0.9221116809067602, 0.5727708213239928, 0.7651528333635592, 0.791644551883376, 0.8404536572943311, 0.5189478640151302, 0.9202939271839767, 0.6235003312792479, 0.5117243199645778, 0.6548920279026602, 0.7434302001475253, 0.6223358912381758, 0.8553086698191135, 0.6044258354369041, 0.7386238645058483, 0.5223572127601509, 0.6934155624763886, 0.7636832049816665, 0.6693218141100539, 0.771826298852039, 0.894951666954851, 0.8278783492398327, 0.8148589462678534, 0.5129950998056796, 0.6433699800606919, 0.9681717998564454, 0.6526818843653127, 0.840030415118006, 0.9154814125081996, 0.5020770921500955, 0.8737164922945302, 0.7458956172828375, 0.5899991144976752, 0.6693238230765215, 0.7829850685352119, 0.7406951705738261, 0.6046213214848575, 0.5054938229732333, 0.6588385843578559, 0.97327699107928, 0.9788133525657736, 0.8970899289815594, 0.5587182758121029, 0.6813366138052253, 0.8742715834880463, 0.5273238603926755, 0.5728602155259834, 0.660486448112946, 0.8419463976744477, 0.82476245193727, 0.7636109524030017, 0.6285744953523903, 0.7857588254446426, 0.693146752912303, 0.8510020723186096, 0.8317728216928936, 0.7468240837407591, 0.8919404246739882, 0.9915379286889043, 0.5197800595085232, 0.5565256254255015, 0.847782370294552, 0.8193520303238342, 0.7565513090221736, 0.6685734427993024, 0.649128174737279, 0.6501821668512204, 
0.6376908064940432, 0.5158591346239358, 0.9505819683593144, 0.5790219280918494, 0.8861837113985291, 0.8787803837216328, 0.9581530894770816, 0.8018726598068842, 0.6139419182020146, 0.8741265292629733, 0.841024101465206, 0.8697734612048276, 0.8792999786013909, 0.7351620930623973, 0.9377645380665602, 0.6198865068359358, 0.6031412898584, 0.5872535117224051, 0.6469033107696434, 0.9145339527261688, 0.669931415643179, 0.7624675398102967, 0.5317815921390563, 0.5671402696463286, 0.9574119049389542, 0.5655289969127066, 0.6182984410120189, 0.6269950064677019, 0.6999002334208678, 0.6483816717367299, 0.5709355820681569, 0.6064773032924244, 0.564052148877825, 0.6732670613116004, 0.5237113590956963, 0.9956921483429029, 0.6747909462744535, 0.8593948755426302, 0.7908856575375003, 0.5512242337932591, 0.6642549077897518, 0.8241849414736838, 0.6688047596310567, 0.7571866827546159, 0.6678902404955476, 0.9526592317213529, 0.9595573334854672, 0.9651873021868957, 0.611466280155969, 0.7399177531387806, 0.6688637272216061, 0.7888003718633559, 0.6575749009390427, 0.8123443904104906, 0.7332556824471244, 0.6530906066217651, 0.590650596682682, 0.9529324167484197, 0.8861325616631892, 0.5885245129535435, 0.5962097987767248, 0.7983510726492484, 0.6022825421941873, 0.9529203697462055, 0.6663862253911748, 0.850140426576691, 0.8070500362220647, 0.5105062759873025, 0.8331831243628214, 0.6430575723975498, 0.9571654983219002, 0.9739088526547408, 0.6565153618241044, 0.5060772571158925, 0.692036388455437, 0.9557171528716031, 0.7365685138548739, 0.7664170907130317, 0.5692907948969519, 0.6180765400309495, 0.8852690049337337, 0.5191282324131445, 0.6399438627489561, 0.5011378892402606, 0.508747695437717, 0.6327006845040201, 0.7192338562463564, 0.9146133349441531, 0.6266899543994191, 0.6275798009874265, 0.9513190901206034, 0.7553444106062125, 0.8841253585950115, 0.6140903137694131, 0.7194373692849649, 0.5040830210166569, 0.9438649628153604, 0.819722145123389, 0.7168147313760254, 0.9348674212765349, 
0.7608404136087532, 0.7558556284078803, 0.9284464072433187, 0.7236492482002816, 0.6180161408343423, 0.9731257932182495, 0.6124976738445557, 0.9844749765186678, 0.6090370859444698, 0.8432428951736923, 0.9761756272576949, 0.9735300311079387, 0.5318650823157178, 0.6604995906352515, 0.5783394474805608, 0.7299158881379777, 0.597030294651239, 0.9237658757501532, 0.9207296207013156, 0.9139475435024471, 0.5619741693905316, 0.8129255737175964, 0.8145207098651426, 0.9482329967507768, 0.832864398528756, 0.8859246333090838, 0.6394295216807673, 0.8780110552359239, 0.5866821738176697, 0.7100813909037078, 0.8544406313145799, 0.614576269650035, 0.6772496732596724, 0.8540175841675341, 0.657060460809431, 0.5439498932832318, 0.7879426711250073, 0.7052817107989128, 0.8073570121054165, 0.7132806261913713, 0.7030043240694576, 0.5646759556385703, 0.5788568720643287, 0.741458645180199, 0.5036784402446546, 0.7559153989940623, 0.8495195551030843, 0.7297417414629669, 0.6657863997309632, 0.9403764411137066, 0.8253675355529112, 0.6829718689161913, 0.7085619027088128, 0.7392689679604469, 0.5371378285722677, 0.9551658300038617, 0.6616210769933193, 0.5486321488509034, 0.5219754734975346, 0.7084350830293797, 0.6307734267826077, 0.8849775562676014, 0.646064347600213, 0.5014043103851675, 0.9454452690672928, 0.7014408923210971, 0.9677210115079908, 0.7442129479347155, 0.9907611590731848, 0.7066154640892114, 0.9449378311886008, 0.9167190075214676, 0.9656055025238329, 0.6504875437466281, 0.8885486908476824, 0.7835231565564411, 0.5292039992129359, 0.6243329303316716, 0.9076978338072836, 0.9635312951259623, 0.9854088888575421, 0.8398273657396849, 0.7657656697857051, 0.6787976685802286, 0.9443100385666944, 0.6333169868136055, 0.7938251076549487, 0.8842488653517173, 0.9601388836410928, 0.6302500931941638, 0.8666425416286778, 0.863122553394764, 0.9808988982885083, 0.7177141878987767, 0.5533291367770936, 0.5127640365300017, 0.8120008443200737, 0.9986561346228865, 0.5141762265290786, 0.6772601444450381, 
0.8147792854783482, 0.7778187271301813, 0.6500918524966934, 0.9364458071415189, 0.9166402024583076, 0.8370177961395797, 0.5575908726125554, 0.7289356025825651, 0.8673724956693433, 0.9406133758562379, 0.8267645633349093, 0.5697685608942162, 0.6468272662195875, 0.6642952074938566, 0.7117790403671406, 0.5314993758148348, 0.5651965813512174, 0.8159287310717243, 0.6238612615451167, 0.8461725832278562, 0.9800499433498635, 0.6812869076436454, 0.5707919306038916, 0.8122670455344674, 0.7819765077446537, 0.6004757030740099, 0.694363188466715, 0.5696455433502969, 0.7750721337856661, 0.9460041198535222, 0.8323078010907257, 0.970978854008864, 0.5551489056665364, 0.6410037214919915, 0.7253291358613602, 0.7326571192177058, 0.8670649147373155, 0.9823141709691564, 0.9969623883711805, 0.6082687015476299, 0.8174388681448888, 0.7581800845432694, 0.6749007842144417, 0.5832335511229134, 0.6742271094517888, 0.5395515978496858, 0.8342490336385489, 0.9927066430042764, 0.6619979643250873, 0.7789028623000683, 0.5106538549180292, 0.9320729106032274, 0.5697915601820236, 0.86261135119499, 0.8631219932745985, 0.9149192701421796, 0.6368532945161585, 0.6778504765671589, 0.7320704219384786, 0.6354987674428533, 0.9153259625831126, 0.8831865544347679, 0.6901436616180677, 0.5531652197835566, 0.6875107447365953, 0.848551357860884, 0.7333368602930647, 0.775379924707174, 0.8590757325875241, 0.8238784178026146, 0.9356918880646536, 0.6763592160195592, 0.7476746185807064, 0.5835905961066632, 0.8758001151255559, 0.8375432424198225, 0.9597075885727002, 0.5075340761570681, 0.8636215442750614, 0.8394877115338457, 0.9865578010756635, 0.5635687727715184, 0.9648531200908884, 0.508606342269595, 0.9065060765768702, 0.7693280019737445, 0.8722340554987108, 0.9453479320199464, 0.8247685048688032, 0.9330062884457686, 0.8284619494251824, 0.8710246785808616, 0.7254332632665397, 0.9266647831863182, 0.8129181266755927, 0.5388732569231369, 0.8413924131740238, 0.7977645139550322, 0.9468080116147876, 0.585311266804422, 
0.5619841407588761, 0.9464661355490102, 0.500357619399008, 0.6741960784212866, 0.9097252104675912, 0.5657325915636052, 0.6913698433888561, 0.6244997968636865, 0.5972052741892516, 0.6417741834437167, 0.6335893513986155, 0.8328778715284122, 0.56174838966248, 0.9505462343581812, 0.9005163672862841, 0.9698576162836097, 0.7290829084719916, 0.7041031255227291, 0.5792962612332115, 0.511670972403168, 0.9600198855031785, 0.8216402208317463, 0.5098327505446467, 0.9927939277043867, 0.5221277337270207, 0.980067097386708, 0.6870651928146285, 0.8628282446322235, 0.7811821001331068, 0.9043175331166031, 0.8679798500625533, 0.9156957110875241, 0.6247075360508012, 0.8008745823178456, 0.546722005003788, 0.8089015315862844, 0.7585560242906644, 0.5058051936149048, 0.8413056970994319, 0.9644879381044342, 0.5000987031085193, 0.7482406794627154, 0.5061961256670009, 0.7563503044449823, 0.6090879784394184, 0.6017154893755057, 0.9712241874780558, 0.5509541279657428, 0.788776402287085, 0.7125536251259293, 0.618271656989285, 0.6672943410723171, 0.6521325069277247, 0.9728296523467516, 0.6804890313165126, 0.8543536829442048, 0.6586978176009656, 0.7579596331400239, 0.6060915577405424, 0.5916782200513343, 0.8615571040923287, 0.7028387178504177, 0.9395005274743496, 0.5207101137285867, 0.963481733155903, 0.8545978808724615, 0.6292874186559405, 0.9016779047966663, 0.5632203674679226, 0.5844145739966137, 0.9327940830881212, 0.7647417828296881, 0.8882956343727755, 0.7309557499353996, 0.6560674340153857, 0.6916123369105587, 0.7499832859357545, 0.9360399778387533, 0.7475828340301567, 0.6541487609159558, 0.9695496531594809, 0.9774770763246985, 0.8218799792651534, 0.5240174023221937, 0.6885919062908482, 0.9124895276950825, 0.9187977821397721, 0.5802730454251879, 0.9376286999504742, 0.5196973696403882, 0.6340789718997262, 0.8517499589227594, 0.9099167162468558, 0.9498969392400864, 0.6620827408776861, 0.7364778308607682, 0.8858468689854662, 0.5480403779187347, 0.888699598409561, 0.5890169400192832, 
0.9183673218916584, 0.6667675664154643, 0.808844204672293, 0.7946941242108504, 0.7593445286635476, 0.7563156305996116, 0.6653103701435594, 0.8591772128824028, 0.6137209266543305, 0.6398164549560186, 0.5409718101063912, 0.9810510545362608, 0.7675472621891073, 0.6375792328380333, 0.6865260802643145, 0.6172059691420598, 0.7671341399928606, 0.7414011512953083, 0.6461025699434191, 0.8519378800282369, 0.8548506160719982, 0.7434821810228669, 0.7936685871992291, 0.7718548413435874, 0.5121856313415065, 0.6013227040563067, 0.9593607125115684, 0.7623587644034558, 0.5063159857748645, 0.7637558411281199, 0.5560071205475542, 0.5092372581260335, 0.8687576103974852, 0.5744542793668367, 0.9632177548749863, 0.5850139318785506, 0.8284434558886283, 0.5399523158134052, 0.6224073512197996, 0.9170654559972498, 0.6300460126221381, 0.6997804107718324, 0.8036491889789539, 0.674735107622203, 0.9663586316200683, 0.5181496119336444, 0.6845934766929227, 0.9532029397866104, 0.6918364771374024, 0.6971953300636962, 0.655856972319258, 0.6700855321557161, 0.680622148809614, 0.5902200422558204, 0.5828267728833179, 0.5868111246620388, 0.7212636583516009, 0.6713762733137807, 0.6235559539902249, 0.5795686758300583, 0.8058370755268389, 0.5005157176262698, 0.9700748536129188, 0.8758270405654289, 0.8961646796053682, 0.6224780464816982, 0.8060987441991692, 0.8840368126613682, 0.7376108224505002, 0.6603313325137445, 0.8825049184971099, 0.5582863367405424, 0.6922070882582969, 0.5137647717087525, 0.5331609472620635, 0.8432122533912414, 0.7839627620674293, 0.6336813851408074, 0.7023393339944219, 0.763869721340874, 0.7928171233621503, 0.7351400406356629, 0.940107923229297, 0.7228082952414971, 0.5813992497847809, 0.8480105053389451, 0.7440499772284889, 0.9392012425153649, 0.8663040617930198, 0.750656869926354, 0.561300483834867, 0.8298237630380381, 0.7136058648099569, 0.5356307706089083, 0.8161757907999123, 0.9827790084679686, 0.6942989765777292, 0.5247831591729886, 0.951889643536257, 0.511952170253319, 
0.9345739080313689, 0.5371016518193006, 0.6317873426509124, 0.7680781826438725, 0.6074540024448241, 0.8717030997560468, 0.7071810268876288, 0.8935996879179727, 0.5121145056539488, 0.8081839008266521, 0.5728300839209685, 0.5608545309147481, 0.528274557693257, 0.5706766082310655, 0.708564174595421, 0.7397454128474157, 0.5590680706614823, 0.60063153069294, 0.9539708957444678, 0.9131465163358523, 0.5678464930367504, 0.5791887340093609, 0.5530202157727535, 0.8366194226671051, 0.9440817532728993, 0.8332746725071111, 0.9605461510325353, 0.7104818272409206, 0.5181522621262249, 0.5001219604661182, 0.750220935536053, 0.7181556248172805, 0.5165285329009042, 0.8789059685708764, 0.6625579422792959, 0.5240416405260065, 0.5527349542842201, 0.8832360269507257, 0.7869553936079596, 0.9678255370286377, 0.6560931592752716, 0.5997696945583786, 0.874578788147485, 0.8815011887672006, 0.9451253203279824, 0.8359295167915329, 0.586453404657254, 0.7392079028245497, 0.5129425585787167, 0.6053316947613808, 0.9351904071727193, 0.6454691113824256, 0.6438651958999251, 0.759585296133932, 0.6208209090072295, 0.5268481788555947, 0.5564919562656251, 0.8521293562995531, 0.6925708353276656, 0.5940695753524116, 0.8785153066866677, 0.7536821708792953, 0.8815284128777006, 0.9341158073129061, 0.6378963311209331, 0.9199280920038404, 0.5702384667800398, 0.7976660331591579, 0.7933394607166802, 0.7109825781250037, 0.9373302423815022, 0.6021916628638342, 0.5299741524200217, 0.9552162164723652, 0.6026302060792815, 0.6174456187194932, 0.7539494080528473, 0.5708287536294394, 0.7281902799186508, 0.6647358788544089, 0.9309637210039696, 0.6766518143941025, 0.8710354706014205, 0.8438890369301975, 0.7097969308474391, 0.983530548898662, 0.5927987348566588, 0.9755274603864262, 0.7556633346816959, 0.7435429025679774, 0.9951094822066349, 0.7391821790071162, 0.5731664005000954, 0.5424499041080704, 0.7666161491394026, 0.9011194246344807, 0.692779499814798, 0.749454636698264, 0.5647036609807032, 0.644922359199505, 
0.5100642058500384, 0.5835405194315539, 0.8939897175636133, 0.804110645048502, 0.9655559600647926, 0.9995009465521997, 0.7225459128711521, 0.5284944890398778, 0.9071237454176773, 0.96854311113668, 0.5726065422307539, 0.5751407198276659, 0.9628217252669027, 0.5974873518187793, 0.552518666895772, 0.6603437757808909, 0.933054810030399, 0.5208730133764299, 0.8675767220411781, 0.7226625682912142, 0.5552394515236987, 0.6845741174697446, 0.887806174844133, 0.844604201413401, 0.837355847809694, 0.5520724376908563, 0.8854769028464134, 0.8665011027990552, 0.810983779656854, 0.6422779535323223, 0.7086450266389949, 0.5576176260513699, 0.6244630554231962, 0.5731521996367398, 0.527153164176484, 0.8835846615878596, 0.6043612121949451, 0.8598313026672426, 0.6015321544632015, 0.5193535462055343, 0.9005389005623626, 0.6378782618879897, 0.6843914567359151, 0.5009699758454789, 0.5652997056121454, 0.6726233615016337, 0.8208616355019014, 0.5151094621531893, 0.6356123701165864, 0.8872666877830204, 0.5219132193880912, 0.7479718702296075, 0.6018462389010847, 0.8597352047125406, 0.7066906122512336, 0.7647392490194539, 0.9342254989911696, 0.9669166630576456, 0.5815325012563157, 0.5441778857415789, 0.7154899921934065, 0.5535404097121186, 0.6999218105107358, 0.5906225865016779, 0.874650880385025, 0.9127500031023819, 0.9188173614835993, 0.5933743463912611, 0.9294453791238657, 0.6577115083692651, 0.9248702543249572, 0.947058602553454, 0.9315629357553287, 0.6741583266629618, 0.8527170533952702, 0.6603269263137802, 0.8766849115816107, 0.6989914408543489, 0.5036132694115363, 0.6303343616635373, 0.6335363856209207, 0.7800552690570602, 0.6959769248421402, 0.6344353768982951, 0.7412200148835603, 0.5718211130494679, 0.9634863197008536, 0.5089033997896231, 0.5981232373957536, 0.6604376788541911, 0.569926021124696, 0.8406788689232252, 0.9419190354363314, 0.9101054801249076, 0.9406602312343781, 0.68165609052403, 0.812030442469297, 0.5398768006705355, 0.8074906994653285, 0.603451063702827, 
0.9945769205973827, 0.9705222680566654, 0.7397692996425215, 0.8010114639699659, 0.7343246519822024, 0.6736825798048636, 0.8786100957704917, 0.7012148651317855, 0.5653235185055008, 0.7443446488431669, 0.795586011058389, 0.8623255975606943, 0.602387262614027, 0.7567206897007077, 0.7353725033161728, 0.8446384531292772, 0.7040536051263209, 0.9532938558889611, 0.768010258957094, 0.986645630894946, 0.6798233171514748, 0.5990580906389745, 0.7064476793582657, 0.9659307614448042, 0.9692648307117537, 0.5820807636656368, 0.9562490894798521, 0.5943507305410509, 0.5616654399005929, 0.8193419288936847, 0.6353033986782379, 0.6401008283423177, 0.6670094079400264, 0.9098683795168786, 0.5800010006556764, 0.727040448238548, 0.7313837431241824, 0.7927211733643962, 0.6844764416529874, 0.6948043996783482, 0.5452576377649387, 0.9654191758539702, 0.7527998265629033, 0.7876380984064912, 0.9031292151611152, 0.9539964948910172, 0.9248050617344126, 0.843757559297871, 0.9634761498401038, 0.9353032585545914, 0.6110075348957118, 0.9672002675948455, 0.8779628076642756, 0.7990698744825027, 0.6641158702883617, 0.7093335049280258, 0.8586043426890253, 0.7095628461551482, 0.5943240096667233, 0.8668980659269374, 0.7542115953352725, 0.6739191723130831, 0.8133363142510621, 0.7881501611527924, 0.5500253494105338, 0.7464090283126852, 0.5839725010064042, 0.902929397790843, 0.5053160513772166, 0.9967274726914381, 0.8319108616375812, 0.6580023284696014, 0.6409340085006938, 0.6437510614282247, 0.9931475413022925, 0.6743761083850925, 0.7874887779052293, 0.7699602960297092, 0.7001703664793184, 0.7631655059143975, 0.6158477204441771, 0.7256029075441507, 0.8204528604080715, 0.66491863593259, 0.6088984350039485, 0.7225750397985731, 0.8971337079554349, 0.567605411860483, 0.766959801391226, 0.6806630105733356, 0.6378156369321843, 0.939740369110561, 0.9544402112113601, 0.5538879591809691, 0.8357954733885393, 0.9837351498086321, 0.635877839396167, 0.5804116437013448, 0.7504305761656962, 0.6939385392851191, 
0.64267162224162, 0.8393439654087275, 0.5696024538737435, 0.9393338829811664, 0.7216340227693883, 0.7784225161098293, 0.8668034389117453, 0.8104495981045722, 0.5209107389911041, 0.932552725099129, 0.8170615013181453, 0.6367934764515983, 0.81612231215286, 0.5905162205686223, 0.6568429562453888, 0.7206298492197059, 0.7046732941161331, 0.6993018394572772, 0.7089104675829134, 0.7693161062932148, 0.563597656797771, 0.9563097770148707, 0.6589661919969457, 0.8036020338622256, 0.8203213012854182, 0.6911865717559914, 0.7214639211920616, 0.5832181666592794, 0.8166945877865097, 0.6657963091898571, 0.9787361254806832, 0.7988687838668428, 0.7326103107056251, 0.901094589607328, 0.9563904263728261, 0.6317310573815051, 0.6094088664582975, 0.8288998002653432, 0.5935128453105759, 0.643644742713047, 0.5298506686693107, 0.9598514907612778, 0.8915241913525442, 0.93345140096102, 0.767870144980596, 0.5131539724767118, 0.7387527437179666, 0.8965962209708367, 0.769931062572446, 0.7124031343560995, 0.6296231998396484, 0.659037374322724, 0.9475669235189017, 0.7489552579718348, 0.5519153456067459, 0.897919314882582, 0.5201496561157295, 0.8146514512787751, 0.5722991492760954, 0.6804863799477099, 0.7074625582958445, 0.6939232438944778, 0.5085142433098437, 0.8361982228018039, 0.654531027734031, 0.6853500196531245, 0.9405474881870015, 0.6275559332628696, 0.8354970842140542, 0.6006258976766716, 0.5017545438782566, 0.8213242838575456, 0.657364745509196, 0.6246518056485992, 0.8819856674506932, 0.512913559615053, 0.5092162534674334, 0.9227085377668425, 0.8959605556314844, 0.9850950819606155, 0.74978678884977, 0.8571190974892486, 0.909913261187062, 0.7692832792485296, 0.5693329492315109, 0.7995948563881347, 0.8943070516748782, 0.6339544777385924, 0.7376954820864232, 0.9410372606666368, 0.8783041559246044, 0.6140531112409617, 0.9168560262971797, 0.8625466993201523, 0.8822460555457783, 0.5927038853051201, 0.7999911031943059, 0.9919738493594752, 0.9521333608292392, 0.8117767286782216, 0.5666101595644992, 
0.7438001084555864, 0.7441312172729748, 0.7838003106546124, 0.5503572371793441, 0.950736094849004, 0.7764037754242512, 0.8917175381438647, 0.6556290867127861, 0.998170206705918, 0.8537706345669909, 0.60338210887132, 0.7734055209779858, 0.6720213654968046, 0.8345753370376655, 0.7270550749783671, 0.706461797513813, 0.6818993403844458, 0.6853085883066337, 0.9680732841051212, 0.9515930762237056, 0.5000560401786853, 0.5957043525527533, 0.7538768060989471, 0.8384255031554102, 0.5343989763264321, 0.6979907820109645, 0.6714297736665176, 0.8111355675998486, 0.8688484314845846, 0.67516565546979, 0.7802344078450714, 0.7933406801845397, 0.5419574278372208, 0.991034734836008, 0.8284259403039744, 0.7909047088916961, 0.8173258922653895, 0.5561220325735796, 0.9326415345680849, 0.5272585221738069, 0.8853466781295412, 0.620154708123146, 0.5671159502640639, 0.899776273172006, 0.5460102439681596, 0.5000953229280327, 0.6342966211912195, 0.8959141303161429, 0.9471218654357488, 0.5921642941633263, 0.5989802902467711, 0.8035582374853687, 0.8192581537345867, 0.8649639006103353, 0.710799564397546, 0.5307402666385482, 0.6597185059219156, 0.597807148661399, 0.6911812554345098, 0.9157231994622399, 0.58309837303933, 0.5875676137949435, 0.6894234648125388, 0.7028932832059932, 0.7627471922796072, 0.9048922678762316, 0.6517168321959845, 0.6880668855540516, 0.5355762280570078, 0.8262685145139591, 0.5874357930031999, 0.9017246051444308, 0.9735020395906921, 0.8877243213989398, 0.9142621220349294, 0.8617395139285924, 0.8717774789094168, 0.7458520245540545, 0.6813076497709971, 0.941441877126354, 0.9421244448999337, 0.7726955236771876, 0.5302246013883979, 0.7761090168700394, 0.8719277725771695, 0.546172013311073, 0.804447380273138, 0.6275372229981087, 0.5464291990529901, 0.9001104320711046, 0.63861945291676, 0.728813313548919, 0.925008982259951, 0.7016122024859672, 0.6484998309277038, 0.8071623451925196, 0.9409285965351226, 0.7872334623334792, 0.9707966506431147, 0.6723142088793252, 0.6699195403200446, 
0.7607062951818847, 0.5926776097839307, 0.617432734521645, 0.8845621220004514, 0.618475562331754, 0.9616823112117245, 0.5647777867252732, 0.9828785340699828, 0.9015442072350057, 0.944774871652798, 0.5363551844323237, 0.7045766179092968, 0.7947729097253635, 0.8672510139486547, 0.6917838641639926, 0.9158727338257671, 0.6320105249211805, 0.9991002308390915, 0.6745231256980786, 0.7771613744631898, 0.5668555922541086, 0.9019236804416473, 0.994174207778427, 0.8487855733721208, 0.8213640975277852, 0.9026152682777924, 0.8892442593342476, 0.5078434054416281, 0.8603428776290616, 0.7729416710032642, 0.6630307214817313, 0.9806721161691672, 0.9888419841765869, 0.9852872987067733, 0.7490201171937263, 0.781978576074772, 0.6763878829588794, 0.7171409379941398, 0.6708814226269224, 0.8968600613343525, 0.7135062450658483, 0.8279703761162678, 0.9806758461522209, 0.9977768769276533, 0.8812876113238897, 0.55613169160792, 0.8621853608310737, 0.5135907416349219, 0.8484673408370387, 0.9172383187906601, 0.7935263341684546, 0.5253837083958346, 0.6563234365872732, 0.772530939176505, 0.8076862000318533, 0.9519488412562699, 0.6137447844241968, 0.8530837561406389, 0.7952261360757513, 0.5588337453348029, 0.5763812112712905, 0.9842797957560734, 0.6457355956038753, 0.8737803704445994, 0.7277709441120712, 0.525544904409238, 0.9753691393906251, 0.7564532285494805, 0.7866365054724455, 0.8117601223827194, 0.8329167236229169, 0.6493576135814807, 0.7836791567064019, 0.7074163194337759, 0.6742236702947808, 0.7443332079572423, 0.6061167138966927, 0.8347299933859729, 0.9834025275031784, 0.8318480359504141, 0.6054337710690935, 0.5289342112063881, 0.9081499524136294, 0.7087487948525777, 0.8586317375218955, 0.9441316826075665, 0.9537201069536028, 0.9462640052435494, 0.8578163705315467, 0.8214139751827529, 0.8810073084999442, 0.63700856435608, 0.7167620870166601, 0.6867263322916173, 0.6360656316364524, 0.8991337365400769, 0.6049066617751777, 0.569475034573862, 0.9194988739800556, 0.5728520338863841, 
0.8239631964776549, 0.599292673381697, 0.5074175255174467, 0.6935427708326369, 0.7358601646912311, 0.7017371187129547, 0.6148863713480948, 0.5643852510764873, 0.5631535862158324, 0.9855187533110469, 0.7530783288388637, 0.9868848065433433, 0.9170429950482593, 0.8069301300117684, 0.7810036488016365, 0.894731291756586, 0.613716745440858, 0.5985150970637281, 0.9042944933748751, 0.7764919584766611, 0.8718067803815144, 0.9782899377748542, 0.5971082240753329, 0.5838341496399215, 0.7793050372941241, 0.7959308729905916, 0.5798490574206548, 0.8158902796272034, 0.5634827913276875, 0.5047845015059526, 0.9775917185982623, 0.7061943370349653, 0.8190401948988737, 0.9825797480321202, 0.8702285408739006, 0.7075721563270653, 0.9716293970195827, 0.8429724578928534, 0.9348480585829689, 0.9068320099294305, 0.6142501269071939, 0.7457141779674357, 0.916518183081123, 0.7417669289364726, 0.8876978733995025, 0.9352765528601847, 0.8699804658438678, 0.8631495473020183, 0.8342149776380023, 0.5338299775898885, 0.621101506439654, 0.9499226324456593, 0.9861508373671586, 0.8431167543924158, 0.6048312165519358, 0.6182358838702318, 0.95457006394498, 0.5360953559512358, 0.6407174525322732, 0.8436018847777228, 0.8533905567376757, 0.6627924087919232, 0.7784428389454288, 0.8708935546953389, 0.9631606141943059, 0.8410883448305975, 0.5446502005157869, 0.76941433882759, 0.9157767130426255, 0.5028271929534557, 0.9939196178512845, 0.9300620251026279, 0.5407766108804781, 0.9149033626014521, 0.7989647784862588, 0.8603977264349215, 0.8005155933965247, 0.5499057386298031, 0.628042028784948, 0.5600740549200909, 0.6190496617426388, 0.5873569200227943, 0.8484461816693945, 0.8032920999936572, 0.7145793851292348, 0.9373624964902558, 0.9258518253472042, 0.594703311005893, 0.6717865708310624, 0.8853752226272544, 0.6652509747286675, 0.8529753439911123, 0.8983624352165942, 0.5139897303774351, 0.5177236932100395, 0.8733659199554604, 0.5191788732240346, 0.5833210206679286, 0.8042293101065016, 0.8214780783906196, 
0.6688584484833378, 0.5757222566152382, 0.6865226685818848, 0.6297538058548684, 0.9306675182636986, 0.8841637825532751, 0.7546574664256172, 0.9998637394452912, 0.7763873251115696, 0.9887628890820659, 0.9037466095211921, 0.7950845323801763, 0.6190431832280204, 0.9946818458071425, 0.5098853680107728, 0.9894018461609236, 0.8668111249001571, 0.720878149683584, 0.9096086283522878, 0.8504558911509632, 0.9456886618591549, 0.6135372680854347, 0.6186748332297107, 0.8814698368301737, 0.7103517146323861, 0.9210547946352072, 0.9294309705557582, 0.5905744465984938, 0.7307248729660913, 0.5992704446592622, 0.713146298871902, 0.6894698768876448, 0.5764923639282749, 0.9891023526547832, 0.8740214333896441, 0.7716221063823765, 0.76685100685397, 0.9499004053510745, 0.5671740078858132, 0.9162478153292744, 0.7980759205538348, 0.8081347817450537, 0.8317839349203766, 0.7668468994857524, 0.9584226912654964, 0.7988632163531943, 0.5748691099789676, 0.7476615329642738, 0.5656745162144753, 0.8023124825219432, 0.6625617161934876, 0.828645017612554, 0.550895162627381, 0.6383934759587284, 0.5643910208659473, 0.7980660075537664, 0.8988637624200235, 0.7561989833528342, 0.9495133101480429, 0.8164576904446547, 0.5686324391425052, 0.8550234531139267, 0.9108147105788511, 0.6899064402895947, 0.9617837483237964, 0.659259549492562, 0.9422102499378984, 0.521980849034722, 0.6744641424932617, 0.6664076794236038, 0.5371934998081527, 0.9830885206533182, 0.6465360999668356, 0.7953800765093858, 0.7738257009471357, 0.6063289211755631, 0.8242827117763085, 0.5032826528899381, 0.9699315281582888, 0.5856953592699894, 0.943269624266168, 0.9266616640669325, 0.965014800912194, 0.9927939450777179, 0.8231652553284117, 0.6531718798823427, 0.5633264579821347, 0.8439694487633577, 0.73702307822498, 0.6021259411039771, 0.7134201620631242, 0.9864378834077148, 0.7369043588622, 0.7782188527107694, 0.9739251372843873, 0.7976311143628014, 0.9734288325096496, 0.5315590735673794, 0.6448594744191573, 0.5230571352531451, 
0.8404851997102283, 0.5083999228639067, 0.9377450091141462, 0.5290485809012713, 0.9629085508373643, 0.8182838339068166, 0.8971356979883511, 0.7143552305502427, 0.5376514817675193, 0.7559034403006755, 0.7742831358321028, 0.6609246909577726, 0.9981555206169381, 0.5345439773163931, 0.7149016684536247, 0.8157840472161313, 0.7281847166743147, 0.8711890344837574, 0.8041068754190323, 0.9564598707917211, 0.7220910029096248, 0.522769836025801, 0.8244405662978254, 0.7241505784814515, 0.9946907169299728, 0.8738832051960976, 0.5019599960321355, 0.924906188983814, 0.9687859226908475, 0.8897838926991305, 0.8075604794376869, 0.8790783652901137, 0.5857500141109915, 0.5396776851428005, 0.8716246411814224, 0.9545814323653186, 0.8805172870260806, 0.974727651782979, 0.5585005872822741, 0.9378003752081611, 0.6670428604307028, 0.9700205603129246, 0.5395212945977184, 0.6666614631530832, 0.8034240214501778, 0.7110051023474488, 0.5629404333696564, 0.9241479223495068, 0.9076659552598472, 0.7604940916624325, 0.9395659681918271, 0.7805827139094099, 0.7808897415543222, 0.9488935347435699, 0.9527593646778865, 0.7487922871062609, 0.9111788772783275, 0.9796318782217912, 0.6953671104517956, 0.7314031256963351, 0.9419058830851617, 0.5631229586430033, 0.6135231101220087, 0.7592334866507449, 0.6953614025091858, 0.5850421925019065, 0.8933283301717498, 0.781210522349449, 0.9870563178088816, 0.5429523318046703, 0.9588424945060466, 0.8458319430651753, 0.9009378210679126, 0.9181659504962214, 0.8410972720902757, 0.8386100903437155, 0.5644674034812296, 0.9497587922244478, 0.5381756608933003, 0.647134444596018, 0.7955169204514392, 0.989938802845246, 0.7697897822950566, 0.7559931365857006, 0.8182750182802835, 0.8591296041231475, 0.6864409169168539, 0.969156878739453, 0.9792447203435402, 0.908227228414716, 0.9160464021143966, 0.9877069602943133, 0.5256419337911405, 0.5697041243689679, 0.6633700698250938, 0.6393081051577184, 0.6991934135519848, 0.7895257528049431, 0.7700009808973961, 0.7363570868060235, 
0.8760002880962394, 0.8463723913706157, 0.5246555914266358, 0.6138559824233492, 0.5052731356678756, 0.9553559200710463, 0.7680526310286431, 0.9589345836824592, 0.5333679828514878, 0.9947714200646213, 0.7798418980023321, 0.7784977529290295, 0.8925139861898191, 0.8668849858070307, 0.78110977670472, 0.8998803795456607, 0.6465937378177411, 0.6457781585630396, 0.8745021998583027, 0.6020833917636781, 0.6068371376625017, 0.6549685270457433, 0.7133686201357212, 0.5717535584403879, 0.9398058657647591, 0.851681925660106, 0.7488973413502671, 0.7227659495113263, 0.9354663948197406, 0.6551728334699173, 0.957451108466606, 0.6527129965676465, 0.5468545968804609, 0.9737837286032991, 0.7024446962525848, 0.8025272827123533, 0.8370223648084486, 0.6303592989313862, 0.7362823834599458, 0.6642604699174468, 0.9278221811341807, 0.9425769854354168, 0.7490910130026295, 0.744709316896492, 0.513846391572075, 0.9845595373010627, 0.8723678364335261, 0.5036775892886687, 0.7510379424275361, 0.7009911621298592, 0.7951400287340566, 0.6729393726020876, 0.971923431901792, 0.8134600316090828, 0.6496378273067571, 0.9466722416645874, 0.5188795123959267, 0.5769581756486657, 0.7168967861675548, 0.5569912237942912, 0.9060796404103009, 0.5136747622353611, 0.6150750395682323, 0.7640771597475033, 0.6142721420337078, 0.7859783456148628, 0.7587847774539636, 0.7218722152842664, 0.8088658485962426, 0.8419401681403069, 0.7253711120561979, 0.5092916970105473, 0.9612738236667975, 0.9726887241628446, 0.6688207442529637, 0.9047207873705474, 0.5516136313339703, 0.955334992770745, 0.6900511009093, 0.5970452594988924, 0.6138163430196264, 0.948978749041756, 0.8323554790045021, 0.7920015701853123, 0.513318736959244, 0.6497942859517178, 0.6115632276716694, 0.5690117300258601, 0.9374764005063546, 0.7060544674944129, 0.8605693287607012, 0.982550507463654, 0.914292426742339, 0.7339006021830787, 0.8356927705765292, 0.8245247667152521, 0.6285941280979703, 0.9906126679311227, 0.7525956961974377, 0.7491343668640049, 
0.9889583655724568, 0.5845836912453225, 0.8084834561102741, 0.5568106103762831, 0.9114241474339526, 0.7743467047275921, 0.5338257233294456, 0.8940698714565188, 0.5671843204626486, 0.5857337014530695, 0.6358201207711087, 0.6063317299893918, 0.9802851355967928, 0.9948118148199302, 0.78348700617901, 0.5374408069242469, 0.7572734961777858, 0.6058722548954836, 0.7932399007383839, 0.7069671911667306, 0.5967598169899845, 0.8757241110371417, 0.6626123224195968, 0.6078759464564087, 0.5681325051381949, 0.5409016782977953, 0.8734339526301511, 0.8736753622321662, 0.8486503241944514, 0.6679978391247654, 0.7098210007062609, 0.7541684925247802, 0.6159525693036761, 0.743582001578929, 0.518943293561202, 0.7122754426820386, 0.6625887087878739, 0.50349804435882, 0.5391050788667804, 0.6802492353945644, 0.6517172567275316, 0.6806534485983675, 0.8737599410836154, 0.9741269890652562, 0.753377466233024, 0.5343785295331214, 0.6547452358269814, 0.5243007582935296, 0.6402588075879919, 0.876217456434059, 0.697236547130068, 0.9489009672716038, 0.5038265838700264, 0.6075024206623874, 0.9414026937347746, 0.8612004943894138, 0.8440156220522849, 0.9644875133128681, 0.6304537020710161, 0.9432401377079194, 0.7180937529552038, 0.6738895667947261, 0.5381377059929049, 0.8957120314970468, 0.5631882014851017, 0.9202607538397822, 0.8137587762250542, 0.7431443717655877, 0.6459528751639252, 0.8747662073127506, 0.9404780030626286, 0.8810835094355909, 0.640440977901406, 0.7885473927190411, 0.9306134263484208, 0.824281419732213, 0.9512579262922515, 0.9545518415417189, 0.8418368008512058, 0.9105982092006304, 0.5914765687934058, 0.8433222945118828, 0.7167765151597321, 0.8653276680862507, 0.7401251724552667, 0.8859174125017564, 0.5668627343701118, 0.9367652046173134, 0.9421099875702431, 0.5783784042613993, 0.5327142429512272, 0.7124124117734381, 0.9112421942570008, 0.5554375457411537, 0.9295246429016664, 0.6760211194793537, 0.7043498818636859, 0.7928018457458255, 0.73501197571858, 0.5836890451771736, 
0.820747411241856, 0.8843547739429044, 0.8779133648322417, 0.7450635329823418, 0.7142722872645495, 0.8734164806959355, 0.6236474729060748, 0.8995688976716679, 0.6678048034983346, 0.7034602205990176, 0.7432310223177954, 0.813621365142972, 0.5328727103074257, 0.9261400768557102, 0.6514798672452933, 0.6572897050155986, 0.5501831273556967, 0.5425159773122012, 0.7170628869223863, 0.6278473970169285, 0.9903846713489992, 0.9318138097058415, 0.8060065019559972, 0.7628922321565166, 0.8046660812210206, 0.7434824710317581, 0.5070274640532426, 0.7397806168814882, 0.6869976900574477, 0.5784302124119508, 0.5644696585917549, 0.7173570000015842, 0.6270400218400922, 0.5344377206105032, 0.9420198743464071, 0.7031462840597249, 0.6521611330280646, 0.7890847451125869, 0.570104100491746, 0.5814294100530564, 0.6954757351306093, 0.5877588435147371, 0.8186458090440256, 0.7871443162394807, 0.8997648993534256, 0.9501899482946257, 0.933622116643398, 0.661641315527947, 0.9514714839945382, 0.831513166646866, 0.6150890325832803, 0.9334320600417159, 0.5079136567560487, 0.9748304887810608, 0.7573087106978084, 0.9585924892633375, 0.7133979228527718, 0.8752150467197262, 0.9757064420853625, 0.7519326448332686, 0.5039133231881756, 0.8730171363481852, 0.6216080226071079, 0.71009172732009, 0.6810531700917293, 0.8936645853461035, 0.6754177019989475, 0.5407404364071393, 0.7132153243582858, 0.5534639345230552, 0.924131233660023, 0.7933187605657578, 0.6348610705382065, 0.8131152084308977, 0.6028427800670035, 0.6220349203462241, 0.7811317137025475, 0.8583322971723008, 0.6633527267783748, 0.5770082691429559, 0.7937108596010682, 0.7214692299088161, 0.6546600807713424, 0.8768189601782945, 0.9305257215451215, 0.5479734060647414, 0.5742910264034105, 0.617614988427201, 0.5715423712832088, 0.859853838740742, 0.5862066424161021, 0.559396513999084, 0.5144304091251398, 0.9688083314060804, 0.6465875908153809, 0.8337488901825814, 0.7771651040328661, 0.7772357205419802, 0.6066547364087322, 0.5572682866984129, 
0.503116689169705, 0.69522921749982, 0.9820815758736074, 0.9512303152948713, 0.7540472046420235, 0.6517596899800374, 0.908163941401954, 0.7244420475327047, 0.8825575875554454, 0.6382371904455293, 0.8296757416932558, 0.6042965784369003, 0.7693784071364851, 0.753678554629143, 0.7676756342115925, 0.7107297553860794, 0.5000274767621518, 0.7494710111498415, 0.7429406076361869, 0.7927546983462888, 0.9683145965707352, 0.5413428185219211, 0.9669035384245186, 0.9494806537792086, 0.8966010074272828, 0.6721897979446718, 0.6575262147508388, 0.5936076113260688, 0.9257174885713323, 0.5987695658480765, 0.9437820085219673, 0.9191575768238975, 0.9208109189654384, 0.934936432394829, 0.7484405141810895, 0.6447438303471318, 0.6698472469271844, 0.775878766395188, 0.5313824245226504, 0.9494245915838986, 0.7444911741510136, 0.9705263439684896, 0.8737192055728096, 0.9675261267601822, 0.9209275167220179, 0.7560694618802186, 0.7643349650391447, 0.7955092874293056, 0.7934297155464973, 0.9627608088713138, 0.8132374151365696, 0.5019047599003259, 0.7570164447004739, 0.6341658054047848, 0.5838149457410698, 0.6576230301183055, 0.7277315134901824, 0.7292957554002719, 0.666899194600431, 0.8784124856209107, 0.8827701300367116, 0.7807045034892739, 0.9971137774713159, 0.9177368312466627, 0.9796868661928491, 0.9923558222015683, 0.560703368639407, 0.9814117006827021, 0.5451864057344622, 0.6429235867493419, 0.9861079426660451, 0.8675775706078692, 0.9530136231753015, 0.6935292196346793, 0.870400221430344, 0.5716532718859357, 0.8983280011419843, 0.9150488032690923, 0.9112285573181226, 0.9748970163831755, 0.6859387415068474, 0.7122478067018911, 0.9313372885624531, 0.9026752919916486, 0.8892667727875108, 0.6898175626733294, 0.6112711735373784, 0.7837664670509221, 0.6824951335549838, 0.8533811706240799, 0.9560251760810503, 0.757220715604887, 0.6352726108384941, 0.711023929260205, 0.9584203333123144, 0.8555763477012177, 0.8095747098595942, 0.7535361907199815, 0.6437934584233538, 0.5008441673401947, 
0.6612876128572828, 0.9042522736861425, 0.921241023030701, 0.8733621956511526, 0.8069611037092314, 0.6565829383443611, 0.8424027762810142, 0.5273599571315744, 0.8374336905347033, 0.7210070334375687, 0.9587182782879661, 0.6761889134450654, 0.6477503303439449, 0.7228804711303696, 0.8026117098962575, 0.9112717929214029, 0.770571771770121, 0.7281777645833093, 0.9965689427227387, 0.6360781422827388, 0.516818044541151, 0.6858405530411734, 0.8011375903537874, 0.979914551967523, 0.5366014570341049, 0.9565142555181265, 0.8455629821184381, 0.7790718169740416, 0.5958119986825783, 0.6063548140855227, 0.7469113087154022, 0.985498725395755, 0.6974605337447943, 0.9213196148009482, 0.7303384978029324, 0.9709121469554917, 0.8668743988118681, 0.9391600514988041, 0.8612979188124521, 0.9003368151894049, 0.8000257364504877, 0.9224733160147844, 0.5888971385416214, 0.8529615852435586, 0.717288972563136, 0.7180036340143716, 0.9770267600805898, 0.6887038690339103, 0.7423543020748768, 0.557863636578165, 0.6585179779724908, 0.6664750372437311, 0.9950767801226685, 0.5337179692448322, 0.8614533965953898, 0.8565734016922488, 0.7520565244568207, 0.8011182661509136, 0.9757852479157476, 0.5715375239762772, 0.7134693548383892, 0.995748994173999, 0.9975736806399205, 0.6186848967055683, 0.9189836375309972, 0.6078004075165899, 0.99119530070847, 0.8414866115304235, 0.8337892752785043, 0.9856610336368457, 0.7883092072837508, 0.6333446857569425, 0.882506835551409, 0.6976398746258279, 0.6207275360232963, 0.8690789902419409, 0.7729124614923487, 0.8619746006786679, 0.7173414752531213, 0.5563410548903569, 0.6761206016872663, 0.8419690064168357, 0.563758669231616, 0.9144068902799602, 0.5339524847387095, 0.6362321814698046, 0.9747015148462327, 0.5621074242890006, 0.7457465417721828, 0.5740258398797669, 0.9994073191401824, 0.7099421064312466, 0.5031301855553939, 0.5132694402868561, 0.5977321513136324, 0.5036560635174205, 0.7743697720536022, 0.9387120186921449, 0.8473760308979592, 0.6523923352151462, 
0.9139471131840848, 0.8502811341154839, 0.7291547706403153, 0.5020874852567747, 0.801620723604604, 0.7201728344499372, 0.7766655064819538, 0.6128065218654575, 0.9192401701964072, 0.873681603103724, 0.5429585909441819, 0.940535081806318, 0.8712476023807282, 0.7599376774644824, 0.7902620337570951, 0.5256579176358478, 0.5647087173340734, 0.6970946247086598, 0.8827023101682527, 0.8871650914833464, 0.5260866140445604, 0.8214872899010148, 0.5982837745423508, 0.7497342130890844, 0.9619057259818506, 0.5708480591269344, 0.9146610418308907, 0.9264501861116421, 0.6523233494124425, 0.8428378974267496, 0.8460909106051395, 0.7435002586883566, 0.7359716263018595, 0.578873185440186, 0.6163764857379145, 0.8276717757277445, 0.5010982904934758, 0.8526013163596042, 0.6408848274197447, 0.6372581441215519, 0.742301991269989, 0.7824567175397078, 0.9992497092620767, 0.6647833640358738, 0.7964180397604339, 0.800039650970241, 0.6192411135568412, 0.5260564466772504, 0.6495196017274146, 0.7853651000505603, 0.6815585073585819, 0.5158373326697498, 0.8906028638257473, 0.6618804297219996, 0.5835013316507247, 0.669429911408867, 0.7968823063082737, 0.6541990416397596, 0.9018053044936633, 0.8754281428781916, 0.6087528273897116, 0.6166780567651747, 0.8665751200680514, 0.6638165485816568, 0.6131373552006579, 0.5323761719384867, 0.7723186348941402, 0.9769440743397675, 0.5958366648182715, 0.8115346298123418, 0.5618976347358705, 0.8877214386170555, 0.9551801259970729, 0.6652865249966895, 0.6673557244180122, 0.573686218862282, 0.5129994506273263, 0.8453979319346718, 0.8707425653248047, 0.5814075990425833, 0.5347117449214227, 0.7362977381803116, 0.6152052288545012, 0.8656887140365164, 0.9836336220851225, 0.7912602947358316, 0.5552858030554236, 0.7556717278405736, 0.962409522345349, 0.6617838298746935, 0.654240025651758, 0.9994621724738466, 0.8378439652103951, 0.8657966185366717, 0.6621421949493858, 0.8021695859961021, 0.7465947187811774, 0.832887322504658, 0.6721401866360275, 0.7159817963971251, 
0.7111411141918023, 0.8067472419700455, 0.7627833083307991, 0.7729225166142151, 0.6324857268264852, 0.5651856154671664, 0.6604137220721812, 0.5520757947220853, 0.7711249535066063, 0.8925606519453957, 0.7983275619638657, 0.7715842549924244, 0.8693252886834286, 0.583374422231635, 0.7754109301159511, 0.8391082035646242, 0.8903116320667959, 0.7544662407656203, 0.5031416467087887, 0.6908302540029294, 0.5976443692869088, 0.9080516993973872, 0.8667448470024868, 0.5517862328762155, 0.8721470805140591, 0.675458878035782, 0.7006794038893247, 0.5565076706484597, 0.5610985924605627, 0.851873099254155, 0.9960868853678875, 0.5378293032496616, 0.8791498005294507, 0.8773374918752033, 0.6792264626732595, 0.5431847753332179, 0.8000903935127137, 0.780269537132966, 0.9482614591440914, 0.8291093224679154, 0.7872756173914861, 0.9515059174380347, 0.9225247358311859, 0.6926842655243117, 0.9523876247089607, 0.9769776676335458, 0.5569432483336734, 0.7703300368992393, 0.8191125487862045, 0.8271613063339048, 0.6515232864910507, 0.8088387575569624, 0.7654235780799394, 0.87167533874508, 0.5663386377102518, 0.6191243649694597, 0.6178464318218724, 0.6434498815042585, 0.8346834262687701, 0.54053316457401, 0.6322309007961566, 0.5094732408465423, 0.8418219617599842, 0.6673269722492006, 0.7934627700972595, 0.5286659063591588, 0.9489176581308412, 0.8337384255485083, 0.7404256196561958, 0.9692602784619396, 0.7652884478688775, 0.889832551586124, 0.9558358305429729, 0.5726894822323734, 0.6441493699479391, 0.9704311232755363, 0.8124147271221871, 0.6781070369637708, 0.929670758541586, 0.9724714170534804, 0.6462419109460684, 0.8214376155449039, 0.9789923754018797, 0.6260895627981413, 0.5317027639742266, 0.89615713409983, 0.8538281515245637, 0.5548547003223794, 0.741406316185762, 0.6908135492319801, 0.6706966296475088, 0.9423979748433952, 0.670737489697496, 0.7177962502944679, 0.721056640409529, 0.9662027804700994, 0.6038124557599449, 0.9526074598336407, 0.5568217851367083, 0.7592504707940295, 
0.9365179363779725, 0.8654047557984966, 0.5128188543193307, 0.5990036941111246, 0.7182918209828517, 0.950002715813391, 0.7212541199828575, 0.9963679534366607, 0.960096045213881, 0.8320741784793115, 0.8042071350008755, 0.9574710160746405, 0.601234968731589, 0.6669568716798724, 0.5255807765890418, 0.6537775155824519, 0.7807664884512735, 0.7144358549084568, 0.8227155322702377, 0.6805314953448725, 0.5390622007416239, 0.9839957789704934, 0.8904875267389728, 0.6288108600878475, 0.8688385152680057, 0.9300493547046851, 0.6726952828987378, 0.7555515820086094, 0.7879792581165148, 0.55494826175882, 0.9558488981079492, 0.821113055854839, 0.9007801560183517, 0.7737925287155893, 0.7266046278802386, 0.8112839343948648, 0.9368542263573965, 0.6235707243355053, 0.5756848967827904, 0.8016715459412453, 0.981303771902984, 0.5746837095377639, 0.7266744796847076, 0.6773007767308454, 0.5600540406801793, 0.5476878231824753, 0.5614491052158803, 0.7112401058727785, 0.8054554295502141, 0.8493997685041876, 0.5836007575515307, 0.9396317017691838, 0.9958054208399834, 0.9554025863476672, 0.6173231349650588, 0.7055933661356903, 0.6268018264281163, 0.9457511850462286, 0.5592404354183087, 0.5960593252850774, 0.5547104611284048, 0.601151187835163, 0.7287987405232123, 0.7014565540344913, 0.6556775765219988, 0.8808991412288941, 0.8895337536811605, 0.5789004831006224, 0.6305237679455836, 0.924221107301209, 0.9835741812273491, 0.5182722567688999, 0.5735273229463772, 0.5905296018184245, 0.5159345266966354, 0.7284962330928005, 0.6227498022449709, 0.8076834507435071, 0.9704207432033737, 0.8927090528434374, 0.842333758627815, 0.9418198111562235, 0.712587707039638, 0.8850693393036222, 0.8097118105455623, 0.7780723811490964, 0.7633220669419329, 0.7705201060095614, 0.5634803034381018, 0.6030798819746647, 0.8269777351269312, 0.9135598432204708, 0.5297040653866989, 0.8297749767028811, 0.868250428010067, 0.8663218025434765, 0.7439289828642983, 0.8722868181311015, 0.9900657915528992, 0.6825957042405633, 
0.9061207878514439, 0.613822748040844, 0.6494203075394189, 0.9047962836099182, 0.6325870891434326, 0.6423709302949374, 0.6660696887258932, 0.90411520190751, 0.639454927043043, 0.9508562963000573, 0.7253838800834522, 0.8543473096635148, 0.8310586378303546, 0.5155417222872996, 0.5453130378283304, 0.8640530611414068, 0.734677043321102, 0.5376944837725218, 0.5335573625141126, 0.6092680335637861, 0.657911576097914, 0.7051911175011469, 0.6295397020171312, 0.7699339621748603, 0.7246141271939917, 0.5806613905720452, 0.5138686872140681, 0.818255070039597, 0.7913758184319949, 0.5023213811776784, 0.5910546616176093, 0.6324600190561431, 0.9604819814206668, 0.9079376268765644, 0.764472509311759, 0.7817747924410188, 0.893304234804701, 0.7035102061907736, 0.5057879246474637, 0.746065007415214, 0.6110360347861915, 0.7445214498316692, 0.6260903584335665, 0.6437700015117106, 0.9305448299780945, 0.9615313807337951, 0.5678456631773825, 0.5413905457731178, 0.7504679141460288, 0.5458115052413002, 0.5533585628762225, 0.7255569443072702, 0.5758569277540733, 0.9492398211540833, 0.6880235098364744, 0.7883085730088045, 0.907496224043636, 0.5101585205712216, 0.6803300693102239, 0.6950555429472398, 0.5014360675039917, 0.7423106576198069, 0.5177010625479602, 0.6813738524845723, 0.745677686039746, 0.9961681008033905, 0.9157502194241854, 0.6543827437948877, 0.6003088496193036, 0.8752753959142847, 0.5008390702817875, 0.5250621036199268, 0.7122848275825246, 0.5149961622442238, 0.856352501056248, 0.5257994329291412, 0.9573887350503874, 0.5713202013241001, 0.6788700879973288, 0.8030417452088238, 0.9152374091567592, 0.574094988614587, 0.7723516419243039, 0.8535832734802129, 0.9336864778636551, 0.6447948649595265, 0.7655078955554708, 0.7254645807834945, 0.63637537886461, 0.7887045217916762, 0.5934622218766102, 0.8203116792896142, 0.5692854827523237, 0.6467857529177992, 0.7972636560320179, 0.8804398612860422, 0.533645372562443, 0.6476622420924809, 0.8088255927743637, 0.9471780165465618, 
0.9199845293172818, 0.8568457278449424, 0.593468246230265, 0.813810642356088, 0.5288604021946911, 0.7283658691462127, 0.9493313469492533, 0.8449470805947644, 0.8598639162093493, 0.5340233687643436, 0.674677163713364, 0.6842959685725425, 0.7023301236450261, 0.7640832462835048, 0.7922538046422785, 0.7190003384975399, 0.5676322205027622, 0.9209767733722878, 0.6184525942776133, 0.6045651873291266, 0.5777783485248336, 0.6928486027471805, 0.9985326878918185, 0.5637649464109196, 0.8443030887188667, 0.6713476709223738, 0.9318681259576871, 0.6074313627148606, 0.9035322930418755, 0.8298948253329061, 0.6045403684222947, 0.6558046103783799, 0.8557284666522487, 0.6121501281552657, 0.667310793683475, 0.9054852876992111, 0.7848088781333782, 0.8335147843972646, 0.6136241730442061, 0.677022640142104, 0.593698363397618, 0.6148382129742498, 0.5216241402930928, 0.6342784956193848, 0.5620795343105377, 0.6755171121911591, 0.6992711264108081, 0.8965840545208319, 0.7772980026388956, 0.8450824061038255, 0.7209752751792848, 0.793718468521458, 0.9622940318198248, 0.684557553160477, 0.8382042069781336, 0.6598797927488087, 0.7597062283815881, 0.9852343304900788, 0.7559726993860899, 0.9935343034168258, 0.8698953222205434, 0.9957706707942716, 0.5657815701428361, 0.6842998533267599, 0.7205684144981646, 0.9314830174941616, 0.6860745732828184, 0.6387593908720446, 0.9626763372496842, 0.5836547250440686, 0.5004242928692334, 0.8434168180718176, 0.6232540653269076, 0.545617103778656, 0.5796492093516535, 0.753803861660055, 0.5312607493270909, 0.953547914910422, 0.5560224331925252, 0.87339753354718, 0.5025551414048512, 0.8015042527657461, 0.5959116513461101, 0.7447825326060256, 0.5143176130208252, 0.6108928376773226, 0.6374744039898597, 0.9016043213656555, 0.630092828092697, 0.8480157345809908, 0.9430777323214545, 0.639509534398477, 0.662863090868284, 0.9254986682256023, 0.7004538760211909, 0.9085479335577828, 0.7464153680362542, 0.839087067795514, 0.5072384940294229, 0.7753451740475785, 
0.7315556227065924, 0.8826962307053683, 0.6074507794784245, 0.9965684101320124, 0.552159546429118, 0.589849692833322, 0.6594682136022542, 0.8975557733381037, 0.5969350786212662, 0.9922623343209901, 0.7837822215665722, 0.5101012490925311, 0.5203221239586255, 0.8348515266536056, 0.5634382985192479, 0.6643362871637202, 0.6145871082501486, 0.6366521756243084, 0.734527075675778, 0.7868888662460698, 0.9257551450466446, 0.8662798955182773, 0.5607970149893309, 0.5980424805276602, 0.613972648044476, 0.6370918503012465, 0.9770035396492885, 0.9110110997005114, 0.9937548807712102, 0.6875447184507159, 0.6689269748995312, 0.5699746037060779, 0.84246500361777, 0.7228686851843567, 0.5633761905092529, 0.7640321454292088, 0.6621286328898175, 0.6584796618809402, 0.5294548345241867, 0.6691890232485189, 0.7813702248263805, 0.9906404355395793, 0.5043412385384607, 0.7218082296171415, 0.8899386498059869, 0.9938601931640505, 0.9121784236078294, 0.684908442346211, 0.5989880371148075, 0.8770373785870167, 0.9230093758405205, 0.6593287357943511, 0.6805851876773147, 0.5546249940171921, 0.538867899211618, 0.5334414773021252, 0.9273028973656117, 0.509516189176552, 0.8654113308880194, 0.5122575162389182, 0.6825386621545374, 0.9130837090995336, 0.824544770282492, 0.9933802377534982, 0.9116963641251179, 0.8336708271503476, 0.8416603939119549, 0.6940793664446392, 0.7253373445447351, 0.6384053477534342, 0.8608817947259118, 0.9642229829483325, 0.6898990535169286, 0.948719053875803, 0.9921883059678085, 0.9797817461597345, 0.5285702242944168, 0.7010812011677543, 0.8279471566437753, 0.648241635913225, 0.6714991187254069, 0.6137943650564311, 0.9238002111968158, 0.5407092194816348, 0.6745535837162813, 0.7307117050951327, 0.7384877207949492, 0.5741176058250288, 0.6662218186903621, 0.781660640476342, 0.9348771903554276, 0.8142690375648682, 0.5710612507633063, 0.6514246308764324, 0.7656079639681209, 0.8879045034036527, 0.6413645830150221, 0.7216095545197201, 0.6264288799902129, 0.505709324892132, 
0.7757091797163362, 0.5657193492136277, 0.8958825805008046, 0.9937161322002488, 0.8823092155264343, 0.7814485358326998, 0.8618265086529806, 0.950143345406363, 0.954820573386696, 0.6949853142184929, 0.891774536348829, 0.6408411815351678, 0.8339061774197489, 0.6779160989731849, 0.8662002535672579, 0.7715748882556059, 0.8741347069195139, 0.7112388243724884, 0.7950530477722706, 0.9435409440177326, 0.9247609619381696, 0.6981304800295683, 0.5243025319373231, 0.7968823757418242, 0.5731365965079138, 0.9541686548008026, 0.6431035800978497, 0.5960339485315683, 0.9867032170546683, 0.6587411131541698, 0.840650363106993, 0.8234617754503873, 0.8536465434897242, 0.6013262723050327, 0.608178678223262, 0.6497949122258939, 0.7988042486438849, 0.6722191044752102, 0.7472579201712451, 0.873499079282394, 0.7640119572113249, 0.8957131177901472, 0.5502334909196646, 0.6727632884423469, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 
892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 10, 11, 12, 13, 14, 15, 3056, 3058, 3060, 3062, 3064, 3066, 3068, 
3070, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3557, 3558, 3560, 3562, 3564, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3576, 3577, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3596, 3597, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 
3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3703, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3717, 3718, 3719, 3720, 3722, 3724, 3726, 3727, 3728, 3729, 3730, 3732, 3734, 3736, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3749, 3751, 3752, 3754, 3755, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3818, 717, 712, 4024, 4026, 704, 699, 4028, 4030, 910, 905, 717, 712, 704, 699, 638, 633, 3832, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4049, 3957, 4051, 3962, 656, 3965, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3957, 4053, 3960, 3962, 656, 3965, 669, 910, 905, 3843, 910, 905, 3845, 933, 938, 933, 938, 910, 905, 3852, 910, 905, 3854, 933, 938, 933, 938, 755, 789, 3859, 3861, 784, 789, 4071, 760, 760, 3864, 590, 585, 610, 605, 3869, 784, 590, 585, 3872, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3884, 3885, 3887, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3896, 3898, 3900, 755, 755, 755, 784, 590, 585, 717, 712, 4077, 3980, 4079, 3980, 760, 755, 638, 633, 3910, 638, 633, 3911, 784, 789, 4081, 4083, 854, 859, 3917, 905, 910, 3921, 928, 905, 910, 3921, 928, 4086, 943, 859, 854, 3927, 877, 872, 877, 872, 859, 854, 3933, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3957, 638, 633, 3960, 3962, 656, 3965, 669, 717, 712, 717, 712, 717, 712, 3971, 3972, 704, 699, 717, 712, 717, 712, 3977, 3978, 3980, 760, 755, 760, 755, 760, 755, 765, 789, 784, 789, 784, 4116, 789, 784, 826, 821, 836, 831, 
846, 841, 4119, 826, 821, 836, 831, 846, 841, 4121, 859, 854, 4007, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4140, 4143, 4127, 4128, 4129, 4145, 4147, 4149, 4151, 4153, 4158, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4170, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4178, 4128, 4129, 1363, 1547, 4180, 4103, 4129, 4185, 4073, 1545, 1540, 4187, 4075, 4076, 4193, 1276, 1271, 4195, 1276, 1271, 1281, 1281, 1281, 4197, 4199, 1545, 1540, 1545, 1540, 1545, 1540, 4103, 4128, 4129, 4103, 4128, 4129, 1521, 1516, 4106, 1521, 1516, 4108, 4124, 4126, 4127, 4128, 4129, 4210, 1521, 1516, 4132, 1521, 1516, 4135, 1545, 1540, 1545, 1540, 4209, 4208, 4209, 4208, 4209, 4208, 2077, 2077, 4209, 4208, 4209, 4208, 4209, 4208, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4231, 2174, 4233, 2174, 4235, 4238, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4228, 4227, 4242, 4241, 4228, 4227, 4228, 4227, 4228, 4227, 4228, 4227, 4242, 4241, 15, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4275, 4276, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4312, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 
4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4417, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4524, 4525, 4526, 4527, 4528, 4529, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4552, 4553, 4554, 4114, 4113, 4114, 4113, 4114, 4113, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4586, 4587, 4588, 4589, 4591, 4592, 4594, 4595, 4596, 4598, 4599, 4601, 4602, 4604, 4605, 4606, 4607, 4608, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4555, 4645, 4646, 4558, 4647, 4648, 4560, 4649, 4650, 4651, 4652, 4555, 4653, 4654, 4156, 4155, 4558, 4655, 4656, 4156, 4155, 4560, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4208, 4208, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4702, 4704, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4237, 4241, 4723, 4724, 4240, 4242, 4725, 4726, 4727, 
4728, 4729, 4730, 4731, 4732, 4733, 4734, 4240, 4237, 4240, 4237, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4736, 4738, 4740, 4742, 4744, 4746, 4748, 4751, 4753, 4755, 4757, 4759, 4761, 4764, 4766, 4768, 4770, 4772, 4774, 4777, 4780, 4782, 4784, 4792, 4794, 4796, 4799, 4802, 4804, 4806, 4815, 4818, 4821, 4823, 4825, 4828, 4831, 4833, 4839, 4844, 4846, 4850, 4853, 4855, 4858, 4862, 4868, 4871, 4873, 4875, 4884, 4886, 4890, 4892, 4895, 4898, 4900, 4903, 4907, 4912, 4915, 4917, 4919, 4922, 4924, 4926, 4928, 4930, 4933, 4936, 4938, 4940, 4943, 4946, 4953, 4955, 4957, 4961, 4963, 4965, 4970, 4972, 4974, 4977, 4979, 4981, 4983, 4985, 4987, 4989, 4991, 4993, 4995, 4998, 5000, 5002, 5005, 5008, 5011, 4952, 4950, 4969, 5017, 5018, 5019, 5020, 4952, 4950, 4969, 5021, 5022, 4952, 4950, 4969, 4969, 4952, 4950, 4969, 5023, 5025, 5027, 5029, 4810, 4787, 4791, 4789, 4810, 4809, 4814, 4812, 5033, 5035, 5037, 5042, 5044, 5048, 4867, 4838, 4976, 4114, 4113, 4115, 4950, 4867, 4838, 4867, 4976, 4114, 4113, 4115, 4950, 4867, 4879, 4118, 4952, 4870, 4879, 4118, 4867, 4879, 4976, 4114, 4113, 4115, 5053, 4952, 4950, 4416, 4888, 4418, 4889, 4952, 4950, 4969, 4911, 4906, 4911, 4910, 4911, 4906, 5057, 4911, 4910, 5059, 5064, 5066, 5068, 5076, 5079, 4952, 4950, 4969, 5087, 5090, 5093, 5095, 5081, 5078, 5097, 5100, 5103, 4634, 4209, 4208, 5083, 5108, 5111, 5112, 5113, 5116, 5117, 5118, 5122, 5124, 5126, 5129, 5081, 5078, 5081, 5052, 5133, 5135, 5137, 4634, 4209, 4208, 5139, 4634, 4209, 5141, 4634, 4209, 5142, 5083, 5083, 4634, 4209, 4208, 5081, 5078, 2135, 2133, 5143, 5146, 5149, 4634, 4209, 4208, 5081, 5052, 2135, 2133, 5152, 5155, 5158, 4634, 4209, 4208, 5081, 5078, 5163, 4634, 4209, 4208, 4634, 4209, 4208, 5083, 4634, 4209, 4208, 5167, 5169, 5171, 5174, 5176, 5176, 5121, 5183, 5184, 5180, 5180, 5165, 5187, 5188, 5176, 5121, 5180, 5165, 5176, 5176, 5176, 5173, 5176, 5176, 5180, 5165, 5199, 5200, 5201, 5202, 5166, 5180, 4242, 4241, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4033, 
4032, 4055, 4033, 4032, 4056, 4932, 4110, 4109, 4860, 5286, 4861, 4948, 4945, 5315, 5316, 4959, 4112, 4111, 5230, 4967, 4112, 4111, 5317, 5318, 4118, 4514, 4115, 5230, 5320, 4932, 4110, 4109, 4860, 5286, 4861, 4948, 4945, 5322, 5323, 4959, 4112, 4111, 5224, 4967, 4112, 4111, 5324, 5325, 4514, 4118, 4115, 4033, 4032, 4058, 4057, 4932, 4110, 4109, 4860, 5286, 4861, 4948, 4945, 5327, 5328, 4959, 4112, 4111, 5227, 4967, 4112, 4111, 5329, 4976, 4514, 4118, 4115, 4948, 4945, 4959, 4112, 4111, 5293, 4967, 4112, 4111, 5330, 4976, 4514, 4118, 4115, 5286, 4861, 4932, 4110, 4109, 4860, 4948, 4945, 5331, 5332, 4967, 4112, 4111, 5333, 4959, 4112, 4111, 5230, 4114, 4113, 4514, 4118, 4115, 4039, 4038, 4776, 4110, 4109, 4779, 5237, 4786, 5338, 5339, 5340, 5341, 4798, 4110, 4109, 4801, 5244, 4808, 5342, 5343, 5344, 5345, 4820, 4817, 4056, 4055, 4830, 4827, 4058, 4057, 4932, 4109, 4110, 4860, 5286, 4861, 4848, 4864, 4870, 5352, 4877, 4112, 4111, 5353, 5354, 5355, 5356, 5357, 4932, 4109, 4110, 4860, 5286, 4861, 4948, 4837, 5358, 5359, 4877, 4112, 4111, 5360, 4976, 4114, 4113, 4368, 4932, 4109, 4110, 4870, 5361, 4877, 4112, 4111, 5362, 5363, 5364, 5365, 4932, 4109, 4110, 4860, 5286, 4861, 4848, 4945, 5366, 5367, 4877, 4112, 4111, 5368, 4976, 4114, 4113, 5369, 4932, 4110, 4109, 4865, 4864, 5370, 5371, 4877, 4112, 4111, 5372, 4976, 4114, 4113, 5373, 4932, 4110, 4109, 4860, 4861, 4865, 4864, 4870, 5374, 4877, 4112, 4111, 5375, 5376, 5377, 5378, 5379, 4098, 4096, 4932, 4110, 4109, 4935, 5286, 4942, 4897, 4894, 5381, 5382, 4112, 4111, 5383, 5384, 4976, 4114, 4113, 4118, 4115, 4514, 4112, 4111, 5385, 4112, 4111, 5386, 4976, 4114, 4113, 4932, 4110, 4109, 4935, 5286, 4942, 4897, 4894, 5387, 5388, 4959, 5293, 4967, 5389, 4976, 4114, 4113, 4118, 4115, 4514, 4097, 4099, 5390, 5391, 5392, 5393, 4902, 4905, 5394, 5395, 4909, 5397, 5398, 4914, 4097, 4096, 4921, 4099, 4098, 4932, 4110, 4109, 4935, 5286, 4942, 4948, 4945, 5405, 5406, 4959, 4112, 4111, 5293, 4967, 4112, 4111, 5407, 4976, 4114, 4113, 
4118, 4514, 4115, 5303, 4523, 5306, 4530, 4997, 5310, 5007, 5004, 5013, 5010, 5412, 5413, 5414, 5415, 5416, 4551, 5417, 5418, 5419, 5420, 5421, 5422, 5424, 5425, 5427, 4161, 4160, 4201, 4201, 5031, 5032, 5347, 5432, 5433, 5434, 5435, 4201, 5439, 5440, 5441, 4201, 5443, 5444, 5081, 5078, 5350, 5446, 5447, 5081, 5052, 5351, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 4590, 5456, 5457, 5461, 5462, 5463, 5464, 5465, 4597, 5466, 5467, 5396, 5396, 5396, 5399, 5471, 5472, 5473, 5474, 5475, 4201, 4201, 4201, 4204, 4204, 5477, 5478, 5479, 5480, 5481, 5482, 5081, 5078, 4206, 4206, 5483, 5484, 5485, 5486, 5092, 5089, 4213, 4213, 5491, 5166, 5492, 5493, 4240, 4242, 4241, 4237, 5176, 5173, 5494, 5176, 5173, 5496, 5497, 5498, 5499, 5501, 5502, 4240, 4242, 4241, 4237, 5503, 5504, 5505, 4240, 5166, 4237, 5148, 5145, 5151, 5506, 4240, 4237, 5507, 5508, 5165, 4240, 4237, 5509, 5166, 5148, 5145, 5151, 5157, 5154, 5160, 5148, 5145, 5151, 5157, 5154, 5160, 5510, 5166, 5511, 5512, 5176, 5173, 5517, 5515, 5176, 5173, 5518, 5519, 5520, 9, 10, 11, 12, 13, 14, 15, 5536, 5537, 5538, 5539, 5540, 5541, 5542, 5543, 5544, 5545, 5546, 5547, 5548, 5549, 5550, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5561, 5562, 5563, 5564, 5566, 5567, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5576, 5577, 5578, 5579, 5580, 5581, 5582, 5585, 5586, 5587, 5588, 5589, 5590, 5591, 5592, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5610, 5611, 5612, 5613, 5614, 5615, 5616, 5617, 5618, 5619, 5620, 5621, 5622, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631, 5632, 5633, 5634, 5635, 5636, 5638, 5639, 5640, 5642, 5643, 5644, 5645, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5661, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5671, 5673, 5674, 5675, 5676, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5691, 5692, 5693, 5695, 5699, 5700, 5701, 5702, 5703, 5704, 5705, 5706, 5707, 5709, 5710, 5711, 5713, 5714, 5715, 
5716, 5717, 5718, 5719, 5720, 5722, 5723, 5724, 5725, 5729, 5730, 5731, 5732, 5733, 5734, 5735, 5736, 5737, 5739, 5740, 5741, 5743, 5744, 5745, 5747, 5748, 5749, 5750, 5751, 5752, 5754, 5755, 5756, 5758, 5759, 5760, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 5769, 5771, 5772, 5773, 5775, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5786, 5787, 5788, 5789, 5791, 5792, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5804, 5805, 5807, 5808, 5809, 5810, 5811, 5812, 5813, 5814, 5815, 5816, 5817, 5818, 5820, 5821, 5822, 5824, 5825, 5826, 5827, 5828, 5829, 5830, 5831, 5832, 5834, 5836, 5837, 5838, 5840, 5841, 5843, 5844, 5845, 5846, 5847, 5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 5856, 5857, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5867, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5885, 5886, 5887, 5888, 5889, 5893, 5895, 5897, 5898, 5899, 5900, 5901, 5902, 5903, 5904, 5905, 5907, 5909, 5910, 5913, 5914, 5916, 5917, 5918, 5919, 5921, 5922, 5923, 5926, 5929, 5931, 5932, 5934, 5937, 5939, 5940, 5942, 5943, 5944, 5945, 5946, 5949, 5951, 5952, 5953, 5954, 5955, 5956, 5959, 5962, 5963, 5964, 5965, 5967, 5970, 5971, 5972, 5973, 5974, 5975, 5976, 5978, 5979, 5980, 5981, 5982, 5983, 5985, 5986, 5987, 5988, 5991, 5993, 5994, 5995, 5996, 5997, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6009, 6011, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6019, 6020, 6021, 6022, 6023, 6024, 6025, 6026, 6027, 6028, 6029, 6030, 6031, 6032, 6033, 6035, 6036, 6037, 6038, 12, 13, 14, 15, 6048, 6051, 6054, 6060, 6063, 6067, 6070, 6074, 6080, 6083, 6087, 6090, 6093, 6095, 6097, 6103, 6106, 6110, 6114, 6117, 6119, 6123, 6127, 6132, 6136, 6139, 6142, 6146, 6148, 6151, 6153, 6161, 6169, 6171, 6173, 6175, 6177, 6183, 6185, 6186, 6189, 6190, 6196, 6199, 6202, 6206, 6209, 6210, 6213, 6214, 6220, 6223, 6226, 6229, 6232, 6235, 6238, 6241, 6246, 6248, 6249, 6252, 6255, 6261, 6264, 6266, 6269, 6272, 6274, 6276, 6279, 6285, 6291, 6294, 6307, 
6310, 6312, 6318, 6321, 6325, 6328, 6331, 6340, 6342, 6349, 6102, 6059, 6079, 6337, 6335, 6339, 6059, 6131, 6079, 6102, 6131, 6339, 6353, 6158, 6160, 6166, 6168, 6363, 6365, 6366, 6369, 6370, 6182, 6219, 6245, 6195, 6335, 6182, 6195, 6245, 6219, 6284, 6245, 6337, 6373, 6377, 6317, 6337, 6335, 6254, 6253, 6299, 6260, 6284, 5823, 6289, 6337, 6335, 6298, 6297, 6299, 6300, 6317, 6337, 6335, 6303, 6305, 6385, 6392, 6393, 6394, 6317, 6337, 6335, 6339, 6398, 6399, 5488, 5162, 5161, 6403, 5896, 5894, 5488, 5162, 5161, 6406, 6408, 5488, 5487, 6410, 6376, 6380, 5488, 5487, 6412, 6414, 5896, 5894, 5488, 5162, 5161, 6417, 6419, 5488, 5487, 5488, 5161, 5162, 6422, 6423, 6376, 6426, 6380, 5488, 5162, 5161, 6429, 6430, 5161, 5162, 5488, 6433, 5488, 5162, 5161, 6436, 6376, 6438, 6380, 6441, 6376, 6444, 6380, 6447, 5162, 5161, 5488, 6450, 5488, 5487, 6452, 6453, 5488, 5487, 6454, 5488, 5487, 6457, 6459, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6466, 6468, 6469, 6470, 6471, 6473, 6474, 6475, 6478, 6480, 6481, 6482, 6484, 6485, 6486, 6487, 6489, 6490, 6492, 6494, 6495, 6500, 6503, 6505, 6507, 6508, 6509, 6511, 6513, 6515, 6516, 6517, 6519, 6520, 6521, 6524, 6526, 6529, 6530, 6533, 6534, 6536, 6537, 6540, 6542, 6543, 6544, 6545, 6549, 6105, 6550, 6062, 6551, 6082, 6552, 6553, 6554, 6053, 6050, 6555, 6062, 6556, 6138, 6557, 6082, 6477, 6558, 6105, 6287, 6559, 6138, 6560, 6547, 6562, 6563, 6564, 6565, 6499, 6497, 6571, 6502, 5698, 6572, 6222, 6573, 6523, 5778, 6574, 6198, 6575, 6538, 6539, 6547, 6576, 6502, 5698, 6577, 6198, 6578, 6510, 5728, 6579, 6222, 6580, 6234, 6581, 6523, 5778, 6582, 6547, 6585, 6320, 6586, 6587, 6588, 6589, 6590, 6591, 6263, 5794, 5793, 5806, 5803, 6592, 6287, 6593, 6594, 6595, 6596, 6597, 6598, 6599, 6600, 6601, 6320, 6602, 6603, 6538, 6539, 6604, 6605, 6539, 6538, 6610, 6320, 6611, 6612, 6613, 6547, 6344, 6616, 6617, 6618, 6561, 6620, 6621, 6622, 6623, 6624, 6625, 6615, 6627, 6628, 6374, 6630, 6631, 6615, 6632, 6633, 6615, 6561, 6636, 6637, 6638, 
6639, 6640, 6641, 6643, 6644, 6615, 6645, 6646, 6647, 6649, 6360, 6650, 6361, 6652, 6609, 6653, 6654, 6655, 6658, 6659, 6660, 6661, 6386, 6662, 6663, 6664, 6568, 6666, 6570, 6668, 6374, 6670, 6378, 6672, 6386, 6674, 6675, 6676, 6615, 6678, 6679, 6615, 6609, 6682, 6683, 6615, 6685, 6686, 6404, 5984, 6635, 6415, 5990, 6421, 6428, 6657, 6437, 6440, 6443, 6446, 6449, 6451, 6680, 6681, 6456, 6688, 6100, 6753, 5609, 6109, 6715, 6057, 6755, 5559, 6066, 6707, 6077, 6757, 5583, 6086, 6711, 6758, 6761, 6762, 6057, 6764, 5559, 6066, 6707, 6135, 6766, 5623, 6073, 6718, 6077, 6768, 5583, 6086, 6711, 6769, 6100, 6771, 5609, 6109, 6715, 6772, 5623, 6122, 6718, 6135, 6774, 6145, 5641, 6722, 6776, 6156, 6164, 6781, 6782, 6180, 6784, 5694, 6785, 6217, 6787, 5742, 5746, 6244, 6789, 5774, 6790, 6193, 6792, 5712, 6205, 6794, 6795, 6796, 6180, 6798, 5694, 6799, 6193, 6801, 5712, 6205, 6282, 6803, 5757, 6804, 6217, 6806, 5742, 5746, 6282, 6808, 5757, 5761, 6244, 6810, 5774, 6811, 6813, 6315, 6815, 5866, 6324, 6751, 6816, 6818, 6258, 6822, 6823, 6824, 6742, 6825, 6826, 6746, 6282, 6828, 6829, 6746, 6831, 6833, 6315, 6838, 5866, 6324, 6751, 6839, 6841, 6842, 6845, 6846, 6315, 6848, 5866, 6324, 6751, 6849, 6852, 6384, 6381, 6384, 6382, 6853, 6854, 6857, 6860, 6864, 6865, 6867, 6870, 6871, 6873, 6874, 6877, 6881, 6883, 6884, 6888, 6890, 6384, 6381, 6892, 6893, 6896, 6384, 6381, 6384, 6382, 6900, 6901, 6904, 6906, 6908, 6910, 6384, 6381, 6384, 6382, 6384, 6383, 6912, 6913, 6916, 6917, 6919, 6920, 6921, 6923, 6924, 6926, 6863, 6927, 6928, 6929, 6930, 6880, 6931, 6887, 6932, 6933, 6899, 6934, 6935, 6936, 6937, 6938, 6939, 6940, 6941, 6942, 6943, 6944, 6946, 6947, 6948, 6949, 6951, 6952, 6953, 6954, 6956, 6957, 6958, 6960, 6962, 6964, 6965, 6966, 6967, 6969, 6970, 6971, 6972, 6974, 6975, 6976, 6978, 6980, 6981, 6982, 6984, 6985, 6986, 6987, 6989, 6990, 6991, 6993, 6994, 6995, 6997, 6999, 7001, 7003, 7004, 7005, 7007, 7009, 7011, 7012, 7013, 7016, 7018, 7020, 7022, 7023, 7024, 7026, 7028, 7030, 
7031, 7032, 7034, 7035, 7036, 7038, 7041, 7043, 7044, 7045, 7048, 7050, 7052, 7053, 7055, 7056, 7059, 7062, 7064, 7065, 7066, 7068, 7070, 7072, 7074, 7075, 7076, 7047, 7079, 7080, 7061, 7081, 7082, 7084, 6760, 7085, 7086, 6851, 5966, 6851, 5966, 6760, 5925, 6775, 5925, 7093, 7094, 7097, 5924, 7047, 7100, 7101, 7103, 7104, 7047, 7105, 7106, 7061, 7107, 7108, 7110, 5924, 5924, 5925, 7047, 7115, 7116, 7061, 7117, 7118, 7119, 7120, 7122, 6851, 5966, 6851, 5966, 7131, 7088, 6868, 6869, 7091, 7095, 7127, 7136, 7095, 7138, 6889, 6891, 7141, 6905, 6907, 6909, 6911, 7124, 7127, 7127, 7129, 13, 14, 15, 6752, 7153, 6754, 7157, 6756, 7161, 6763, 7166, 6765, 7170, 6767, 7174, 6770, 7178, 7181, 7184, 7185, 6777, 6779, 6783, 6786, 6788, 6791, 6797, 6800, 6802, 6805, 6807, 6809, 6814, 7218, 6821, 6827, 6837, 7229, 6847, 7235, 7238, 7239, 7223, 7241, 7242, 7245, 5892, 7246, 7248, 7249, 7250, 7251, 7252, 5892, 7253, 7254, 7255, 7256, 7233, 6359, 7200, 7008, 7201, 7259, 7260, 7261, 7265, 7266, 7223, 7268, 7269, 7008, 7195, 7200, 7000, 7201, 7272, 7000, 7195, 7008, 7200, 7201, 7273, 7019, 7206, 7027, 7211, 7214, 7039, 7232, 7274, 7275, 7276, 7223, 7225, 7227, 7278, 7279, 7232, 7281, 7233, 7284, 7285, 7286, 7287, 7244, 7289, 7290, 7291, 7292, 7293, 7294, 7296, 7258, 7298, 7299, 7263, 7264, 7271, 7301, 7302, 7303, 7304, 7283, 7305, 7306, 7307, 7308, 13, 14, 15, 7042, 7220, 7049, 7351, 6950, 7159, 6945, 7155, 6955, 7163, 7355, 7073, 7237, 7073, 7237, 6945, 7155, 6950, 7159, 6955, 7163, 7362, 6963, 7168, 6968, 7172, 6973, 7176, 6979, 7180, 6983, 7183, 6988, 7187, 6780, 6778, 7237, 7367, 7368, 7010, 7369, 7006, 7370, 7371, 7042, 7220, 7042, 7220, 7049, 7377, 7006, 7380, 7002, 7381, 7010, 7382, 6998, 7383, 7384, 6998, 7386, 7002, 7387, 7006, 7388, 7010, 7389, 7390, 7017, 7392, 7021, 7393, 7025, 7394, 7029, 7395, 7033, 7396, 7037, 7397, 7398, 7042, 7220, 7049, 7402, 7403, 7057, 7404, 7063, 7231, 7407, 7409, 7073, 7237, 7073, 7237, 7350, 7353, 7414, 7247, 7358, 7360, 7363, 7365, 7257, 7422, 
7374, 7425, 7426, 7376, 7379, 7427, 7401, 7406, 7432, 7411, 7413, 7132, 7146, 7139, 7133, 7134, 7135, 7137, 7146, 7139, 7144, 7143, 7146, 7145, 7148, 7149, 7150, 7151, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7440, 7441, 7442, 7444, 7445, 7446, 7447, 7448, 7449, 7451, 7452, 7453, 7454, 7455, 7456, 7457, 7458, 7459, 7460, 7462, 7463, 7464, 7465, 7466, 7467, 7468, 7469, 7470, 7471, 7472, 7473, 7474, 7475, 7476, 7479, 7481, 7484, 7485, 7486, 7487, 7488, 7490, 7492, 7494, 7496, 7499, 7501, 7503, 7505, 7508, 7510, 7512, 7514, 7516, 7518, 7521, 7522, 7523, 7526, 7528, 7529, 7532, 7533, 7534, 7535, 7536, 7537, 7450, 7539, 7540, 7541, 7461, 7542, 7543, 7544, 7478, 7372, 7546, 7549, 7550, 7385, 7391, 7399, 7552, 7553, 7408, 7411, 7555, 7556, 7130, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7297, 7564, 7565, 7140, 7300, 7142, 7566, 7567, 7568, 7569, 7147, 7570, 7571, 7572, 7573, 7615, 7585, 7527, 7525, 7443, 7592, 7606, 7588, 7612, 7590, 7651, 7594, 7596, 7598, 7612, 7600, 7606, 7602, 7655, 7612, 7604, 7606, 7610, 7608, 7614, 7612, 7610, 7659, 7513, 7482, 7480, 7517, 7500, 7502, 7660, 7621, 7623, 7527, 7525, 7489, 7493, 7497, 7517, 7491, 7495, 7513, 7664, 7502, 7506, 7517, 7500, 7513, 7504, 7665, 7511, 7513, 7509, 7519, 7517, 7515, 7666, 7640, 7527, 7525, 7524, 7644, 7669, 7670, 7646, 7648, 7673, 7288, 7675, 7295, 7681, 7682, 7684, 7685, 7686, 7687, 7689, 7691, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7617, 7724, 7725, 7726, 7727, 7728, 7729, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7743, 7744, 7745, 7746, 7747, 7748, 7750, 7751, 7752, 7753, 7754, 7755, 7757, 7758, 7759, 7760, 7761, 7764, 7765, 7767, 7769, 15, 7793, 7796, 7798, 7803, 7805, 7808, 7810, 7813, 7816, 7817, 7819, 7821, 7825, 7828, 7830, 7832, 7834, 7836, 7838, 7840, 7842, 7844, 7847, 7762, 7649, 7653, 7654, 7762, 7661, 7762, 7662, 7762, 7667, 
7763, 7671, 7672, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7856, 7857, 7859, 7861, 7863, 7865, 7868, 7869, 7872, 7875, 7878, 7879, 7880, 7881, 7882, 7723, 7883, 7884, 7885, 7886, 7887, 7888, 7889, 7890, 7891, 9, 10, 11, 12, 13, 14, 15, 7905, 7906, 7907, 7909, 7911, 7912, 7913, 7650, 7657, 7657, 7919, 7663, 7663, 7668, 7677, 7678, 7694, 7680, 7674, 7695, 7773, 7679, 7692, 7693, 8, 9, 10, 11, 12, 13, 14, 15, 7943, 7944, 7656, 7706, 7945, 7656, 7714, 7756, 7730, 7947, 7948, 7756, 7742, 7756, 7749, 7949, 7950, 7951, 7952, 7953, 7954, 7770, 7955, 7956, 7957, 7958, 7959, 11, 12, 13, 14, 15, 7915, 7970, 7971, 7973, 7974, 7975, 7976, 7920, 7922, 7979, 7980, 7981, 7982, 7924, 7989, 7985, 7992, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8000, 7969, 7972, 8005, 8007, 8008, 8009, 8011, 8013, 8014, 10, 11, 12, 13, 14, 15, 8033, 8034, 7771, 7766, 7775, 7777, 7772, 7774, 7776, 7768, 10, 11, 12, 13, 14, 15, 8050, 7853, 8051, 8052, 8053, 8054, 8055, 8056, 7854, 8057, 10, 11, 12, 13, 14, 15, 8065, 8072, 7984, 8066, 7988, 7991, 8070, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7987, 8081, 8082, 8084, 8085, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8096, 8086, 8099, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8098, 8113, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8128, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8144, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 
893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 10, 11, 12, 13, 14, 15, 3057, 3059, 3061, 3063, 3065, 3067, 3069, 3071, 
3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 961, 962, 963, 964, 967, 968, 969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 1163, 1164, 3559, 3561, 3563, 3565, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3575, 1265, 1266, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 1312, 1315, 1321, 1324, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 1550, 1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 
1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 1770, 1771, 3705, 3707, 3709, 3711, 3713, 3715, 1805, 1806, 1909, 1910, 3721, 3723, 3725, 1925, 1926, 1930, 1931, 3731, 3733, 3735, 3737, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3748, 3750, 2047, 3753, 2071, 3756, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 3808, 4016, 4016, 3809, 3811, 3810, 3812, 4016, 4016, 4016, 3814, 3813, 3816, 3815, 3817, 3820, 3819, 4025, 4027, 3822, 3821, 4029, 4031, 3824, 3823, 3826, 3825, 3828, 3827, 3830, 3829, 3831, 3834, 3833, 3836, 3835, 3991, 3837, 3939, 3938, 3941, 3940, 3943, 3942, 3944, 3946, 3838, 3839, 3949, 3948, 3951, 3950, 3953, 3952, 3840, 4050, 624, 4052, 3961, 3963, 3964, 3966, 3939, 3938, 3941, 3940, 3943, 3942, 3944, 3946, 3945, 3947, 3949, 3948, 3951, 3950, 3953, 3952, 3954, 624, 4054, 639, 3961, 3963, 3964, 3966, 3842, 3841, 183, 4016, 3844, 192, 3847, 3846, 3849, 3848, 3851, 3850, 183, 4016, 3853, 192, 3856, 3855, 3858, 3857, 3875, 3877, 624, 3860, 3863, 3862, 4072, 3876, 3985, 3902, 3866, 3865, 3868, 3867, 639, 3877, 3870, 3948, 3871, 3874, 3873, 3876, 3875, 3877, 3879, 3878, 3880, 3881, 3883, 3882, 624, 639, 3886, 3888, 3890, 3889, 3891, 3893, 3892, 3976, 3894, 3976, 3895, 718, 3897, 3899, 3901, 3983, 3984, 3902, 3948, 3903, 3905, 3904, 4078, 404, 4080, 405, 3907, 3906, 3909, 3908, 624, 3959, 3959, 639, 3913, 3912, 4082, 4084, 3915, 3914, 3916, 3919, 3918, 3920, 3922, 3919, 3918, 3920, 3922, 4087, 3923, 3925, 3924, 3926, 4009, 3928, 4009, 3929, 3931, 3930, 3932, 3935, 3934, 3937, 3936, 3939, 3938, 3941, 3940, 3943, 3942, 3944, 3946, 3945, 3947, 3949, 3948, 3951, 3950, 3953, 3952, 3954, 3956, 3955, 624, 3959, 3958, 639, 3961, 3963, 3964, 3966, 3968, 3967, 3976, 3969, 3976, 3970, 718, 690, 
3974, 3973, 3976, 3975, 3976, 3976, 718, 723, 3979, 3982, 3981, 3985, 3983, 3985, 3984, 3986, 3988, 3987, 3991, 3989, 4117, 3991, 3990, 3993, 3992, 3995, 3994, 3997, 3996, 4120, 3999, 3998, 4001, 4000, 4003, 4002, 4122, 4005, 4004, 4006, 4009, 4008, 4011, 4010, 4013, 4012, 4014, 4016, 4015, 4017, 4019, 4018, 4020, 4022, 4021, 4023, 4141, 4144, 1491, 1491, 1491, 4146, 4148, 4150, 4152, 4154, 4159, 4041, 4040, 4134, 4042, 4044, 4043, 4046, 4045, 4047, 4048, 4060, 4059, 4062, 4061, 4171, 4064, 4063, 4100, 4101, 4102, 4066, 4065, 4068, 4067, 4179, 1491, 1491, 4070, 4069, 4181, 1341, 1341, 4186, 1507, 4091, 4074, 4188, 1220, 1220, 4194, 4088, 4085, 4196, 4088, 4088, 4089, 4101, 4102, 4198, 4200, 4091, 4090, 4093, 4092, 4095, 4094, 4100, 4101, 4102, 1341, 1341, 1341, 4105, 4104, 1507, 4134, 4107, 1522, 4123, 4125, 1491, 1491, 1491, 4211, 4131, 4130, 1507, 4134, 4133, 1522, 4137, 4136, 4139, 4138, 4142, 4142, 4142, 4142, 4142, 4142, 4175, 4192, 4157, 4157, 4157, 4157, 4157, 4157, 4162, 4192, 4163, 4165, 4164, 4167, 4166, 4168, 4192, 4169, 4216, 4219, 4192, 4172, 4192, 4173, 4175, 4174, 4177, 4176, 4216, 4215, 4189, 4191, 4190, 4220, 4183, 4182, 4184, 4216, 4215, 4189, 4191, 4190, 4220, 4192, 4223, 4224, 4212, 4212, 4203, 4202, 4232, 4205, 4234, 4207, 4236, 4239, 4212, 4212, 4214, 4214, 4216, 4215, 4217, 4219, 4218, 4220, 4221, 4222, 4223, 4224, 4229, 4229, 4225, 4225, 4226, 4226, 4229, 4229, 4229, 4229, 4229, 4229, 4230, 4230, 15, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 
1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4274, 4273, 4274, 4273, 4278, 4277, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4142, 2188, 2189, 4142, 2191, 2192, 4142, 2194, 2195, 2198, 2199, 4157, 2265, 2266, 4557, 4556, 4157, 2270, 2271, 4575, 4559, 4157, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4585, 4585, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 
2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4705, 4703, 2612, 2613, 4706, 4703, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4701, 4701, 4706, 4705, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 4737, 4739, 4741, 4743, 4745, 4747, 4749, 4752, 4754, 4756, 4758, 4760, 4762, 4765, 4767, 4769, 4771, 4773, 4775, 4778, 4781, 4783, 4785, 4793, 4795, 4797, 4800, 4803, 4805, 4807, 4816, 4819, 4822, 4824, 4826, 4829, 4832, 4834, 4840, 4845, 4847, 4851, 4854, 4856, 4859, 4863, 4869, 4872, 4874, 4876, 4885, 4887, 4891, 4893, 4896, 4899, 4901, 4904, 4908, 4913, 4916, 4918, 4920, 4923, 4925, 4927, 4929, 4931, 4934, 4937, 4939, 4941, 4944, 4947, 4954, 4956, 4958, 4962, 4964, 4966, 4971, 4973, 4975, 4978, 4980, 4982, 4984, 4986, 4988, 4990, 4992, 4994, 4996, 4999, 5001, 5003, 5006, 5009, 5012, 4951, 4750, 4035, 1582, 1583, 1588, 1589, 4951, 4763, 4968, 1608, 1609, 4951, 4763, 4035, 4037, 4951, 4763, 4968, 5024, 5026, 5028, 5030, 4313, 4311, 4790, 4788, 4336, 4336, 4813, 4811, 5034, 5036, 5038, 5043, 5045, 5049, 4951, 4878, 4882, 4881, 4835, 4836, 4852, 4866, 4878, 4951, 4842, 4842, 4841, 4843, 4852, 4951, 4878, 4849, 4951, 4852, 4878, 4857, 4866, 4878, 4882, 4881, 4880, 4883, 5054, 4951, 4949, 4960, 4968, 4960, 4968, 4951, 4949, 4968, 4431, 4430, 4431, 4431, 4443, 4443, 5058, 4443, 4443, 5060, 5065, 5067, 5069, 5077, 5080, 4951, 4949, 4968, 5088, 5091, 5094, 5096, 4575, 4550, 2187, 2190, 2193, 5016, 5015, 5014, 5082, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5123, 5125, 5127, 5130, 4575, 4575, 4575, 4575, 5134, 5136, 5138, 5041, 5040, 5039, 5140, 5047, 5046, 2324, 5047, 5046, 2330, 5082, 5082, 5051, 5074, 5050, 4610, 4593, 5056, 5055, 5144, 5147, 5150, 5051, 5074, 5050, 4610, 4593, 5056, 5055, 5153, 5156, 5159, 5063, 5062, 5061, 4610, 4609, 5164, 5072, 5071, 5070, 5075, 5074, 5073, 5082, 5086, 5085, 5084, 5168, 5170, 5172, 5175, 4229, 5132, 5131, 2597, 2598, 4225, 5107, 5106, 2617, 2618, 5132, 5131, 5179, 
5128, 4226, 4229, 5132, 5131, 4229, 4229, 4230, 4701, 2815, 2816, 2826, 2827, 4703, 5179, 5178, 5177, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 5217, 5216, 5218, 5220, 5219, 5221, 5283, 5282, 5257, 5284, 5285, 5256, 5289, 5222, 1572, 1573, 5292, 5291, 5223, 4034, 5295, 5295, 5294, 1581, 5319, 5301, 5300, 5299, 4036, 5321, 5283, 5282, 5266, 5284, 5285, 5256, 5289, 5228, 1598, 1599, 5292, 5291, 5229, 4960, 5295, 5295, 5294, 1607, 5326, 5231, 5301, 5299, 5312, 5225, 5314, 5313, 5283, 5282, 5281, 5284, 5285, 5256, 5289, 5228, 1625, 1626, 5292, 5291, 5226, 4034, 5295, 5295, 5294, 1634, 5298, 5231, 5301, 5271, 5289, 5228, 5292, 5291, 5229, 4036, 5295, 5295, 5294, 1648, 5298, 5231, 5301, 5271, 5285, 5256, 5283, 5282, 5281, 5284, 5289, 5228, 1661, 1662, 5295, 5295, 5294, 1666, 5292, 5291, 5229, 4960, 5297, 5268, 5231, 5301, 5271, 5312, 5311, 5234, 5233, 5232, 5235, 5236, 5238, 1739, 1740, 1741, 1742, 5241, 5240, 5239, 5242, 5243, 5245, 1749, 1750, 1751, 1752, 5247, 5246, 5249, 5248, 5251, 5250, 5253, 5252, 5283, 5281, 5260, 5284, 5285, 5256, 5289, 5261, 5262, 1820, 5265, 5264, 5263, 1824, 1825, 1826, 1827, 1828, 5283, 5281, 5255, 5284, 5285, 5256, 5289, 5261, 1837, 1838, 5265, 5264, 5258, 1842, 5298, 5297, 5296, 5254, 5283, 5281, 5255, 5262, 1851, 5265, 5264, 5263, 1855, 1856, 1857, 1858, 5283, 5281, 5255, 5284, 5285, 5256, 5289, 5261, 1867, 1868, 5265, 5264, 5258, 1872, 5298, 5297, 5259, 1876, 5283, 5282, 5257, 5289, 5261, 1882, 1883, 5265, 5264, 5258, 1887, 5298, 5297, 5259, 1891, 5283, 5260, 5266, 5284, 5287, 5289, 5261, 5262, 1900, 5265, 5264, 5263, 1904, 1905, 1906, 1907, 1908, 5279, 5276, 5283, 5282, 5266, 5284, 5285, 5287, 5270, 5269, 1960, 1961, 5291, 5267, 1964, 1965, 5298, 5297, 5268, 5301, 5271, 5300, 5291, 5290, 1974, 5295, 5294, 1977, 5298, 5297, 5268, 5283, 5282, 5281, 5284, 5285, 5287, 5270, 5269, 1989, 1990, 5292, 4960, 5295, 1994, 5298, 5297, 5296, 5301, 5271, 5300, 5277, 5280, 2003, 2004, 2005, 2006, 5272, 5273, 2009, 2010, 5274, 2016, 2017, 
5275, 5277, 5276, 5278, 5280, 5279, 5283, 5282, 5281, 5284, 5285, 5287, 5289, 5288, 2086, 2087, 5292, 5291, 5290, 4960, 5295, 5295, 5294, 2095, 5298, 5297, 5296, 5301, 5300, 5299, 5302, 5304, 5305, 5307, 5308, 5309, 5312, 5311, 5314, 5313, 2185, 2186, 5098, 5101, 5104, 5380, 2200, 2201, 2202, 2226, 5109, 5423, 5114, 5426, 5119, 5335, 5334, 5336, 5337, 5410, 5411, 5346, 2303, 2304, 2305, 2306, 5348, 2316, 2317, 2318, 5411, 2322, 2323, 5404, 5349, 5380, 2328, 2329, 5404, 5403, 5380, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5380, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5380, 2415, 2416, 4600, 4600, 4600, 4603, 2474, 2475, 2476, 2477, 2478, 5400, 5401, 5402, 5410, 5411, 2494, 2495, 2496, 2500, 2501, 2502, 5404, 5403, 5410, 5411, 2526, 2527, 2528, 2529, 5409, 5408, 5410, 5411, 2564, 5442, 2582, 2583, 5437, 5430, 5429, 5428, 5490, 5489, 5495, 5490, 5489, 2611, 2614, 2615, 5500, 2648, 2649, 5437, 5430, 5429, 5428, 2656, 2657, 2667, 5437, 5438, 5431, 5459, 5458, 5460, 2695, 5437, 5436, 2703, 2704, 5438, 5437, 5436, 2722, 5442, 5459, 5458, 5460, 5469, 5468, 5470, 5459, 5458, 5460, 5469, 5468, 5470, 2802, 5476, 2809, 2814, 5490, 5489, 2828, 5516, 5490, 5489, 2838, 2839, 2840, 9, 10, 11, 12, 13, 14, 15, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5551, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5575, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5601, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5637, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 5660, 5662, 1743, 1744, 1745, 1746, 1747, 1748, 5670, 5672, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 
1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 5696, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 5708, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 5726, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 5738, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 5753, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 5776, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 5790, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 5819, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 5833, 5835, 2007, 2008, 5839, 2015, 5842, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 5858, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 5884, 5099, 5102, 5105, 2196, 5890, 5110, 5115, 5120, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 5906, 5908, 2307, 5911, 2319, 5915, 2325, 2326, 2327, 5920, 2331, 2332, 2333, 5927, 5930, 2397, 5933, 5935, 5938, 2414, 5941, 2437, 2458, 2471, 2473, 5947, 5950, 2479, 2480, 2482, 2489, 2490, 5957, 5960, 2503, 2504, 2505, 2506, 5968, 2530, 2531, 2532, 2535, 5181, 2567, 5977, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5185, 5989, 5992, 2650, 2651, 2652, 2653, 5998, 5189, 2670, 2671, 2672, 2681, 2682, 2683, 5191, 2698, 2699, 6010, 2705, 2706, 2707, 5193, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5195, 2805, 5197, 5513, 2824, 2825, 6034, 2836, 2837, 6039, 12, 13, 14, 15, 6049, 6052, 6055, 6061, 6064, 6068, 6071, 6075, 6081, 6084, 6088, 6091, 6094, 6096, 6098, 6104, 6107, 6111, 6115, 6118, 6120, 6124, 6128, 6133, 6137, 6140, 6143, 6147, 6149, 6152, 6154, 6162, 6170, 6172, 6174, 6176, 6178, 6184, 5690, 6187, 5697, 6191, 6197, 6200, 6203, 6207, 5721, 6211, 
5727, 6215, 6221, 6224, 6227, 6230, 6233, 6236, 6239, 6242, 6247, 5770, 6250, 5777, 6256, 6262, 6265, 6267, 6270, 6273, 6275, 6277, 6280, 6286, 6292, 6295, 6308, 6311, 6313, 6319, 6322, 6326, 6329, 6332, 6341, 6343, 5891, 6101, 6058, 6078, 6336, 6334, 6338, 6058, 6283, 6078, 6101, 6130, 6338, 6354, 6157, 6159, 6165, 6167, 5912, 5445, 6367, 5448, 6371, 6181, 6218, 6283, 6194, 6334, 6181, 6194, 6283, 6218, 6283, 6283, 6336, 5928, 5936, 6316, 6336, 6334, 6301, 6309, 6302, 6259, 6283, 6290, 6288, 6336, 6334, 6309, 6306, 6302, 6304, 6316, 6336, 6334, 6302, 6304, 5948, 5958, 5961, 6395, 6316, 6336, 6334, 6338, 5969, 6400, 6364, 6389, 6388, 5182, 6346, 6345, 6364, 6356, 6355, 6407, 6409, 6397, 6396, 6411, 6348, 6348, 6391, 6390, 6413, 5186, 6351, 6350, 6364, 6356, 6355, 6418, 6420, 6358, 6357, 6364, 6362, 6389, 5190, 6424, 6368, 6427, 6372, 6364, 6389, 6388, 5192, 6431, 6362, 6389, 6387, 6434, 6364, 6389, 6388, 5194, 6368, 6439, 6372, 6442, 6375, 6445, 6379, 6448, 6389, 6388, 6387, 5196, 6391, 6390, 5198, 5514, 6397, 6396, 6455, 6402, 6401, 6458, 6040, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6056, 6065, 6069, 6072, 6076, 6085, 6089, 6092, 6099, 6108, 6112, 6116, 6121, 6125, 6129, 6134, 6141, 6144, 6150, 6155, 6163, 6179, 6188, 6192, 6201, 6204, 6208, 6212, 6216, 6225, 6228, 6231, 6237, 6240, 6243, 6251, 6257, 6268, 6271, 6278, 6281, 6293, 6296, 6314, 6323, 6327, 6330, 6333, 2203, 6479, 2209, 6467, 2215, 6472, 2221, 2222, 2223, 6465, 6464, 2227, 6467, 2233, 6483, 2239, 6472, 6476, 2246, 6479, 6483, 2257, 6488, 2262, 6493, 2291, 2293, 2294, 2296, 6498, 6496, 2334, 6501, 6504, 2339, 6514, 2344, 6522, 6525, 2349, 6506, 2354, 6301, 6309, 6546, 2359, 6501, 6504, 2364, 6506, 2369, 6518, 6512, 2374, 6514, 2379, 6518, 2384, 6522, 6525, 2389, 6546, 2426, 6541, 2432, 2433, 2434, 2435, 2436, 2438, 6527, 6532, 6528, 6532, 6531, 2447, 6535, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6541, 2466, 2467, 6301, 6309, 2470, 2472, 6309, 6306, 2516, 6541, 2522, 2523, 
2524, 6546, 6607, 2561, 2562, 2563, 6347, 2577, 2578, 2579, 2580, 2581, 6626, 6548, 2593, 2594, 6583, 2600, 2601, 6614, 2607, 2608, 6548, 6352, 2643, 2644, 2645, 2646, 2647, 6642, 2654, 2655, 6607, 2664, 2665, 2666, 6425, 6583, 2680, 6584, 2685, 6607, 2692, 2693, 2694, 2700, 2701, 2702, 6435, 6566, 2719, 2720, 2721, 6567, 2737, 6569, 2742, 6583, 2771, 6584, 2776, 6606, 2799, 2800, 2801, 6614, 2807, 2808, 6607, 6608, 2822, 2823, 6614, 2834, 2835, 6619, 6629, 6634, 6684, 6684, 6684, 6651, 6656, 6665, 6667, 6669, 6671, 6673, 6677, 6684, 6684, 6684, 6687, 6712, 2205, 6714, 6713, 6113, 6704, 2211, 6706, 6705, 5560, 6708, 2217, 6710, 6709, 5584, 6759, 2224, 2225, 6704, 2229, 6706, 6705, 5560, 6735, 2235, 6717, 6716, 5565, 6708, 2241, 6710, 6709, 5584, 2245, 6712, 2248, 6714, 6713, 6113, 2252, 6717, 6716, 6126, 6719, 2258, 6721, 6720, 6491, 2263, 6723, 6724, 2297, 2298, 6725, 2336, 6726, 2338, 6732, 2341, 6733, 6734, 6738, 2346, 6739, 2348, 6727, 2351, 6728, 6729, 2355, 2356, 2357, 6725, 2361, 6726, 2363, 6727, 2366, 6728, 6729, 6730, 2371, 6731, 2373, 6732, 2376, 6733, 6734, 6735, 2381, 6736, 6737, 6738, 2386, 6739, 2388, 2390, 6747, 2428, 6749, 6748, 6750, 6817, 6819, 6740, 2440, 2441, 2442, 6741, 2444, 2445, 6743, 6744, 2449, 6830, 6745, 6832, 6834, 6747, 2462, 6749, 6748, 6750, 6840, 2468, 2469, 2492, 2493, 6747, 2518, 6749, 6748, 6750, 6850, 2525, 6836, 6820, 6836, 6835, 2560, 6855, 2576, 6861, 2592, 6866, 2599, 2606, 6872, 2616, 2642, 6878, 6882, 2663, 6885, 2679, 2684, 6836, 6820, 2691, 6894, 6897, 6836, 6820, 6836, 6835, 2718, 6902, 2736, 2741, 2770, 2775, 6836, 6820, 6836, 6835, 6844, 6843, 2798, 6914, 2806, 6918, 2813, 2821, 6922, 2833, 6925, 2848, 6405, 2860, 2866, 2868, 2870, 6416, 2890, 6648, 2903, 2908, 6432, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 2962, 2966, 2970, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 6961, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 
2256, 2259, 2260, 2261, 2292, 2295, 6996, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 7014, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 2429, 2430, 2431, 2439, 7051, 2443, 7054, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7069, 7071, 2517, 2519, 2520, 2521, 7046, 2553, 2554, 7060, 2558, 2559, 6856, 6959, 6858, 6862, 7077, 7078, 7077, 7078, 6959, 6977, 7067, 6992, 6875, 6879, 6886, 7015, 7046, 2689, 2690, 6895, 6898, 7046, 2711, 2712, 7060, 2716, 2717, 6903, 7015, 7015, 7040, 7046, 2783, 2784, 7060, 2791, 2792, 2796, 2797, 6915, 7077, 7078, 7077, 7078, 2856, 7087, 7089, 7114, 7090, 7125, 7092, 2888, 7126, 2894, 7098, 7099, 2910, 7111, 7112, 7113, 7114, 7123, 7125, 7126, 7128, 13, 14, 15, 7152, 7154, 7156, 7158, 7160, 7162, 7165, 7167, 7169, 7171, 7173, 7175, 7177, 7179, 7182, 6773, 7186, 7188, 7189, 7191, 7193, 7196, 7198, 7202, 7204, 7207, 7209, 7212, 7215, 7217, 7219, 7221, 7226, 7228, 7230, 7234, 7236, 2552, 7240, 7222, 2557, 7243, 2574, 7164, 6859, 2590, 2591, 2604, 2605, 2625, 7164, 2633, 2640, 2641, 6876, 7077, 7190, 7199, 7197, 6793, 2678, 2688, 7262, 2710, 7267, 7222, 2715, 7270, 7197, 7194, 7199, 7192, 6793, 2735, 7192, 7194, 7197, 7199, 6793, 2755, 7203, 7205, 7208, 7210, 7213, 7216, 6812, 2769, 2782, 7277, 7222, 7224, 7058, 2790, 7280, 7067, 7282, 7077, 2819, 2820, 2831, 2832, 7083, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7096, 2902, 2904, 7102, 7126, 7109, 2926, 2928, 2944, 2946, 7121, 2958, 2961, 2965, 2969, 13, 14, 15, 7341, 7342, 7343, 2556, 7314, 7315, 7312, 7313, 7316, 7317, 2575, 7347, 7348, 7347, 7348, 7312, 7313, 7314, 7315, 7316, 7317, 2626, 7318, 7319, 7320, 7321, 7322, 7323, 7324, 7325, 7339, 7326, 7327, 7328, 7330, 7329, 7348, 2661, 2662, 7334, 2674, 7333, 2676, 2677, 7341, 7342, 7341, 7342, 7343, 2714, 7333, 2727, 7332, 2729, 7334, 2731, 7331, 2733, 2734, 7331, 2747, 7332, 2749, 7333, 2751, 7334, 2753, 2754, 7335, 2757, 7336, 2759, 7337, 2761, 7338, 2763, 7339, 2765, 7340, 
2767, 2768, 7341, 7342, 7343, 2786, 2787, 7344, 2789, 7345, 7346, 2795, 2812, 7347, 7348, 7347, 7348, 7349, 7352, 2847, 7356, 7357, 7359, 7364, 7364, 7366, 2893, 7373, 2907, 2909, 7375, 7378, 2917, 7400, 7405, 2956, 7410, 7412, 7415, 7417, 7416, 7418, 7419, 7420, 7421, 7424, 7423, 7429, 7428, 7431, 7430, 7433, 7434, 7435, 7436, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7354, 2855, 2858, 2864, 7361, 2882, 2886, 2887, 7477, 7483, 2906, 2912, 2916, 7498, 7507, 7520, 2949, 2953, 7530, 7531, 2964, 2968, 7538, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 7545, 2996, 2997, 7547, 7548, 7551, 3009, 3010, 3013, 3014, 7554, 3019, 3021, 3023, 3025, 7616, 7584, 7642, 7642, 7586, 7591, 7605, 7587, 7611, 7589, 2854, 7593, 7595, 7597, 7611, 7599, 7605, 7601, 2876, 7611, 7603, 7605, 7609, 7607, 7613, 7611, 7609, 2892, 7635, 7619, 7618, 7637, 7629, 7630, 2901, 7620, 7622, 7642, 7642, 7624, 7626, 7628, 7637, 7625, 7627, 7635, 2925, 7630, 7632, 7637, 7629, 7635, 7631, 2936, 7634, 7635, 7633, 7638, 7637, 7636, 2943, 7639, 7642, 7642, 7641, 7643, 2955, 2960, 7645, 7647, 2974, 7652, 7676, 7658, 2993, 7683, 3001, 3002, 3006, 7688, 7690, 3018, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 2841, 2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 7696, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 15, 7794, 7797, 7799, 7804, 7806, 7809, 
7811, 7814, 2891, 7818, 7820, 7822, 7826, 7829, 7831, 7833, 7835, 7837, 7839, 7841, 7843, 7845, 7848, 7850, 7792, 7801, 7802, 7850, 7823, 7850, 7824, 7850, 7846, 7851, 7851, 7852, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7795, 7858, 7860, 7862, 7815, 7866, 7827, 7870, 7873, 7876, 7849, 2971, 2973, 2979, 2983, 7864, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 9, 10, 11, 12, 13, 14, 15, 7800, 7807, 7812, 7867, 7871, 7874, 7877, 7904, 7908, 7908, 2992, 7910, 7910, 7914, 7918, 7926, 7927, 7927, 7917, 7928, 7926, 7927, 7927, 7926, 8, 9, 10, 11, 12, 13, 14, 15, 2972, 2975, 7938, 7936, 2987, 7938, 7937, 7942, 7939, 2999, 3004, 7942, 7940, 7942, 7941, 3016, 3026, 3028, 3029, 3030, 3034, 7946, 3037, 3038, 3040, 3041, 3045, 11, 12, 13, 14, 15, 7968, 2976, 2977, 2988, 2989, 2994, 2995, 7977, 7978, 3007, 3008, 3011, 3012, 7983, 3036, 7986, 7993, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7916, 8001, 8003, 8006, 7921, 7923, 8010, 8012, 7925, 7990, 10, 11, 12, 13, 14, 15, 8002, 8004, 8035, 8032, 8038, 8040, 8036, 8037, 8039, 8035, 10, 11, 12, 13, 14, 15, 3027, 8048, 3032, 3033, 3035, 3039, 3042, 3043, 8049, 3046, 10, 11, 12, 13, 14, 15, 3031, 3044, 8064, 8067, 8068, 8069, 8071, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8080, 7994, 8015, 8041, 8016, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8083, 8097, 8100, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8112, 8073, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8114, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8129, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
// Opcode selector table consumed by ac() as its `Op` argument: for each
// operation slot k, Op[k]==1 (true) selects multiply (R[B[k]] * R[C[k]]) and
// Op[k]==0 (false) selects add (R[B[k]] + R[C[k]]) -- see the ternaries in ac().
// NOTE(review): the `h_` prefix suggests this is host-side data presumably
// copied to the device before launching ac() -- confirm against the host setup
// code outside this chunk.
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 
0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Launch configuration and problem-size constants for the ac() device routine.
#define THREADS_PER_BLOCK 16   // block width; ac() strides all array accesses by this value (its local `t`)
#define BLOCKS_PER_GRID 1      // single block only: ac() communicates exclusively through block-scoped __shared__ memory
#define SIZE_OF_IN 3056        // number of input values: ac() loads 191 rows of THREADS_PER_BLOCK elements (191*16 = 3056)
#define SIZE_OF_AC 5120        // presumably the operation count (320*16); matches R's 511*t capacity = 191 input slots + 320 result slots -- TODO confirm against the full ac() body
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[511*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
R[i + 52*t] = A[i + 52*t];
R[i + 53*t] = A[i + 53*t];
R[i + 54*t] = A[i + 54*t];
R[i + 55*t] = A[i + 55*t];
R[i + 56*t] = A[i + 56*t];
R[i + 57*t] = A[i + 57*t];
R[i + 58*t] = A[i + 58*t];
R[i + 59*t] = A[i + 59*t];
R[i + 60*t] = A[i + 60*t];
R[i + 61*t] = A[i + 61*t];
R[i + 62*t] = A[i + 62*t];
R[i + 63*t] = A[i + 63*t];
R[i + 64*t] = A[i + 64*t];
R[i + 65*t] = A[i + 65*t];
R[i + 66*t] = A[i + 66*t];
R[i + 67*t] = A[i + 67*t];
R[i + 68*t] = A[i + 68*t];
R[i + 69*t] = A[i + 69*t];
R[i + 70*t] = A[i + 70*t];
R[i + 71*t] = A[i + 71*t];
R[i + 72*t] = A[i + 72*t];
R[i + 73*t] = A[i + 73*t];
R[i + 74*t] = A[i + 74*t];
R[i + 75*t] = A[i + 75*t];
R[i + 76*t] = A[i + 76*t];
R[i + 77*t] = A[i + 77*t];
R[i + 78*t] = A[i + 78*t];
R[i + 79*t] = A[i + 79*t];
R[i + 80*t] = A[i + 80*t];
R[i + 81*t] = A[i + 81*t];
R[i + 82*t] = A[i + 82*t];
R[i + 83*t] = A[i + 83*t];
R[i + 84*t] = A[i + 84*t];
R[i + 85*t] = A[i + 85*t];
R[i + 86*t] = A[i + 86*t];
R[i + 87*t] = A[i + 87*t];
R[i + 88*t] = A[i + 88*t];
R[i + 89*t] = A[i + 89*t];
R[i + 90*t] = A[i + 90*t];
R[i + 91*t] = A[i + 91*t];
R[i + 92*t] = A[i + 92*t];
R[i + 93*t] = A[i + 93*t];
R[i + 94*t] = A[i + 94*t];
R[i + 95*t] = A[i + 95*t];
R[i + 96*t] = A[i + 96*t];
R[i + 97*t] = A[i + 97*t];
R[i + 98*t] = A[i + 98*t];
R[i + 99*t] = A[i + 99*t];
R[i + 100*t] = A[i + 100*t];
R[i + 101*t] = A[i + 101*t];
R[i + 102*t] = A[i + 102*t];
R[i + 103*t] = A[i + 103*t];
R[i + 104*t] = A[i + 104*t];
R[i + 105*t] = A[i + 105*t];
R[i + 106*t] = A[i + 106*t];
R[i + 107*t] = A[i + 107*t];
R[i + 108*t] = A[i + 108*t];
R[i + 109*t] = A[i + 109*t];
R[i + 110*t] = A[i + 110*t];
R[i + 111*t] = A[i + 111*t];
R[i + 112*t] = A[i + 112*t];
R[i + 113*t] = A[i + 113*t];
R[i + 114*t] = A[i + 114*t];
R[i + 115*t] = A[i + 115*t];
R[i + 116*t] = A[i + 116*t];
R[i + 117*t] = A[i + 117*t];
R[i + 118*t] = A[i + 118*t];
R[i + 119*t] = A[i + 119*t];
R[i + 120*t] = A[i + 120*t];
R[i + 121*t] = A[i + 121*t];
R[i + 122*t] = A[i + 122*t];
R[i + 123*t] = A[i + 123*t];
R[i + 124*t] = A[i + 124*t];
R[i + 125*t] = A[i + 125*t];
R[i + 126*t] = A[i + 126*t];
R[i + 127*t] = A[i + 127*t];
R[i + 128*t] = A[i + 128*t];
R[i + 129*t] = A[i + 129*t];
R[i + 130*t] = A[i + 130*t];
R[i + 131*t] = A[i + 131*t];
R[i + 132*t] = A[i + 132*t];
R[i + 133*t] = A[i + 133*t];
R[i + 134*t] = A[i + 134*t];
R[i + 135*t] = A[i + 135*t];
R[i + 136*t] = A[i + 136*t];
R[i + 137*t] = A[i + 137*t];
R[i + 138*t] = A[i + 138*t];
R[i + 139*t] = A[i + 139*t];
R[i + 140*t] = A[i + 140*t];
R[i + 141*t] = A[i + 141*t];
R[i + 142*t] = A[i + 142*t];
R[i + 143*t] = A[i + 143*t];
R[i + 144*t] = A[i + 144*t];
R[i + 145*t] = A[i + 145*t];
R[i + 146*t] = A[i + 146*t];
R[i + 147*t] = A[i + 147*t];
R[i + 148*t] = A[i + 148*t];
R[i + 149*t] = A[i + 149*t];
R[i + 150*t] = A[i + 150*t];
R[i + 151*t] = A[i + 151*t];
R[i + 152*t] = A[i + 152*t];
R[i + 153*t] = A[i + 153*t];
R[i + 154*t] = A[i + 154*t];
R[i + 155*t] = A[i + 155*t];
R[i + 156*t] = A[i + 156*t];
R[i + 157*t] = A[i + 157*t];
R[i + 158*t] = A[i + 158*t];
R[i + 159*t] = A[i + 159*t];
R[i + 160*t] = A[i + 160*t];
R[i + 161*t] = A[i + 161*t];
R[i + 162*t] = A[i + 162*t];
R[i + 163*t] = A[i + 163*t];
R[i + 164*t] = A[i + 164*t];
R[i + 165*t] = A[i + 165*t];
R[i + 166*t] = A[i + 166*t];
R[i + 167*t] = A[i + 167*t];
R[i + 168*t] = A[i + 168*t];
R[i + 169*t] = A[i + 169*t];
R[i + 170*t] = A[i + 170*t];
R[i + 171*t] = A[i + 171*t];
R[i + 172*t] = A[i + 172*t];
R[i + 173*t] = A[i + 173*t];
R[i + 174*t] = A[i + 174*t];
R[i + 175*t] = A[i + 175*t];
R[i + 176*t] = A[i + 176*t];
R[i + 177*t] = A[i + 177*t];
R[i + 178*t] = A[i + 178*t];
R[i + 179*t] = A[i + 179*t];
R[i + 180*t] = A[i + 180*t];
R[i + 181*t] = A[i + 181*t];
R[i + 182*t] = A[i + 182*t];
R[i + 183*t] = A[i + 183*t];
R[i + 184*t] = A[i + 184*t];
R[i + 185*t] = A[i + 185*t];
R[i + 186*t] = A[i + 186*t];
R[i + 187*t] = A[i + 187*t];
R[i + 188*t] = A[i + 188*t];
R[i + 189*t] = A[i + 189*t];
R[i + 190*t] = A[i + 190*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
R[i + 191*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 192*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 193*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 194*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 195*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 196*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 197*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 198*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 199*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 200*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 201*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 202*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 203*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 204*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 205*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 206*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 207*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 208*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 209*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 210*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 211*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 212*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 213*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 214*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 215*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 216*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 217*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 218*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 219*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 220*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 221*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 222*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 223*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 224*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 225*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 226*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 227*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 228*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 229*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 230*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 231*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 232*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 233*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 234*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 235*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 236*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 237*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
__syncthreads();
R[i + 238*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 239*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 240*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 241*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 242*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 243*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 244*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 245*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 246*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 247*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 248*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 249*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 250*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 251*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 252*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 253*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 254*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 255*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 256*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 257*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 258*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 259*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 260*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 261*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 262*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 263*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 264*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
R[i + 265*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
__syncthreads();
R[i + 266*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 267*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 268*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 269*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
R[i + 270*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 271*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 272*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 273*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
R[i + 274*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 275*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 276*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 277*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
R[i + 278*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 279*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 280*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 281*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
R[i + 282*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 283*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 284*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
R[i + 285*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 286*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
R[i + 287*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
R[i + 288*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
R[i + 289*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
R[i + 290*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
R[i + 291*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
R[i + 292*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
R[i + 293*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
R[i + 294*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
R[i + 295*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
__syncthreads();
R[i + 296*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
R[i + 297*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
R[i + 298*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
R[i + 299*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
R[i + 300*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
R[i + 301*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
R[i + 302*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
R[i + 303*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
R[i + 304*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]];
R[i + 305*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]];
R[i + 306*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]];
R[i + 307*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]];
R[i + 308*t] = Op[i + 117*t] ? R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]];
R[i + 309*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]];
R[i + 310*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]];
R[i + 311*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]];
R[i + 312*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]];
R[i + 313*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]];
R[i + 314*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]];
R[i + 315*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]];
R[i + 316*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]];
R[i + 317*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]];
R[i + 318*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]];
R[i + 319*t] = Op[i + 128*t] ? R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]];
R[i + 320*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]];
R[i + 321*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]];
R[i + 322*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]];
R[i + 323*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]];
R[i + 324*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]];
R[i + 325*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]];
__syncthreads();
R[i + 326*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]];
R[i + 327*t] = Op[i + 136*t] ? R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]];
R[i + 328*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]];
R[i + 329*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]];
R[i + 330*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]];
R[i + 331*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]];
R[i + 332*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]];
R[i + 333*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]];
R[i + 334*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]];
R[i + 335*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]];
R[i + 336*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]];
R[i + 337*t] = Op[i + 146*t] ? R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]];
R[i + 338*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]];
R[i + 339*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]];
R[i + 340*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]];
R[i + 341*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]];
R[i + 342*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]];
R[i + 343*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]];
R[i + 344*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]];
R[i + 345*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]];
__syncthreads();
R[i + 346*t] = Op[i + 155*t] ? R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]];
R[i + 347*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]];
R[i + 348*t] = Op[i + 157*t] ? R[B[i + 157*t]] * R[C[i + 157*t]] : R[B[i + 157*t]] + R[C[i + 157*t]];
R[i + 349*t] = Op[i + 158*t] ? R[B[i + 158*t]] * R[C[i + 158*t]] : R[B[i + 158*t]] + R[C[i + 158*t]];
R[i + 350*t] = Op[i + 159*t] ? R[B[i + 159*t]] * R[C[i + 159*t]] : R[B[i + 159*t]] + R[C[i + 159*t]];
R[i + 351*t] = Op[i + 160*t] ? R[B[i + 160*t]] * R[C[i + 160*t]] : R[B[i + 160*t]] + R[C[i + 160*t]];
R[i + 352*t] = Op[i + 161*t] ? R[B[i + 161*t]] * R[C[i + 161*t]] : R[B[i + 161*t]] + R[C[i + 161*t]];
R[i + 353*t] = Op[i + 162*t] ? R[B[i + 162*t]] * R[C[i + 162*t]] : R[B[i + 162*t]] + R[C[i + 162*t]];
R[i + 354*t] = Op[i + 163*t] ? R[B[i + 163*t]] * R[C[i + 163*t]] : R[B[i + 163*t]] + R[C[i + 163*t]];
R[i + 355*t] = Op[i + 164*t] ? R[B[i + 164*t]] * R[C[i + 164*t]] : R[B[i + 164*t]] + R[C[i + 164*t]];
R[i + 356*t] = Op[i + 165*t] ? R[B[i + 165*t]] * R[C[i + 165*t]] : R[B[i + 165*t]] + R[C[i + 165*t]];
R[i + 357*t] = Op[i + 166*t] ? R[B[i + 166*t]] * R[C[i + 166*t]] : R[B[i + 166*t]] + R[C[i + 166*t]];
R[i + 358*t] = Op[i + 167*t] ? R[B[i + 167*t]] * R[C[i + 167*t]] : R[B[i + 167*t]] + R[C[i + 167*t]];
R[i + 359*t] = Op[i + 168*t] ? R[B[i + 168*t]] * R[C[i + 168*t]] : R[B[i + 168*t]] + R[C[i + 168*t]];
R[i + 360*t] = Op[i + 169*t] ? R[B[i + 169*t]] * R[C[i + 169*t]] : R[B[i + 169*t]] + R[C[i + 169*t]];
R[i + 361*t] = Op[i + 170*t] ? R[B[i + 170*t]] * R[C[i + 170*t]] : R[B[i + 170*t]] + R[C[i + 170*t]];
R[i + 362*t] = Op[i + 171*t] ? R[B[i + 171*t]] * R[C[i + 171*t]] : R[B[i + 171*t]] + R[C[i + 171*t]];
R[i + 363*t] = Op[i + 172*t] ? R[B[i + 172*t]] * R[C[i + 172*t]] : R[B[i + 172*t]] + R[C[i + 172*t]];
R[i + 364*t] = Op[i + 173*t] ? R[B[i + 173*t]] * R[C[i + 173*t]] : R[B[i + 173*t]] + R[C[i + 173*t]];
R[i + 365*t] = Op[i + 174*t] ? R[B[i + 174*t]] * R[C[i + 174*t]] : R[B[i + 174*t]] + R[C[i + 174*t]];
R[i + 366*t] = Op[i + 175*t] ? R[B[i + 175*t]] * R[C[i + 175*t]] : R[B[i + 175*t]] + R[C[i + 175*t]];
R[i + 367*t] = Op[i + 176*t] ? R[B[i + 176*t]] * R[C[i + 176*t]] : R[B[i + 176*t]] + R[C[i + 176*t]];
R[i + 368*t] = Op[i + 177*t] ? R[B[i + 177*t]] * R[C[i + 177*t]] : R[B[i + 177*t]] + R[C[i + 177*t]];
R[i + 369*t] = Op[i + 178*t] ? R[B[i + 178*t]] * R[C[i + 178*t]] : R[B[i + 178*t]] + R[C[i + 178*t]];
R[i + 370*t] = Op[i + 179*t] ? R[B[i + 179*t]] * R[C[i + 179*t]] : R[B[i + 179*t]] + R[C[i + 179*t]];
R[i + 371*t] = Op[i + 180*t] ? R[B[i + 180*t]] * R[C[i + 180*t]] : R[B[i + 180*t]] + R[C[i + 180*t]];
R[i + 372*t] = Op[i + 181*t] ? R[B[i + 181*t]] * R[C[i + 181*t]] : R[B[i + 181*t]] + R[C[i + 181*t]];
R[i + 373*t] = Op[i + 182*t] ? R[B[i + 182*t]] * R[C[i + 182*t]] : R[B[i + 182*t]] + R[C[i + 182*t]];
R[i + 374*t] = Op[i + 183*t] ? R[B[i + 183*t]] * R[C[i + 183*t]] : R[B[i + 183*t]] + R[C[i + 183*t]];
R[i + 375*t] = Op[i + 184*t] ? R[B[i + 184*t]] * R[C[i + 184*t]] : R[B[i + 184*t]] + R[C[i + 184*t]];
R[i + 376*t] = Op[i + 185*t] ? R[B[i + 185*t]] * R[C[i + 185*t]] : R[B[i + 185*t]] + R[C[i + 185*t]];
R[i + 377*t] = Op[i + 186*t] ? R[B[i + 186*t]] * R[C[i + 186*t]] : R[B[i + 186*t]] + R[C[i + 186*t]];
__syncthreads();
R[i + 378*t] = Op[i + 187*t] ? R[B[i + 187*t]] * R[C[i + 187*t]] : R[B[i + 187*t]] + R[C[i + 187*t]];
R[i + 379*t] = Op[i + 188*t] ? R[B[i + 188*t]] * R[C[i + 188*t]] : R[B[i + 188*t]] + R[C[i + 188*t]];
R[i + 380*t] = Op[i + 189*t] ? R[B[i + 189*t]] * R[C[i + 189*t]] : R[B[i + 189*t]] + R[C[i + 189*t]];
R[i + 381*t] = Op[i + 190*t] ? R[B[i + 190*t]] * R[C[i + 190*t]] : R[B[i + 190*t]] + R[C[i + 190*t]];
R[i + 382*t] = Op[i + 191*t] ? R[B[i + 191*t]] * R[C[i + 191*t]] : R[B[i + 191*t]] + R[C[i + 191*t]];
R[i + 383*t] = Op[i + 192*t] ? R[B[i + 192*t]] * R[C[i + 192*t]] : R[B[i + 192*t]] + R[C[i + 192*t]];
R[i + 384*t] = Op[i + 193*t] ? R[B[i + 193*t]] * R[C[i + 193*t]] : R[B[i + 193*t]] + R[C[i + 193*t]];
R[i + 385*t] = Op[i + 194*t] ? R[B[i + 194*t]] * R[C[i + 194*t]] : R[B[i + 194*t]] + R[C[i + 194*t]];
R[i + 386*t] = Op[i + 195*t] ? R[B[i + 195*t]] * R[C[i + 195*t]] : R[B[i + 195*t]] + R[C[i + 195*t]];
R[i + 387*t] = Op[i + 196*t] ? R[B[i + 196*t]] * R[C[i + 196*t]] : R[B[i + 196*t]] + R[C[i + 196*t]];
R[i + 388*t] = Op[i + 197*t] ? R[B[i + 197*t]] * R[C[i + 197*t]] : R[B[i + 197*t]] + R[C[i + 197*t]];
R[i + 389*t] = Op[i + 198*t] ? R[B[i + 198*t]] * R[C[i + 198*t]] : R[B[i + 198*t]] + R[C[i + 198*t]];
R[i + 390*t] = Op[i + 199*t] ? R[B[i + 199*t]] * R[C[i + 199*t]] : R[B[i + 199*t]] + R[C[i + 199*t]];
R[i + 391*t] = Op[i + 200*t] ? R[B[i + 200*t]] * R[C[i + 200*t]] : R[B[i + 200*t]] + R[C[i + 200*t]];
R[i + 392*t] = Op[i + 201*t] ? R[B[i + 201*t]] * R[C[i + 201*t]] : R[B[i + 201*t]] + R[C[i + 201*t]];
R[i + 393*t] = Op[i + 202*t] ? R[B[i + 202*t]] * R[C[i + 202*t]] : R[B[i + 202*t]] + R[C[i + 202*t]];
R[i + 394*t] = Op[i + 203*t] ? R[B[i + 203*t]] * R[C[i + 203*t]] : R[B[i + 203*t]] + R[C[i + 203*t]];
R[i + 395*t] = Op[i + 204*t] ? R[B[i + 204*t]] * R[C[i + 204*t]] : R[B[i + 204*t]] + R[C[i + 204*t]];
R[i + 396*t] = Op[i + 205*t] ? R[B[i + 205*t]] * R[C[i + 205*t]] : R[B[i + 205*t]] + R[C[i + 205*t]];
R[i + 397*t] = Op[i + 206*t] ? R[B[i + 206*t]] * R[C[i + 206*t]] : R[B[i + 206*t]] + R[C[i + 206*t]];
R[i + 398*t] = Op[i + 207*t] ? R[B[i + 207*t]] * R[C[i + 207*t]] : R[B[i + 207*t]] + R[C[i + 207*t]];
R[i + 399*t] = Op[i + 208*t] ? R[B[i + 208*t]] * R[C[i + 208*t]] : R[B[i + 208*t]] + R[C[i + 208*t]];
R[i + 400*t] = Op[i + 209*t] ? R[B[i + 209*t]] * R[C[i + 209*t]] : R[B[i + 209*t]] + R[C[i + 209*t]];
R[i + 401*t] = Op[i + 210*t] ? R[B[i + 210*t]] * R[C[i + 210*t]] : R[B[i + 210*t]] + R[C[i + 210*t]];
R[i + 402*t] = Op[i + 211*t] ? R[B[i + 211*t]] * R[C[i + 211*t]] : R[B[i + 211*t]] + R[C[i + 211*t]];
R[i + 403*t] = Op[i + 212*t] ? R[B[i + 212*t]] * R[C[i + 212*t]] : R[B[i + 212*t]] + R[C[i + 212*t]];
__syncthreads();
R[i + 404*t] = Op[i + 213*t] ? R[B[i + 213*t]] * R[C[i + 213*t]] : R[B[i + 213*t]] + R[C[i + 213*t]];
R[i + 405*t] = Op[i + 214*t] ? R[B[i + 214*t]] * R[C[i + 214*t]] : R[B[i + 214*t]] + R[C[i + 214*t]];
R[i + 406*t] = Op[i + 215*t] ? R[B[i + 215*t]] * R[C[i + 215*t]] : R[B[i + 215*t]] + R[C[i + 215*t]];
R[i + 407*t] = Op[i + 216*t] ? R[B[i + 216*t]] * R[C[i + 216*t]] : R[B[i + 216*t]] + R[C[i + 216*t]];
R[i + 408*t] = Op[i + 217*t] ? R[B[i + 217*t]] * R[C[i + 217*t]] : R[B[i + 217*t]] + R[C[i + 217*t]];
R[i + 409*t] = Op[i + 218*t] ? R[B[i + 218*t]] * R[C[i + 218*t]] : R[B[i + 218*t]] + R[C[i + 218*t]];
R[i + 410*t] = Op[i + 219*t] ? R[B[i + 219*t]] * R[C[i + 219*t]] : R[B[i + 219*t]] + R[C[i + 219*t]];
R[i + 411*t] = Op[i + 220*t] ? R[B[i + 220*t]] * R[C[i + 220*t]] : R[B[i + 220*t]] + R[C[i + 220*t]];
R[i + 412*t] = Op[i + 221*t] ? R[B[i + 221*t]] * R[C[i + 221*t]] : R[B[i + 221*t]] + R[C[i + 221*t]];
R[i + 413*t] = Op[i + 222*t] ? R[B[i + 222*t]] * R[C[i + 222*t]] : R[B[i + 222*t]] + R[C[i + 222*t]];
R[i + 414*t] = Op[i + 223*t] ? R[B[i + 223*t]] * R[C[i + 223*t]] : R[B[i + 223*t]] + R[C[i + 223*t]];
R[i + 415*t] = Op[i + 224*t] ? R[B[i + 224*t]] * R[C[i + 224*t]] : R[B[i + 224*t]] + R[C[i + 224*t]];
R[i + 416*t] = Op[i + 225*t] ? R[B[i + 225*t]] * R[C[i + 225*t]] : R[B[i + 225*t]] + R[C[i + 225*t]];
R[i + 417*t] = Op[i + 226*t] ? R[B[i + 226*t]] * R[C[i + 226*t]] : R[B[i + 226*t]] + R[C[i + 226*t]];
R[i + 418*t] = Op[i + 227*t] ? R[B[i + 227*t]] * R[C[i + 227*t]] : R[B[i + 227*t]] + R[C[i + 227*t]];
__syncthreads();
R[i + 419*t] = Op[i + 228*t] ? R[B[i + 228*t]] * R[C[i + 228*t]] : R[B[i + 228*t]] + R[C[i + 228*t]];
R[i + 420*t] = Op[i + 229*t] ? R[B[i + 229*t]] * R[C[i + 229*t]] : R[B[i + 229*t]] + R[C[i + 229*t]];
R[i + 421*t] = Op[i + 230*t] ? R[B[i + 230*t]] * R[C[i + 230*t]] : R[B[i + 230*t]] + R[C[i + 230*t]];
R[i + 422*t] = Op[i + 231*t] ? R[B[i + 231*t]] * R[C[i + 231*t]] : R[B[i + 231*t]] + R[C[i + 231*t]];
R[i + 423*t] = Op[i + 232*t] ? R[B[i + 232*t]] * R[C[i + 232*t]] : R[B[i + 232*t]] + R[C[i + 232*t]];
R[i + 424*t] = Op[i + 233*t] ? R[B[i + 233*t]] * R[C[i + 233*t]] : R[B[i + 233*t]] + R[C[i + 233*t]];
R[i + 425*t] = Op[i + 234*t] ? R[B[i + 234*t]] * R[C[i + 234*t]] : R[B[i + 234*t]] + R[C[i + 234*t]];
R[i + 426*t] = Op[i + 235*t] ? R[B[i + 235*t]] * R[C[i + 235*t]] : R[B[i + 235*t]] + R[C[i + 235*t]];
R[i + 427*t] = Op[i + 236*t] ? R[B[i + 236*t]] * R[C[i + 236*t]] : R[B[i + 236*t]] + R[C[i + 236*t]];
R[i + 428*t] = Op[i + 237*t] ? R[B[i + 237*t]] * R[C[i + 237*t]] : R[B[i + 237*t]] + R[C[i + 237*t]];
R[i + 429*t] = Op[i + 238*t] ? R[B[i + 238*t]] * R[C[i + 238*t]] : R[B[i + 238*t]] + R[C[i + 238*t]];
R[i + 430*t] = Op[i + 239*t] ? R[B[i + 239*t]] * R[C[i + 239*t]] : R[B[i + 239*t]] + R[C[i + 239*t]];
R[i + 431*t] = Op[i + 240*t] ? R[B[i + 240*t]] * R[C[i + 240*t]] : R[B[i + 240*t]] + R[C[i + 240*t]];
R[i + 432*t] = Op[i + 241*t] ? R[B[i + 241*t]] * R[C[i + 241*t]] : R[B[i + 241*t]] + R[C[i + 241*t]];
R[i + 433*t] = Op[i + 242*t] ? R[B[i + 242*t]] * R[C[i + 242*t]] : R[B[i + 242*t]] + R[C[i + 242*t]];
__syncthreads();
R[i + 434*t] = Op[i + 243*t] ? R[B[i + 243*t]] * R[C[i + 243*t]] : R[B[i + 243*t]] + R[C[i + 243*t]];
R[i + 435*t] = Op[i + 244*t] ? R[B[i + 244*t]] * R[C[i + 244*t]] : R[B[i + 244*t]] + R[C[i + 244*t]];
R[i + 436*t] = Op[i + 245*t] ? R[B[i + 245*t]] * R[C[i + 245*t]] : R[B[i + 245*t]] + R[C[i + 245*t]];
R[i + 437*t] = Op[i + 246*t] ? R[B[i + 246*t]] * R[C[i + 246*t]] : R[B[i + 246*t]] + R[C[i + 246*t]];
R[i + 438*t] = Op[i + 247*t] ? R[B[i + 247*t]] * R[C[i + 247*t]] : R[B[i + 247*t]] + R[C[i + 247*t]];
R[i + 439*t] = Op[i + 248*t] ? R[B[i + 248*t]] * R[C[i + 248*t]] : R[B[i + 248*t]] + R[C[i + 248*t]];
R[i + 440*t] = Op[i + 249*t] ? R[B[i + 249*t]] * R[C[i + 249*t]] : R[B[i + 249*t]] + R[C[i + 249*t]];
R[i + 441*t] = Op[i + 250*t] ? R[B[i + 250*t]] * R[C[i + 250*t]] : R[B[i + 250*t]] + R[C[i + 250*t]];
R[i + 442*t] = Op[i + 251*t] ? R[B[i + 251*t]] * R[C[i + 251*t]] : R[B[i + 251*t]] + R[C[i + 251*t]];
R[i + 443*t] = Op[i + 252*t] ? R[B[i + 252*t]] * R[C[i + 252*t]] : R[B[i + 252*t]] + R[C[i + 252*t]];
R[i + 444*t] = Op[i + 253*t] ? R[B[i + 253*t]] * R[C[i + 253*t]] : R[B[i + 253*t]] + R[C[i + 253*t]];
R[i + 445*t] = Op[i + 254*t] ? R[B[i + 254*t]] * R[C[i + 254*t]] : R[B[i + 254*t]] + R[C[i + 254*t]];
R[i + 446*t] = Op[i + 255*t] ? R[B[i + 255*t]] * R[C[i + 255*t]] : R[B[i + 255*t]] + R[C[i + 255*t]];
__syncthreads();
R[i + 447*t] = Op[i + 256*t] ? R[B[i + 256*t]] * R[C[i + 256*t]] : R[B[i + 256*t]] + R[C[i + 256*t]];
R[i + 448*t] = Op[i + 257*t] ? R[B[i + 257*t]] * R[C[i + 257*t]] : R[B[i + 257*t]] + R[C[i + 257*t]];
R[i + 449*t] = Op[i + 258*t] ? R[B[i + 258*t]] * R[C[i + 258*t]] : R[B[i + 258*t]] + R[C[i + 258*t]];
R[i + 450*t] = Op[i + 259*t] ? R[B[i + 259*t]] * R[C[i + 259*t]] : R[B[i + 259*t]] + R[C[i + 259*t]];
R[i + 451*t] = Op[i + 260*t] ? R[B[i + 260*t]] * R[C[i + 260*t]] : R[B[i + 260*t]] + R[C[i + 260*t]];
R[i + 452*t] = Op[i + 261*t] ? R[B[i + 261*t]] * R[C[i + 261*t]] : R[B[i + 261*t]] + R[C[i + 261*t]];
R[i + 453*t] = Op[i + 262*t] ? R[B[i + 262*t]] * R[C[i + 262*t]] : R[B[i + 262*t]] + R[C[i + 262*t]];
R[i + 454*t] = Op[i + 263*t] ? R[B[i + 263*t]] * R[C[i + 263*t]] : R[B[i + 263*t]] + R[C[i + 263*t]];
R[i + 455*t] = Op[i + 264*t] ? R[B[i + 264*t]] * R[C[i + 264*t]] : R[B[i + 264*t]] + R[C[i + 264*t]];
R[i + 456*t] = Op[i + 265*t] ? R[B[i + 265*t]] * R[C[i + 265*t]] : R[B[i + 265*t]] + R[C[i + 265*t]];
__syncthreads();
R[i + 457*t] = Op[i + 266*t] ? R[B[i + 266*t]] * R[C[i + 266*t]] : R[B[i + 266*t]] + R[C[i + 266*t]];
R[i + 458*t] = Op[i + 267*t] ? R[B[i + 267*t]] * R[C[i + 267*t]] : R[B[i + 267*t]] + R[C[i + 267*t]];
R[i + 459*t] = Op[i + 268*t] ? R[B[i + 268*t]] * R[C[i + 268*t]] : R[B[i + 268*t]] + R[C[i + 268*t]];
R[i + 460*t] = Op[i + 269*t] ? R[B[i + 269*t]] * R[C[i + 269*t]] : R[B[i + 269*t]] + R[C[i + 269*t]];
R[i + 461*t] = Op[i + 270*t] ? R[B[i + 270*t]] * R[C[i + 270*t]] : R[B[i + 270*t]] + R[C[i + 270*t]];
R[i + 462*t] = Op[i + 271*t] ? R[B[i + 271*t]] * R[C[i + 271*t]] : R[B[i + 271*t]] + R[C[i + 271*t]];
R[i + 463*t] = Op[i + 272*t] ? R[B[i + 272*t]] * R[C[i + 272*t]] : R[B[i + 272*t]] + R[C[i + 272*t]];
R[i + 464*t] = Op[i + 273*t] ? R[B[i + 273*t]] * R[C[i + 273*t]] : R[B[i + 273*t]] + R[C[i + 273*t]];
__syncthreads();
R[i + 465*t] = Op[i + 274*t] ? R[B[i + 274*t]] * R[C[i + 274*t]] : R[B[i + 274*t]] + R[C[i + 274*t]];
R[i + 466*t] = Op[i + 275*t] ? R[B[i + 275*t]] * R[C[i + 275*t]] : R[B[i + 275*t]] + R[C[i + 275*t]];
R[i + 467*t] = Op[i + 276*t] ? R[B[i + 276*t]] * R[C[i + 276*t]] : R[B[i + 276*t]] + R[C[i + 276*t]];
R[i + 468*t] = Op[i + 277*t] ? R[B[i + 277*t]] * R[C[i + 277*t]] : R[B[i + 277*t]] + R[C[i + 277*t]];
R[i + 469*t] = Op[i + 278*t] ? R[B[i + 278*t]] * R[C[i + 278*t]] : R[B[i + 278*t]] + R[C[i + 278*t]];
R[i + 470*t] = Op[i + 279*t] ? R[B[i + 279*t]] * R[C[i + 279*t]] : R[B[i + 279*t]] + R[C[i + 279*t]];
R[i + 471*t] = Op[i + 280*t] ? R[B[i + 280*t]] * R[C[i + 280*t]] : R[B[i + 280*t]] + R[C[i + 280*t]];
R[i + 472*t] = Op[i + 281*t] ? R[B[i + 281*t]] * R[C[i + 281*t]] : R[B[i + 281*t]] + R[C[i + 281*t]];
R[i + 473*t] = Op[i + 282*t] ? R[B[i + 282*t]] * R[C[i + 282*t]] : R[B[i + 282*t]] + R[C[i + 282*t]];
__syncthreads();
R[i + 474*t] = Op[i + 283*t] ? R[B[i + 283*t]] * R[C[i + 283*t]] : R[B[i + 283*t]] + R[C[i + 283*t]];
R[i + 475*t] = Op[i + 284*t] ? R[B[i + 284*t]] * R[C[i + 284*t]] : R[B[i + 284*t]] + R[C[i + 284*t]];
R[i + 476*t] = Op[i + 285*t] ? R[B[i + 285*t]] * R[C[i + 285*t]] : R[B[i + 285*t]] + R[C[i + 285*t]];
R[i + 477*t] = Op[i + 286*t] ? R[B[i + 286*t]] * R[C[i + 286*t]] : R[B[i + 286*t]] + R[C[i + 286*t]];
R[i + 478*t] = Op[i + 287*t] ? R[B[i + 287*t]] * R[C[i + 287*t]] : R[B[i + 287*t]] + R[C[i + 287*t]];
R[i + 479*t] = Op[i + 288*t] ? R[B[i + 288*t]] * R[C[i + 288*t]] : R[B[i + 288*t]] + R[C[i + 288*t]];
R[i + 480*t] = Op[i + 289*t] ? R[B[i + 289*t]] * R[C[i + 289*t]] : R[B[i + 289*t]] + R[C[i + 289*t]];
__syncthreads();
R[i + 481*t] = Op[i + 290*t] ? R[B[i + 290*t]] * R[C[i + 290*t]] : R[B[i + 290*t]] + R[C[i + 290*t]];
R[i + 482*t] = Op[i + 291*t] ? R[B[i + 291*t]] * R[C[i + 291*t]] : R[B[i + 291*t]] + R[C[i + 291*t]];
R[i + 483*t] = Op[i + 292*t] ? R[B[i + 292*t]] * R[C[i + 292*t]] : R[B[i + 292*t]] + R[C[i + 292*t]];
R[i + 484*t] = Op[i + 293*t] ? R[B[i + 293*t]] * R[C[i + 293*t]] : R[B[i + 293*t]] + R[C[i + 293*t]];
R[i + 485*t] = Op[i + 294*t] ? R[B[i + 294*t]] * R[C[i + 294*t]] : R[B[i + 294*t]] + R[C[i + 294*t]];
R[i + 486*t] = Op[i + 295*t] ? R[B[i + 295*t]] * R[C[i + 295*t]] : R[B[i + 295*t]] + R[C[i + 295*t]];
__syncthreads();
R[i + 487*t] = Op[i + 296*t] ? R[B[i + 296*t]] * R[C[i + 296*t]] : R[B[i + 296*t]] + R[C[i + 296*t]];
R[i + 488*t] = Op[i + 297*t] ? R[B[i + 297*t]] * R[C[i + 297*t]] : R[B[i + 297*t]] + R[C[i + 297*t]];
R[i + 489*t] = Op[i + 298*t] ? R[B[i + 298*t]] * R[C[i + 298*t]] : R[B[i + 298*t]] + R[C[i + 298*t]];
R[i + 490*t] = Op[i + 299*t] ? R[B[i + 299*t]] * R[C[i + 299*t]] : R[B[i + 299*t]] + R[C[i + 299*t]];
__syncthreads();
R[i + 491*t] = Op[i + 300*t] ? R[B[i + 300*t]] * R[C[i + 300*t]] : R[B[i + 300*t]] + R[C[i + 300*t]];
R[i + 492*t] = Op[i + 301*t] ? R[B[i + 301*t]] * R[C[i + 301*t]] : R[B[i + 301*t]] + R[C[i + 301*t]];
R[i + 493*t] = Op[i + 302*t] ? R[B[i + 302*t]] * R[C[i + 302*t]] : R[B[i + 302*t]] + R[C[i + 302*t]];
__syncthreads();
R[i + 494*t] = Op[i + 303*t] ? R[B[i + 303*t]] * R[C[i + 303*t]] : R[B[i + 303*t]] + R[C[i + 303*t]];
R[i + 495*t] = Op[i + 304*t] ? R[B[i + 304*t]] * R[C[i + 304*t]] : R[B[i + 304*t]] + R[C[i + 304*t]];
__syncthreads();
R[i + 496*t] = Op[i + 305*t] ? R[B[i + 305*t]] * R[C[i + 305*t]] : R[B[i + 305*t]] + R[C[i + 305*t]];
R[i + 497*t] = Op[i + 306*t] ? R[B[i + 306*t]] * R[C[i + 306*t]] : R[B[i + 306*t]] + R[C[i + 306*t]];
__syncthreads();
R[i + 498*t] = Op[i + 307*t] ? R[B[i + 307*t]] * R[C[i + 307*t]] : R[B[i + 307*t]] + R[C[i + 307*t]];
R[i + 499*t] = Op[i + 308*t] ? R[B[i + 308*t]] * R[C[i + 308*t]] : R[B[i + 308*t]] + R[C[i + 308*t]];
__syncthreads();
R[i + 500*t] = Op[i + 309*t] ? R[B[i + 309*t]] * R[C[i + 309*t]] : R[B[i + 309*t]] + R[C[i + 309*t]];
R[i + 501*t] = Op[i + 310*t] ? R[B[i + 310*t]] * R[C[i + 310*t]] : R[B[i + 310*t]] + R[C[i + 310*t]];
__syncthreads();
R[i + 502*t] = Op[i + 311*t] ? R[B[i + 311*t]] * R[C[i + 311*t]] : R[B[i + 311*t]] + R[C[i + 311*t]];
__syncthreads();
R[i + 503*t] = Op[i + 312*t] ? R[B[i + 312*t]] * R[C[i + 312*t]] : R[B[i + 312*t]] + R[C[i + 312*t]];
__syncthreads();
R[i + 504*t] = Op[i + 313*t] ? R[B[i + 313*t]] * R[C[i + 313*t]] : R[B[i + 313*t]] + R[C[i + 313*t]];
__syncthreads();
R[i + 505*t] = Op[i + 314*t] ? R[B[i + 314*t]] * R[C[i + 314*t]] : R[B[i + 314*t]] + R[C[i + 314*t]];
__syncthreads();
R[i + 506*t] = Op[i + 315*t] ? R[B[i + 315*t]] * R[C[i + 315*t]] : R[B[i + 315*t]] + R[C[i + 315*t]];
__syncthreads();
R[i + 507*t] = Op[i + 316*t] ? R[B[i + 316*t]] * R[C[i + 316*t]] : R[B[i + 316*t]] + R[C[i + 316*t]];
__syncthreads();
R[i + 508*t] = Op[i + 317*t] ? R[B[i + 317*t]] * R[C[i + 317*t]] : R[B[i + 317*t]] + R[C[i + 317*t]];
__syncthreads();
R[i + 509*t] = Op[i + 318*t] ? R[B[i + 318*t]] * R[C[i + 318*t]] : R[B[i + 318*t]] + R[C[i + 318*t]];
__syncthreads();
R[i + 510*t] = Op[i + 319*t] ? R[B[i + 319*t]] * R[C[i + 319*t]] : R[B[i + 319*t]] + R[C[i + 319*t]];
if (i==0) { final += R[510*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
22,616 | #include "includes.h"
// Per-vertex flag kernel: for each of the nb_vert entries, writes 1 to dst
// where the corresponding src value is non-negative, and 0 where it is
// negative. One thread per vertex; threads past nb_vert exit early.
__global__ static void transform_vert_to_fit(const int* src, int* dst, const int nb_vert)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= nb_vert) return;   // guard the grid tail
    if (src[idx] < 0) {
        dst[idx] = 0;
    } else {
        dst[idx] = 1;
    }
}
22,617 | /*************************************************************************
>> File Name: MatrixMultipl.c
>> Author: chenjunjie
>> Mail: 2716705056qq.com
>> Created Time: 2019.06.07
************************************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define Width 4
#define Block_width 2
//device code
// Computes d_P = d_M * d_N for square width x width row-major matrices.
// One thread per output element; launch with a 2D grid/block covering
// width x width. Fix: the original body used the file-scope `Width` macro
// everywhere and ignored the `width` parameter, so the kernel only worked
// when the two happened to agree — it now honors the parameter.
__global__ void MatrixMulKernel(int *d_M, int *d_N, int *d_P, int width)
{
    // Row of P (and of M) computed by this thread
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    // Column of P (and of N) computed by this thread
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Row = %d, Col = %d\n", Row, Col);  // debug trace only (serializes output)
    if((Row < width) && (Col < width))
    {
        int Pvalue = 0;
        // Dot product of row `Row` of M with column `Col` of N
        for(int i = 0; i < width; ++i)
        {
            Pvalue += d_M[Row*width + i]*d_N[i*width + Col];
        }
        d_P[Row*width + Col] = Pvalue;
    }
}
// Prints a row x col integer matrix stored row-major in A, one matrix row per
// output line. Fix: the original indexed with the file-scope `Width` macro
// (A[i*Width + j]) instead of the `col` parameter, which is only correct when
// col == Width; it now uses the parameter.
void showaray(int *A, int row, int col)
{
    for(int i = 0; i< row; ++i)
    {
        for(int j = 0; j < col; ++j)
        {
            printf("%3d",A[i*col + j]);
        }
        printf("\n");
    }
}
//Host code
// Host driver: prints device properties, multiplies two Width x Width
// all-ones integer matrices on the GPU, and prints the result.
// Fixes: cudaFree(d_M) was called twice while d_N was never freed,
// and the three host buffers were leaked.
int main(int argc, char *argv[])
{
    int dev_count;
    cudaDeviceProp dev_prop;
    cudaGetDeviceCount(&dev_count);
    printf("设备数:%d\n",dev_count);
    // Dump the capability limits of every visible device
    for(int i = 0; i < dev_count; ++i)
    {
        cudaGetDeviceProperties(&dev_prop,i);
        printf("每个block支持的最大线程数: %d\n", dev_prop.maxThreadsPerBlock);
        printf("设备中SM数: %d\n", dev_prop.multiProcessorCount);
        printf("时钟频率为: %d\n",dev_prop.clockRate);
        printf("每个Block的x方向的最大线程数: %d\n", dev_prop.maxThreadsDim[0]);
        printf("每个Block的y方向的最大线程数: %d\n", dev_prop.maxThreadsDim[1]);
        printf("每个Block的z方向的最大线程数: %d\n", dev_prop.maxThreadsDim[2]);
        printf("每个grid的x方向支持的最大的block数: %d\n", dev_prop.maxGridSize[0]);
        printf("每个grid的y方向支持的最大的block数: %d\n", dev_prop.maxGridSize[1]);
        printf("每个grid的z方向支持的最大的block数: %d\n", dev_prop.maxGridSize[2]);
        printf("每个warp包含的线程数为: %d\n", dev_prop.warpSize);
        printf("每个SM里面的registers变量个数:%d\n", dev_prop.regsPerBlock);
        printf("每个Block里共享内存:%lu\n", dev_prop.sharedMemPerBlock);
    }
    int N = Width * Width;
    size_t size = Width * Width * sizeof(int);
    int *h_M = (int *)malloc(size);
    int *h_N = (int *)malloc(size);
    int *h_P = (int *)malloc(size);
    // Initialize h_M and h_N to all ones, h_P to zero
    for(int i = 0; i < N; ++i)
    {
        h_M[i] = 1;
        h_N[i] = 1;
        h_P[i] = 0;
    }
    printf("h_M, h_N,h_P分配成功\n");
    printf("打印h_M\n");
    showaray(h_M, Width, Width);
    printf("打印h_N\n");
    showaray(h_N, Width, Width);
    printf("打印h_P\n");
    showaray(h_P, Width, Width);
    int *d_M;
    cudaMalloc(&d_M, size);
    int *d_N;
    cudaMalloc(&d_N, size);
    int *d_P;
    cudaMalloc(&d_P, size);
    printf("d_M, d_N, d_P分配成功\n");
    cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
    printf("d_M, d_N, d_P拷贝到h成功\n");
    // Ceil-divide so a partial tile still gets a block
    int NumBlocks = Width/Block_width;
    if(Width%Block_width )
        NumBlocks++;
    dim3 grid(NumBlocks, NumBlocks, 1);
    dim3 block(Block_width, Block_width, 1);
    MatrixMulKernel<<<grid, block>>>(d_M, d_N, d_P, Width);
    cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);
    printf("计算成功\n");
    showaray(h_P, Width, Width);
    // Release device memory (the original freed d_M twice and leaked d_N)
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    printf("释放d_M, d_P, d_N成功\n");
    // Release host buffers (originally leaked)
    free(h_M);
    free(h_N);
    free(h_P);
    return 0;
}
|
22,618 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// Multiplies two n x n row-major float matrices (mat1 * mat2) and returns a
// freshly malloc'ed n x n result buffer. The caller owns the returned pointer
// and is responsible for freeing it.
float * multiplicar (float * mat1, float *mat2, int n)
{
    float *res = (float*) malloc(n * n * sizeof(float));
    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            // Dot product of row `row` of mat1 with column `col` of mat2
            res[row*n + col] = 0;
            for (int k = 0; k < n; k++)
                res[row*n + col] += mat1[row*n + k] * mat2[k*n + col];
        }
    }
    return res;
}
// Prints a rows x cols float matrix (row-major) with no decimal places,
// one matrix row per output line, followed by a trailing blank line.
void printM(float * data, int rows, int cols)
{
    int r, c;
    for (r = 0; r < rows; r++)
    {
        for (c = 0; c < cols; c++)
            printf("%.0f ", data[r*cols + c]);
        printf("\n");
    }
    printf("\n");
}
// Host driver: reads matrix size N from argv, fills two N x N matrices with
// random values in [10, 1000], multiplies them on the CPU, and reports the
// elapsed time measured with CUDA events.
// Fixes: return a non-zero status on a usage error, free mat1/mat2/res
// (originally leaked), and destroy the CUDA timing events.
int main (int argc, char *argv[] )
{
    if ( argc != 2 ) /* argc should be 2 for correct execution */
    {
        /* We print argv[0] assuming it is the program name */
        printf( "usage: %s N", argv[0] );
        return 1; // signal the usage error to the shell
    }
    int n = atoi(argv[1]);
    float *mat1 = (float*) malloc(n * n * sizeof(float));
    float *mat2 = (float*) malloc(n * n * sizeof(float));
    srand (time(NULL));
    cudaEvent_t inicio, fin;
    float tiempo;
    // Fill both inputs with random values in [10, 1000]
    for(int i = 0; i<n*n; i++)
    {
        mat1[i] = rand()%991 + 10;
    }
    for(int i = 0; i<n*n; i++)
    {
        mat2[i] = rand()%991 + 10;
    }
    /* printM(mat1, n, n);
    printM(mat2, n, n);
    */
    // Time the CPU multiplication with CUDA events
    cudaEventCreate( &inicio );
    cudaEventCreate( &fin );
    cudaEventRecord( inicio, 0 );
    float * res = multiplicar(mat1, mat2, n);
    cudaEventRecord( fin, 0 );
    cudaEventSynchronize( fin );
    cudaEventElapsedTime( &tiempo, inicio, fin );
    if (res != 0)
    {
        //printf("\nMatriz de Resultado\n\n");
        //printM(res, n, n);
        free(res); // result buffer is owned by the caller of multiplicar
    }
    printf("tiempo total en ms: %f\n", tiempo);
    // Release timing events and host buffers (originally leaked)
    cudaEventDestroy( inicio );
    cudaEventDestroy( fin );
    free(mat1);
    free(mat2);
    return 0;
}
|
22,619 | extern "C"
/*
Pointer kernelParameters = Pointer.to(
// Dots properties
Pointer.to(gDots.iGA_Float[GPUDots.PX].gpuArray),
Pointer.to(gDots.iGA_Float[GPUDots.PY].gpuArray),
Pointer.to(gDots.iGA_Float[GPUDots.PZ].gpuArray),
Pointer.to(gDots.iGA_Float[GPUDots.SUPER_DOT_RADIUS_SQUARED].gpuArray), // necessary for the mapping
Pointer.to(gDots.iGA_Int[GPUDots.ALL_NEIGHBORS_HAVE_CONVERGED].gpuArray),
// Blocks Properties
Pointer.to(iGA_arrayDotsIndexes.gpuArray),
Pointer.to(iGA_addrStartBlock0.gpuArray),Pointer.to(iGA_nPtBlock0.gpuArray),
Pointer.to(iGA_addrStartBlock1.gpuArray),Pointer.to(iGA_nPtBlock1.gpuArray),
Pointer.to(avgX.gpuArray), Pointer.to(avgY.gpuArray),Pointer.to(avgZ.gpuArray),
Pointer.to(dirX.gpuArray), Pointer.to(dirY.gpuArray),Pointer.to(dirZ.gpuArray),
Pointer.to(iGA_blockLevel.gpuArray), // to know between level 0 and above
Pointer.to(iGA_idBlock.gpuArray),
Pointer.to(iGA_offsIntBlock.gpuArray),
// Output values
Pointer.to(fGA_pScalVal.gpuArray),
Pointer.to(iGA_rkBlPos.gpuArray),
Pointer.to(iGA_rkBlNeg.gpuArray),
Pointer.to(iGA_rkBlMid0.gpuArray),
Pointer.to(iGA_rkBlMid1.gpuArray),
//
Pointer.to(iGA_nPtBlPos.gpuArray),
Pointer.to(iGA_nPtBlNeg.gpuArray),
Pointer.to(iGA_nPtBlMid0.gpuArray),
Pointer.to(iGA_nPtBlMid1.gpuArray),
//
Pointer.to(iGA_newBlockCvg.gpuArray),
//
Pointer.to(new int[]{nBlocks}),
Pointer.to(new int[]{nDots}) // offset for blocks : 0 or 1
);
*/
// Classifies every dot handled by this GPU block against the block's splitting
// plane (centroid avgP*, direction dir*), writing a normalized signed distance
// to pScalVal and atomically assigning each dot a rank in the Pos / Neg /
// Mid0 / Mid1 output lists; also sets per-block convergence flags.
// Shared memory: array[0..6] holds int block metadata, followed by 3 floats
// for the centroid and 3 floats for the plane direction (13 * 4 bytes).
// Fix: dx/dy/dz were declared `int`, truncating the float position deltas
// before the dot product and corrupting pScal; they are now `float`.
__global__ void mapDots(// Dots props
float* pX,
float* pY,
float* pZ,
float* sDotRadiusSquared,
int* allNeighConverged,
//Tree specs
// per Block In
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
float* avgPX,
float* avgPY,
float* avgPZ,
float* dirX,
float* dirY,
float* dirZ,
int* blLevel,
// per GPU Block In
int* idBl,
int* offsBl,
// output values, per dot
float* pScalVal,
int* rkBlPos,
int* rkBlNeg,
int* rkBlMid0,
int* rkBlMid1,
// output value, per Blocks Out
int* nPtBlPos,
int* nPtBlNeg,
int* nPtBlMid0,
int* nPtBlMid1,
int* newBlockCvg,
int nBlocksIn,
int nDotsIn,
float sqInteract
)
{
extern __shared__ int array[];
float* avg = (float*)&array[7];
float* dir = (float*)&avg[3];
// Thread 0 stages this GPU block's metadata into shared memory
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
array[6]=idBloc;
avg[0]=avgPX[idBloc];
avg[1]=avgPY[idBloc];
avg[2]=avgPZ[idBloc];
dir[0]=dirX[idBloc];
dir[1]=dirY[idBloc];
dir[2]=dirZ[idBloc];
}
__syncthreads(); // metadata must be visible to the whole block before use
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int blockLevel = array[5];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
idBloc = array[6];
if (ptToBeComputed<nPts) {
// prevents overflow
int addr_pt = startIndexBl0+ptToBeComputed;
int id_pt=dotIndexes[addr_pt];
// Reset all ranks to "unassigned" for both block halves
rkBlNeg[addr_pt]=-1;
rkBlPos[addr_pt]=-1;
rkBlMid0[addr_pt]=-1;
rkBlMid1[addr_pt]=-1;
rkBlNeg[addr_pt+nDotsIn]=-1;
rkBlPos[addr_pt+nDotsIn]=-1;
rkBlMid0[addr_pt+nDotsIn]=-1;
rkBlMid1[addr_pt+nDotsIn]=-1;
//
int cvg = allNeighConverged[id_pt]; // 1 if all converged; 0 otherwise
// Offset from the block centroid — must stay float (was int, which
// truncated the deltas before the dot product below)
float dx = pX[id_pt]-avg[0];
float dy = pY[id_pt]-avg[1];
float dz = pZ[id_pt]-avg[2];
// Signed distance along the split direction, normalized by sqInteract
float pScal = (dx*dir[0]+dy*dir[1]+dz*dir[2])/sqInteract;
pScalVal[id_pt] = pScal;
int inBloc1 = (ptToBeComputed>=nPtBlock0); // 1 if the dot belongs to block 1
float sDRadius = sqrtf(sDotRadiusSquared[id_pt]); // >0 marks a superdot
if (pScal<0) {
// Negative side: take a rank in the Neg list of the owning half
rkBlNeg[addr_pt+inBloc1*nDotsIn] = atomicAdd(& nPtBlNeg[idBloc+inBloc1*nBlocksIn], 1);
if (cvg==0) {newBlockCvg[4*idBloc]=1;} // quick convergence block test
// Superdot straddling the plane also registers on the positive side
if ((sDRadius>0)&&(pScal+sDRadius/sqInteract>=0)) {
rkBlPos[addr_pt] = atomicAdd(& nPtBlPos[idBloc], 1);
}
} else {
// Positive side: take a rank in the Pos list of the owning half
rkBlPos[addr_pt+inBloc1*nDotsIn] = atomicAdd(& nPtBlPos[idBloc+inBloc1*nBlocksIn], 1);
if (cvg==0) {newBlockCvg[4*idBloc+1]=1;} // quick convergence block test
// Superdot straddling the plane also registers on the negative side
if ((sDRadius>0)&&(pScal-sDRadius/sqInteract<0)) {
rkBlNeg[addr_pt] = atomicAdd(& nPtBlNeg[idBloc], 1);
}
}
if (sDRadius==0) {
// not a superdot: may also belong to a middle band near the plane
if (blockLevel==0) {
if ((pScal>-1)&&(pScal<0)) {
rkBlMid0[addr_pt] = atomicAdd(& nPtBlMid0[idBloc], 1);
if (cvg==0) {newBlockCvg[4*idBloc+2]=1;} // quick convergence block test
}
if ((pScal<1)&&(pScal>=0)) {
rkBlMid0[addr_pt+nDotsIn] = atomicAdd(& nPtBlMid0[idBloc+nBlocksIn], 1);
if (cvg==0) {newBlockCvg[4*idBloc+2]=1;} // quick convergence block test
}
} else {
// Levels above 0 split the middle band by owning half (Mid0 / Mid1)
if ((pScal>-1)&&(pScal<0)) {
if (inBloc1) {
//Mid1Block0
rkBlMid1[addr_pt] = atomicAdd(& nPtBlMid1[idBloc], 1);
if (cvg==0) {newBlockCvg[4*idBloc+3]=1;}
} else {
//Mid0Block0
rkBlMid0[addr_pt] = atomicAdd(& nPtBlMid0[idBloc], 1);
if (cvg==0) {newBlockCvg[4*idBloc+2]=1;}
}
}
if ((pScal<1)&&(pScal>=0)) {
if (inBloc1) {
//Mid0Block1
rkBlMid0[addr_pt+nDotsIn] = atomicAdd(& nPtBlMid0[idBloc+nBlocksIn], 1);
if (cvg==0) {newBlockCvg[4*idBloc+2]=1;}
} else {
//Mid1Block1
rkBlMid1[addr_pt+nDotsIn] = atomicAdd(& nPtBlMid1[idBloc+nBlocksIn], 1);
if (cvg==0) {newBlockCvg[4*idBloc+3]=1;}
}
}
}
}
}
}
|
22,620 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define TILE_WIDTH 16
// Tiled matrix multiply: d_P = d_M * d_N for square Width x Width row-major
// matrices. Each thread block computes one TILE_WIDTH x TILE_WIDTH tile of
// d_P, staging input tiles through shared memory.
// Generalized: the original required Width to be a multiple of TILE_WIDTH
// (its phase loop truncated and its loads/store were unguarded, reading and
// writing out of bounds otherwise). Out-of-range tile elements are now
// zero-padded and the final store is bounds-checked.
__global__ void Matrix_Mul_Kernel(float* d_M, float* d_N, float* d_P, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = by * TILE_WIDTH + ty;  // output row this thread computes
    int Col = bx * TILE_WIDTH + tx;  // output column this thread computes
    float Pvalue = 0;
    // ceil(Width / TILE_WIDTH) phases so a partial final tile is covered too
    int numPhases = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
    for(int ph = 0; ph < numPhases; ++ph)
    {
        int mCol = ph * TILE_WIDTH + tx;  // column read from d_M this phase
        int nRow = ph * TILE_WIDTH + ty;  // row read from d_N this phase
        // Zero-pad loads that fall outside the matrices
        Mds[ty][tx] = (Row < Width && mCol < Width) ? d_M[Row * Width + mCol] : 0.0f;
        Nds[ty][tx] = (nRow < Width && Col < Width) ? d_N[nRow * Width + Col] : 0.0f;
        __syncthreads(); // ensure both tiles are fully loaded before use
        for (int k = 0; k < TILE_WIDTH; ++k){
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads(); // ensure the tiles are consumed before the next
                         // phase overwrites them
    }
    // Guard the store: threads past the matrix edge write nothing
    if (Row < Width && Col < Width)
        d_P[Row * Width + Col] = Pvalue;
}
// CPU reference: P = M * N for square Width x Width row-major float matrices.
// Fix: the accumulator was declared `int` (initialized with `0.0`), which
// silently truncated every float product before it was stored into the
// float output — use a float accumulator so the reference actually
// computes a floating-point product like the GPU kernel it validates.
void cpu_matrix_mult(float *M, float *N, float *P, int Width) {
    for (int i = 0; i < Width; ++i)
    {
        for (int j = 0; j < Width; ++j)
        {
            float tmp = 0.0f; // was `int tmp = 0.0;` — truncating accumulator
            for(int k = 0; k < Width; ++k)
            {
                tmp += M[i * Width + k] * N[k * Width + j];
            }
            P[i * Width + j] = tmp;
        }
    }
}
// Host driver: fills two 1024x1024 matrices, multiplies them on the GPU
// (tiled kernel) and on the CPU, times both with CUDA events, and compares.
int main()
{
    int Width = 1024;
    srand(3333);
    float *h_a=0, *h_b=0, *h_c=0, *h_cc=0;
    // Pinned host buffers: required for fast/async transfers.
    cudaMallocHost((void **) &h_a, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_b, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_c, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_cc, sizeof(float)*Width*Width);
    if(h_a==0 || h_b==0 || h_c==0 || h_cc==0)
    {
        printf("No asignacion de memoria\n");
        return 1;
    }
    // Integer-valued random inputs in [0, 1023].
    for (int i = 0; i < Width; ++i) {
        for (int j = 0; j < Width; ++j) {
            h_a[i * Width + j] = rand()%1024;
        }
    }
    for (int i = 0; i < Width; ++i) {
        for (int j = 0; j < Width; ++j) {
            h_b[i * Width + j] = rand()%1024;
        }
    }
    float gpu_time_ms, cpu_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    float *d_a=0, *d_b=0, *d_c=0;
    cudaMalloc((void **) &d_a, sizeof(float)*Width*Width);
    cudaMalloc((void **) &d_b, sizeof(float)*Width*Width);
    cudaMalloc((void **) &d_c, sizeof(float)*Width*Width);
    if(d_a==0 || d_b==0 || d_c==0)
    {
        printf("No asignacion Gpu\n");
        return 1;
    }
    cudaMemcpy(d_a, h_a, sizeof(float)*Width*Width, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*Width*Width, cudaMemcpyHostToDevice);
    dim3 dimBlock(TILE_WIDTH,TILE_WIDTH);
    dim3 dimGrid((int)ceil(float(Width)/dimBlock.x), (int)ceil(float(Width)/dimBlock.y));
    Matrix_Mul_Kernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, Width);
    // Fix: kernel launches do not return an error code — check explicitly.
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
    {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        return 1;
    }
    // Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    // Synchronizing before the blocking copy keeps the event timing honest.
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, sizeof(float)*Width*Width, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time_ms, start, stop);
    printf("Tiempo transcurrido en GPU: %f ms.\n\n", gpu_time_ms);
    //CPU version
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, Width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_time_ms, start, stop);
    printf("Tiempo transcurrido en CPU: %f ms.\n\n", cpu_time_ms);
    // Validate results.
    // Fix: exact float equality is the wrong test — GPU and CPU sum the
    // 1024 products in different orders, so rounding legitimately differs.
    // Compare with a relative tolerance instead.
    int all_ok = 1;
    for (int i = 0; i < Width; ++i)
    {
        for (int j = 0; j < Width; ++j)
        {
            float ref = h_cc[i*Width + j];
            float got = h_c[i*Width + j];
            if (fabsf(got - ref) > 1e-3f * fmaxf(1.0f, fabsf(ref)))
            {
                all_ok = 0;
            }
        }
    }
    if(all_ok)
    {
        printf("Todo bien!!, speedup = %f\n", cpu_time_ms / gpu_time_ms);
    }
    else
    {
        printf("Error\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
22,621 | #ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <iostream>
#include <iterator>
#include <cstdlib>
// This example compute the mode [1] of a set of numbers. If there
// are multiple modes, one with the smallest value it returned.
//
// [1] http://en.wikipedia.org/wiki/Mode_(statistics)
// Associative combiner for an inclusive scan over (value, count) tuples:
// when two adjacent tuples carry the same value, their counts are summed;
// otherwise the right-hand tuple starts a fresh run. After the scan, the
// last tuple of each run of equal values holds that run's total length.
// Fix: dropped the `thrust::binary_function` base class — it is deprecated
// and removed in recent Thrust / C++17, and scan never required it.
template <typename Tuple>
struct combine_counts
{
  __host__ __device__
  Tuple operator()(Tuple a, Tuple b)
  {
    if(thrust::get<0>(a) == thrust::get<0>(b))
      return Tuple(thrust::get<0>(a), thrust::get<1>(a) + thrust::get<1>(b));
    else
      return Tuple(thrust::get<0>(b), thrust::get<1>(b));
  }
};
// Computes the mode (most frequent value) of N random integers on the GPU.
// Pipeline: sort -> zip-scan that accumulates run lengths -> max_element.
// NOTE(review): thrust::max_element is declared in <thrust/extrema.h>;
// this file relies on it being pulled in transitively — confirm with the
// toolkit version in use.
int main(void)
{
  const size_t N = 30;
  // generate random data on the host (rand() is unseeded -> deterministic)
  thrust::host_vector<int> h_data(N);
  for(size_t i = 0; i < N; i++)
    h_data[i] = rand() % 10;
  // transfer data to device
  thrust::device_vector<int> d_data(h_data);
  // print the initial data
  std::cout << "initial data" << std::endl;
  thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " "));
  std::cout << std::endl;
  // sort data to bring equal elements together
  thrust::sort(d_data.begin(), d_data.end());
  // print the sorted data
  std::cout << "sorted data" << std::endl;
  thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " "));
  std::cout << std::endl;
  // scan the values and counts together, adding the counts
  // together when the two values are equal; after this, the last
  // element of each run of equal values carries the full run length.
  thrust::device_vector<unsigned int> d_counts(N, 1);
  thrust::inclusive_scan(thrust::make_zip_iterator(thrust::make_tuple(d_data.begin(), d_counts.begin())),
                         thrust::make_zip_iterator(thrust::make_tuple(d_data.end(), d_counts.end())),
                         thrust::make_zip_iterator(thrust::make_tuple(d_data.begin(), d_counts.begin())),
                         combine_counts< thrust::tuple<int,unsigned int> >());
  // print the counts
  std::cout << "counts" << std::endl;
  thrust::copy(d_counts.begin(), d_counts.end(), std::ostream_iterator<int>(std::cout, " "));
  std::cout << std::endl;
  // find the index of the maximum count; max_element returns the FIRST
  // maximum, and the data is sorted, so ties resolve to the smallest value.
  thrust::device_vector<unsigned int>::iterator mode_iter;
  mode_iter = thrust::max_element(d_counts.begin(), d_counts.end());
  int mode = d_data[mode_iter - d_counts.begin()];
  unsigned int occurances = *mode_iter;
  std::cout << "Modal value " << mode << " occurs " << occurances << " times " << std::endl;
  std::cout << "TEST PASSED\n";
  return 0;
}
|
22,622 | #include "includes.h"
// Grid-stride fill: writes the scalar A into every element of B[0..len).
// Supports a 2-D grid; each thread starts at its flattened global id and
// advances by the total number of launched threads.
__global__ void __fillToIndsLongX(long long A, long long *B, long long len) {
  int start = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  int stride = blockDim.x * gridDim.x * gridDim.y;
  for (long long i = start; i < len; i += stride) {
    B[i] = A;
  }
}
22,623 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
/*
__global__ void kernel(float* input0,float* input1,float* output0){
extern __shared__ __attribute__ ((aligned(16))) uint8_t sbase[];
float v3;
v3 = 0.0;
for (int i4 = 0;i4 < 256;i4++){
v3 = (v3+(input0[((blockIdx.x*256)+i4)]*input1[((i4*256)+threadIdx.x)]));
}
((float*)(sbase+1024))[threadIdx.x] = v3;
((float*)sbase)[threadIdx.x] = ((float*)(sbase+1024))[threadIdx.x];
output0[((blockIdx.x*256)+threadIdx.x)] = ((float*)sbase)[threadIdx.x];
}
__global__ void kernel(float* input0,float* input1,float* output0){
extern __shared__ __attribute__ ((aligned(16))) uint8_t sbase[];
float v1;
v1 = 0.0;
for (int i2 = 0;i2 < 256;i2++){
v1 = (v1+(input0[((blockIdx.x*256)+i2)]*input1[((i2*256)+threadIdx.x)]));
}
output0[((blockIdx.x*256)+threadIdx.x)] = v1;
}
__global__ void kernel(float* input0,float* input1,float* output0){
float v1;
v1 = 0.0;
for (int i2 = 0;i2 < 256;i2++){
v1 = (v1+(input0[((blockIdx.x*256)+i2)]*input1[((i2*256)+threadIdx.x)]));
}
output0[((blockIdx.x*256)+threadIdx.x)] = v1;
}
*/
/* number of threads needed 256*/
// For thread t, accumulates sum_k input0[t*256 + k] * input1[k*256 + t]
// (row t of input0 dotted with column t of input1, both 256x256 row-major),
// and stores the result at output0[blockIdx.x*256 + t].
// NOTE(review): the read indices depend only on threadIdx.x, so every block
// computes the same per-thread value, and each value corresponds to the
// diagonal pairing (row t, col t) — this does NOT match the full matrix
// product computed by the CPU reference in main(); confirm intent.
// NOTE(review): the __syncthreads() guards no shared-memory traffic here
// and appears to have no effect.
__global__ void kernel(float* input0,float* input1,float* output0){
float v1;
v1 = 0.0;
for (int i2 = 0;i2 < 256;i2++){
v1 = (v1+(input0[((threadIdx.x*256)+i2)]*input1[((i2*256)+threadIdx.x)]));
}
__syncthreads();
output0[((blockIdx.x*256)+threadIdx.x)] = v1;
}
#define N (256*256)
// Host driver: runs the GPU kernel over two N-element buffers, then builds
// a CPU 256x256 matrix product as a reference and compares element-wise.
int main(int argc, char **argv){
  float *v1;
  float *v2;
  float *r;
  float *rc;
  float *dv1;
  float *dv2;
  float *dr;
  v1 = (float*)malloc(sizeof(float) *N);
  v2 = (float*)malloc(sizeof(float) *N);
  r = (float*)malloc(sizeof(float) *N);
  // Fix: rc is accumulated into with `+=` below, so it must start zeroed;
  // the original malloc left it uninitialized (undefined behavior).
  rc = (float*)calloc(N, sizeof(float));
  //generate input data
  for (int i = 0; i < N; ++i) {
    v1[i] = i % 4 ;
    v2[i] = 1.34;
  }
  cudaMalloc((void**)&dv1, sizeof(float) * N );
  cudaMalloc((void**)&dv2, sizeof(float) * N );
  cudaMalloc((void**)&dr, sizeof(float) * N );
  cudaMemcpy(dv1, v1, sizeof(float) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dv2, v2, sizeof(float) * N, cudaMemcpyHostToDevice);
  kernel<<<256, 256,2048* sizeof(float)>>>(dv1,dv2,dr);
  cudaMemcpy(r, dr, sizeof(float) * N , cudaMemcpyDeviceToHost);
  cudaFree(dv1);
  cudaFree(dv2);
  cudaFree(dr);
  // show results
  for (int i = 0; i < N; ++i) {
    printf("%f ", r[i]);
  }
  /* CPU Matrix mult (reference) */
  for (int i=0;i<256;i++){
    for(int j=0;j<256;j++){
      for(int k=0;k<256;k++){
        rc[i*256+j] += v1[i*256+k] * v2[k*256+j];
      }
    }
  }
  for (int i = 0; i < N; ++i) {
    if (rc[i] != r[i]) {
      printf("differs! %f %f \n ", rc[i], r[i]);
    }
  }
  /* compare few first */
  printf("\n Compare a few \n");
  for (int i = 0; i < 10; ++i) {
    printf("%f %f \n", rc[i], r[i]);
  }
  // Fix: release host allocations (were leaked).
  free(v1);
  free(v2);
  free(r);
  free(rc);
  return 0;
}
|
// One forward-reduction step of cyclic reduction for a batch of tridiagonal
// systems. Each block handles one system of size n (offset = blockIdx.x * n
// into d_d); `stride` selects the reduction level. The coefficient arrays
// (a_d, b_d, c_d, k1_d, k2_d, b_first_d, k1_first_d, k1_last_d) hold one
// entry per level, indexed by log2(stride)-derived values.
// NOTE(review): log2() on a float truncated to int derives the level index —
// exact only while stride is an exact power of two; confirm callers
// guarantee that.
__global__ void forwardReductionKernel(const double *a_d,
                    const double *b_d,
                    const double *c_d,
                    double *d_d,
                    const double *k1_d,
                    const double *k2_d,
                    const double *b_first_d,
                    const double *k1_first_d,
                    const double *k1_last_d,
                    const int n,
                    int stride)
{
    int tix = threadIdx.x;
    int offset = blockIdx.x*n; // start of this block's system within d_d
    int i;
    int j, k;
    int idx;
    double x_j, x_k;
    // forward reduction
    if (stride == n)
    {
        // Final level: only two unknowns remain; solve the 2x2 system
        // directly (Cramer's rule) and store both solutions in place.
        stride /= 2;
        j = log2((float)stride) - 1;
        k = log2((float)stride); // the last element
        x_j = (d_d[offset+stride-1]*b_d[k] - c_d[j]*d_d[offset+2*stride-1])/ \
            (b_first_d[j]*b_d[k] - c_d[j]*a_d[k]);
        x_k = (b_first_d[j]*d_d[offset+2*stride-1] - d_d[offset+stride-1]*a_d[k])/ \
            (b_first_d[j]*b_d[k] - c_d[j]*a_d[k]);
        d_d[offset+stride-1] = x_j;
        d_d[offset+2*stride-1] = x_k;
    }
    else
    {
        // Interior level: each thread updates one right-hand-side entry at
        // this stride using its two neighbours from the previous level.
        i = (stride-1) + tix*stride;
        idx = log2((float)stride) - 1;
        if (tix == 0)
        {
            // First equation of the level uses the boundary coefficient.
            d_d[offset+i] = d_d[offset+i] - d_d[offset+i-stride/2]*k1_first_d[idx] - d_d[offset+i+stride/2]*k2_d[idx];
        }
        else if (i == (n-1))
        {
            // Last equation has no right-hand neighbour.
            d_d[offset+i] = d_d[offset+i] - d_d[offset+i-stride/2]*k1_last_d[idx];
        }
        else
        {
            d_d[offset+i] = d_d[offset+i] - d_d[offset+i-stride/2]*k1_d[idx] - d_d[offset+i+stride/2]*k2_d[idx];
        }
    }
}
// Backward-substitution step of cyclic reduction: with the coarsest unknowns
// already solved in place (by forwardReductionKernel), each call fills in the
// unknowns of one finer level as `stride` halves. b1/c1 and ai/bi/ci are the
// scalar coefficients for the finest (stride == 2) level; the per-level
// coefficient arrays are indexed by rint(log2(stride)) - 2.
__global__ void backwardSubstitutionKernel(const double *a_d,
                    const double *b_d,
                    const double *c_d,
                    double *d_d,
                    const double *b_first_d,
                    const double b1,
                    const double c1,
                    const double ai,
                    const double bi,
                    const double ci,
                    const int n,
                    const int stride)
{
    int tix = threadIdx.x;
    int offset = blockIdx.x*n; // start of this block's system within d_d
    int i;
    int idx;
    i = (stride/2-1) + tix*stride;
    if (stride == 2)
    {
        // Finest level: use the scalar coefficients of the original system.
        if (i == 0)
        {
            d_d[offset+i] = (d_d[offset+i] - c1*d_d[offset+i+1])/b1;
        }
        else
        {
            d_d[offset+i] = (d_d[offset+i] - (ai)*d_d[offset+i-1] - (ci)*d_d[offset+i+1])/bi;
        }
    }
    else
    {
        // rint rounds to the nearest integer (guards against log2 rounding).
        idx = rint(log2((double)stride)) - 2;
        if (tix == 0)
        {
            // First unknown of the level has no left-hand neighbour.
            d_d[offset+i] = (d_d[offset+i] - c_d[idx]*d_d[offset+i+stride/2])/b_first_d[idx];
        }
        else
        {
            d_d[offset+i] = (d_d[offset+i] - a_d[idx]*d_d[offset+i-stride/2] - c_d[idx]*d_d[offset+i+stride/2])/b_d[idx];
        }
    }
}
|
22,625 | /*
* nn.cu
*
* Created on: Apr 18, 2017
* Author: sara
*/
#include "nn.cuh"
#include "nn_kernels.cuh"
#include <iostream>
#include <fstream>
#include <iomanip>
# define MAX_THREADS 1024
/******************************************************************************
* data_buffer_split: creating data buffer split, according to the input data
* sizes and layers' outputs
*
* Arguments:
* data_S: input data size
* layers: vector of nn layers (possible: fcn, cnn, pool)
* W_sizes: sizes of fcn or/and cnn kernels' weights
* b_sizes: sizes of fcn or/and cnn biases
* pool_sizes: sizes of pool strides
* data_split: array to store data buffer split
*****************************************************************************/
// Computes the per-layer output sizes of the network: data_split[0] is the
// flattened input size, and each subsequent slot is the flattened output of
// the corresponding layer ("fcn", "conv", or "pool").
// Fix: the "pool" branch advanced idx without storing the pooled output
// size, leaving that data_split entry uninitialized.
void data_buffer_split(unsigned *data_S, std::vector<std::string> layers,
                       unsigned **W_sizes, unsigned **pool_sizes,
                       unsigned *data_split)
{
    unsigned idx = 0;    // next slot to fill in data_split
    unsigned idx_w = 0;  // next fcn/conv weight descriptor
    unsigned idx_p = 0;  // next pool descriptor
    // Running shape of the data as it flows through the layers.
    unsigned int w_in = data_S[0];
    unsigned int h_in = data_S[1];
    unsigned int d_in = data_S[2];
    data_split[idx] = w_in * h_in * d_in;
    idx += 1;
    for(unsigned int i = 0; i < layers.size(); i++)
    {
        if(!strcmp(layers.at(i).c_str(), "fcn"))
        {
            // Fully connected: output length is the layer's neuron count.
            data_split[idx] = W_sizes[idx_w][1];
            idx_w += 1;
            idx += 1;
        }
        if(!strcmp(layers.at(i).c_str(), "conv"))
        {
            // "Valid" convolution: spatial dims shrink by (kernel - 1),
            // depth becomes the number of output channels.
            w_in = (w_in - W_sizes[idx_w][0] + 1);
            h_in = (h_in - W_sizes[idx_w][1] + 1);
            d_in = W_sizes[idx_w][3];
            data_split[idx] = w_in * h_in * d_in;
            idx_w += 1;
            idx += 1;
        }
        if(!strcmp(layers.at(i).c_str(), "pool"))
        {
            w_in = w_in / pool_sizes[idx_p][0];
            h_in = h_in / pool_sizes[idx_p][1];
            d_in = d_in / pool_sizes[idx_p][2];
            // Fix: record the pooled output size (was omitted, leaving
            // data_split[idx] uninitialized).
            data_split[idx] = w_in * h_in * d_in;
            idx_p += 1;
            idx += 1;
        }
    }
}
/******************************************************************************
* transfer_trainable_parameters_to_gpu: transferring all trainable parameters
* from the cput to the gpu (kernels' weights and biases)
*
* Arguments:
* layers: vector of layers that are part of network
* weights: pointer to the pointers to the fcn or/and cnn kernels' weights
* biases: pointer to the pointers to the fcn or/and cnn biases
* W_sizes: pointer to the kernels' sizes
* b_sizes: pointer to the biases' sizes
* weights_d: pointer to the pointer to the weights at gpu
* biases_d: pointer to the pointer to the biases at gpu
* delta_weights_d: pointer to the pointer to the weights' updates at gpu
* delta_biases_d: pointer to the pointer to the biases' updates at gpu
*****************************************************************************/
// Packs all fcn/conv weights and biases into one flat device buffer each
// (plus same-sized buffers for their updates), then copies the host-side
// per-layer arrays into the right offsets of the flat device buffers.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked.
void transfer_trainable_parameters_to_gpu_(std::vector<std::string> layers,
                                           float **weights, unsigned **W_sizes,
                                           float **biases, unsigned **b_sizes,
                                           float **weights_d, float **biases_d,
                                           float **delta_weights_d,
                                           float **delta_biases_d)
{
    /**************************************************************************
     * 1. Count trainable weights and biases
     **************************************************************************/
    long weights_N = 0;
    long biases_N = 0;
    unsigned idx = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
        if(!strcmp(layers.at(i).c_str(), "fcn") or
           !strcmp(layers.at(i).c_str(), "conv"))
        {
            // Each fcn/conv layer contributes a 4-D weight tensor and a bias vector.
            weights_N += (W_sizes[idx][0] * W_sizes[idx][1] *
                          W_sizes[idx][2] * W_sizes[idx][3]);
            biases_N += b_sizes[idx][0];
            idx += 1;
        }
    /**************************************************************************
     * 2. Allocate memory on gpu for the trainable parameters and its updates
     **************************************************************************/
    cudaMalloc((void **)weights_d, weights_N * sizeof(float));
    cudaMalloc((void **)biases_d, biases_N * sizeof(float));
    cudaMalloc((void **)delta_weights_d, weights_N * sizeof(float));
    cudaMalloc((void **)delta_biases_d, biases_N * sizeof(float));
    /**************************************************************************
     * 3. Transfer trainable parameters
     **************************************************************************/
    // weights_N / biases_N are reused as running offsets into the flat buffers.
    unsigned tmp_w = 0;
    unsigned tmp_b = 0;
    weights_N = 0;
    biases_N = 0;
    idx = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
        if(!strcmp(layers.at(i).c_str(), "fcn") or
           !strcmp(layers.at(i).c_str(), "conv"))
        {
            tmp_w = W_sizes[idx][0] * W_sizes[idx][1] *
                    W_sizes[idx][2] * W_sizes[idx][3];
            cudaMemcpy(&((*weights_d)[weights_N]), weights[idx],
                       tmp_w * sizeof(float), cudaMemcpyHostToDevice);
            weights_N += tmp_w;
            tmp_b = b_sizes[idx][0];
            cudaMemcpy(&((*biases_d)[biases_N]), biases[idx],
                       tmp_b * sizeof(float), cudaMemcpyHostToDevice);
            biases_N += tmp_b;
            idx += 1;
        }
}
/******************************************************************************
* transfer_trainable_parameters_to_cpu: transferring all trainable parameters
* from the gpu to the cpu (kernels' weights and biases)
*
* Arguments:
* layers: vector of layers that are part of network
* weights: pointer to the pointers to the fcn or/and cnn kernels' weights
* biases: pointer to the pointers to the fcn or/and cnn biases
* W_sizes: pointer to the kernels' sizes
* b_sizes: pointer to the biases' sizes
* weights_d: pointer to the pointer to the weights at gpu
* biases_d: pointer to the pointer to the biases at gpu
*****************************************************************************/
// Inverse of transfer_trainable_parameters_to_gpu_: copies each layer's
// slice of the flat device weight/bias buffers back into the per-layer
// host arrays.
// NOTE(review): cudaMemcpy return codes are not checked.
void transfer_trainable_parameters_to_cpu_(std::vector<std::string> layers,
                                           float **weights, unsigned **W_sizes,
                                           float **biases, unsigned **b_sizes,
                                           float **weights_d, float **biases_d)
{
    /**************************************************************************
     * 1. Transfer trainable parameters
     **************************************************************************/
    // weights_N / biases_N are running offsets into the flat device buffers.
    unsigned tmp_w = 0;
    unsigned tmp_b = 0;
    unsigned weights_N = 0;
    unsigned biases_N = 0;
    unsigned idx = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
        if(!strcmp(layers.at(i).c_str(), "fcn") or
           !strcmp(layers.at(i).c_str(), "conv"))
        {
            tmp_w = W_sizes[idx][0] * W_sizes[idx][1] *
                    W_sizes[idx][2] * W_sizes[idx][3];
            cudaMemcpy(weights[idx], &weights_d[0][weights_N],
                       tmp_w * sizeof(float), cudaMemcpyDeviceToHost);
            weights_N += tmp_w;
            tmp_b = b_sizes[idx][0];
            cudaMemcpy(biases[idx], &biases_d[0][biases_N],
                       tmp_b * sizeof(float), cudaMemcpyDeviceToHost);
            biases_N += tmp_b;
            idx += 1;
        }
}
/******************************************************************************
* propagate_forward_gpu_train: propagate training images trough the network
* and save all neuron inputs and outputs for the further training
*
* Arguments:
* train_imgs: input training images
* train_S: size of the input data
* train_neuron_in_d: pointer to the pointer to the array where neuron
* inputs would be placed on the gpu
* train_neuron_out_d: pointer to the pointer to the array where neuron
* outputs would be placed on the gpu
* layers: vector of model's layers
* weights_d: pointer to the pointer to the array of kernels' weights
* on the gpu
* W_sizes: pointer to the kernels' sizes
* biases_d: pointer to the pointer to the array of biases on the gpu
* b_sizes: pointer to the biases' sizes
* pool_sizes: pool strides' sizes
*****************************************************************************/
// Forward pass over the training batch, keeping both pre-activation inputs
// and activations on the GPU for the subsequent backward pass.
// NOTE(review): only "fcn" layers are dispatched in the loop below —
// "conv"/"pool" layers are silently skipped; confirm that is intended.
void propagate_forward_gpu_train(float *train_imgs, unsigned *train_S,
                                 float **train_neuron_in_d,
                                 float **train_neuron_out_d,
                                 std::vector<std::string> layers,
                                 float **weights_d, unsigned **W_sizes,
                                 float **biases_d, unsigned **b_sizes,
                                 unsigned **pool_sizes,
                                 unsigned na)
{
    /**************************************************************************
     * 1. Allocate and transfer training data to gpu
     *************************************************************************/
    unsigned *train_split = new unsigned[layers.size() + 1];
    data_buffer_split(train_S, layers, W_sizes, pool_sizes, train_split);
    // Total activation storage: outputs include the input layer (slot 0),
    // pre-activation inputs do not.
    unsigned int neuron_out_len = 0;
    unsigned int neuron_in_len = 0;
    for(unsigned int i = 0; i < (layers.size() + 1); i++)
    {
        neuron_out_len += train_split[i];
        if(i > 0)
            neuron_in_len += train_split[i];
    }
    // train_S[3] is the batch size.
    neuron_out_len *= train_S[3];
    neuron_in_len *= train_S[3];
    cudaMalloc((void **)train_neuron_out_d, neuron_out_len * sizeof(float));
    cudaMemcpy(train_neuron_out_d[0], train_imgs,
               train_split[0] * train_S[3] * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaMalloc((void **)train_neuron_in_d, neuron_in_len * sizeof(float));
    /**************************************************************************
     * 2. Propagate training data trough layers and store neuron inputs and
     *    outputs
     *************************************************************************/
    // Per-layer starting offsets into the flat buffers: neuron inputs,
    // neuron outputs, weights, and biases.
    long *start_ni = new long[layers.size() + 1];
    long *start_no = new long[layers.size() + 1];
    long *start_w = new long[layers.size() + 1];
    long *start_b = new long[layers.size() + 1];
    start_ni[0] = 0;
    start_no[0] = 0;
    start_w[0] = 0;
    start_b[0] = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
    {
        if(!strcmp(layers.at(i).c_str(), "fcn"))
        {
            // 2-D launch: one axis over output neurons, one over the batch;
            // ceil-divide each dimension by BLOCK_SIZE.
            dim3 th_per_block(BLOCK_SIZE, BLOCK_SIZE);
            unsigned n_blocks_1 = W_sizes[i][1] / BLOCK_SIZE;
            if (W_sizes[i][1] > n_blocks_1 * BLOCK_SIZE)
                n_blocks_1 += 1;
            unsigned n_blocks_2 = train_S[3] / BLOCK_SIZE;
            if (train_S[3] > n_blocks_2 *BLOCK_SIZE)
                n_blocks_2 += 1;
            dim3 blc_per_grid(n_blocks_1, n_blocks_2);
            propagate_forward_fcn_gpu_train<<<blc_per_grid, th_per_block>>>(
                    train_neuron_in_d[0], train_neuron_out_d[0],
                    weights_d[0], biases_d[0], train_S[3],
                    W_sizes[i][0], W_sizes[i][1],
                    start_ni[i], start_no[i], start_w[i], start_b[i], na);
            start_ni[i + 1] = (start_ni[i] + train_S[3] * W_sizes[i][1]);
            start_no[i + 1] = (start_no[i] + train_S[3] * W_sizes[i][0]);
            start_w[i + 1] = (start_w[i] + W_sizes[i][0] * W_sizes[i][1]);
            // NOTE(review): bias offset advances by W_sizes[i][1] rather than
            // b_sizes[i][0] — presumably equal for fcn layers; confirm.
            start_b[i + 1] = (start_b[i] + W_sizes[i][1]);
        }
    }
    /**************************************************************************
     * 3. Release memory
     *************************************************************************/
    delete [] train_split;
    delete [] start_ni;
    delete [] start_no;
    delete [] start_w;
    delete [] start_b;
}
/******************************************************************************
* propagate_forward_gpu_test: propagate testing images trough the network
* and save all neuron outputs
*
* Arguments:
* test_imgs: input testing images
* test_S: size of the input data
* train_neuron_out_d: pointer to the pointer to the array where neuron
* outputs would be placed on the gpu
* layers: vector of model's layers
* weights_d: pointer to the pointer to the array of kernels' weights
* on the gpu
* W_sizes: pointer to the kernels' sizes
* biases_d: pointer to the pointer to the array of biases on the gpu
* b_sizes: pointer to the biases' sizes
* pool_sizes: pool strides' sizes
* scores: pointer to the array where the output scores would be stored
*****************************************************************************/
// Forward pass over the test batch; only activations are kept, and the
// final layer's outputs are copied back to `scores` on the host.
// NOTE(review): only "fcn" layers are dispatched; "conv"/"pool" are skipped.
void propagate_forward_gpu_test(float *test_imgs, unsigned *test_S,
                                float **test_neuron_out_d,
                                std::vector<std::string> layers,
                                float **weights_d, unsigned **W_sizes,
                                float **biases_d, unsigned **b_sizes,
                                unsigned **pool_sizes,
                                float *scores, unsigned na)
{
    /**************************************************************************
     * 1. Allocate and transfer training data to gpu
     *************************************************************************/
    unsigned *test_split = new unsigned[layers.size() + 1];
    data_buffer_split(test_S, layers, W_sizes, pool_sizes, test_split);
    unsigned int neuron_out_len = 0;
    for(unsigned int i = 0; i < (layers.size() + 1); i++)
        neuron_out_len += test_split[i];
    // test_S[3] is the batch size.
    neuron_out_len *= test_S[3];
    cudaMalloc((void **)test_neuron_out_d, neuron_out_len * sizeof(float));
    cudaMemcpy(test_neuron_out_d[0], test_imgs,
               test_split[0] * test_S[3] * sizeof(float),
               cudaMemcpyHostToDevice);
    /**************************************************************************
     * 2. Propagate training data trough layers and store neuron inputs and
     *    outputs
     *************************************************************************/
    // Per-layer starting offsets into the flat buffers (see the train
    // variant of this function for the layout).
    long *start_ni = new long[layers.size() + 1];
    long *start_no = new long[layers.size() + 1];
    long *start_w = new long[layers.size() + 1];
    long *start_b = new long[layers.size() + 1];
    start_ni[0] = 0;
    start_no[0] = 0;
    start_w[0] = 0;
    start_b[0] = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
    {
        if(!strcmp(layers.at(i).c_str(), "fcn"))
        {
            // Ceil-divide output neurons and batch size by BLOCK_SIZE.
            dim3 th_per_block(BLOCK_SIZE, BLOCK_SIZE);
            unsigned n_blocks_1 = W_sizes[i][1] / BLOCK_SIZE;
            if (W_sizes[i][1] > n_blocks_1 * BLOCK_SIZE)
                n_blocks_1 += 1;
            unsigned n_blocks_2 = test_S[3] / BLOCK_SIZE;
            if (test_S[3] > n_blocks_2 *BLOCK_SIZE)
                n_blocks_2 += 1;
            dim3 blc_per_grid(n_blocks_1, n_blocks_2);
            propagate_forward_fcn_gpu_test<<<blc_per_grid, th_per_block>>>(
                    test_neuron_out_d[0],
                    weights_d[0], biases_d[0], test_S[3],
                    W_sizes[i][0], W_sizes[i][1],
                    start_ni[i], start_no[i], start_w[i], start_b[i], na);
            start_ni[i + 1] = (start_ni[i] + test_S[3] * W_sizes[i][1]);
            start_no[i + 1] = (start_no[i] + test_S[3] * W_sizes[i][0]);
            start_w[i + 1] = (start_w[i] + W_sizes[i][0] * W_sizes[i][1]);
            start_b[i + 1] = (start_b[i] + W_sizes[i][1]);
        }
    }
    // Copy the last layer's activations back as the classification scores.
    cudaMemcpy(scores, &test_neuron_out_d[0][start_no[layers.size()]],
               test_S[3] * b_sizes[layers.size() - 1][0] * sizeof(float),
               cudaMemcpyDeviceToHost);
    /**************************************************************************
     * 3. Release memory
     *************************************************************************/
    cudaFree(test_neuron_out_d[0]);
    // NOTE(review): test_neuron_out_d itself is a HOST pointer supplied by
    // the caller — passing it to cudaFree looks erroneous (cudaFree expects
    // a device pointer); confirm and remove.
    cudaFree(test_neuron_out_d);
    delete [] test_split;
    delete [] start_ni;
    delete [] start_no;
    delete [] start_w;
    delete [] start_b;
}
/******************************************************************************
* compute_error_gpu: propagate testing images trough the network
* and save all neuron outputs
*
* Arguments:
* data_gt: data ground truth
* data_S: data size
* data_neuron_in_d: pointer to the pointer to the array where neuron
* inputs would be placed on the gpu
* data_neuron_out_d: pointer to the pointer to the array where neuron
* outputs would be placed on the gpu
* weights_d: pointer to the pointer to the weights at gpu
* delta_weights_d: pointer to the pointer to the weights' updates at gpu
* W_sizes: pointer to the kernels' sizes
* biases_d: pointer to the pointer to the biases at gpu
* delta_biases_d: pointer to the pointer to the biases' updates at gpu
* b_sizes: pointer to the biases' sizes
* pool_sizes: pool strides' sizes
*****************************************************************************/
// Computes the average error of the network outputs against the ground
// truth on the GPU, then frees the activation buffers.
// Returns the averaged error value copied back from the device.
float compute_error_gpu(float *data_gt, unsigned *data_S,
                        float **data_neuron_inputs_d,
                        float **data_neuron_outputs_d,
                        std::vector<std::string> layers,
                        float **weights_d, float **delta_weights_d,
                        unsigned **W_sizes,
                        float **biases_d, float **delta_biases_d,
                        unsigned **b_sizes,
                        unsigned **pool_sizes)
{
    /**************************************************************************
     * 1. Count number of kernels's weights, biases, input and output places
     *************************************************************************/
    // NOTE(review): weights_N, biases_N and delta_N are computed here but
    // only inputs_N/outputs_N are used below.
    long weights_N = 0;
    long biases_N = 0;
    long inputs_N = 0;
    long delta_N = 0;
    long outputs_N = data_S[0] * data_S[1] * data_S[2] * data_S[3];
    unsigned idx = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
        if(!strcmp(layers.at(i).c_str(), "fcn") or
           !strcmp(layers.at(i).c_str(), "conv"))
        {
            weights_N += (W_sizes[idx][0] * W_sizes[idx][1] *
                          W_sizes[idx][2] * W_sizes[idx][3]);
            biases_N += b_sizes[idx][0];
            inputs_N += data_S[3] * W_sizes[idx][1];
            idx += 1;
        }
    outputs_N += inputs_N;
    delta_N = biases_N * data_S[3];
    /**************************************************************************
     * 2. Transfer ground truth to the gpu
     *************************************************************************/
    float *data_gt_d;
    cudaMalloc((void **)&data_gt_d,
               data_S[3] * W_sizes[layers.size() - 1][1] * sizeof(float));
    cudaMemcpy(data_gt_d, data_gt,
               data_S[3] * W_sizes[layers.size() - 1][1] * sizeof(float),
               cudaMemcpyHostToDevice);
    /**************************************************************************
     * 3. Compute the average error
     *************************************************************************/
    float *avg_error = new float[1];
    avg_error[0] = 0;
    float *avg_error_d;
    cudaMalloc((void **)&avg_error_d, sizeof(float));
    cudaMemcpy(avg_error_d, avg_error, sizeof(float), cudaMemcpyHostToDevice);
    // One block per batch sample, one thread per output neuron.
    evaluate_error<<<data_S[3], W_sizes[layers.size() - 1][1]>>>
                    (data_gt_d, data_S[3], W_sizes[layers.size() - 1][1],
                     data_neuron_outputs_d[0], outputs_N,
                     avg_error_d);
    cudaMemcpy(avg_error, avg_error_d, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(data_gt_d);
    cudaFree(data_neuron_inputs_d[0]);
    cudaFree(data_neuron_outputs_d[0]);
    // NOTE(review): the two frees below pass HOST pointers (the caller's
    // float**) to cudaFree — likely erroneous; confirm and remove.
    cudaFree(data_neuron_inputs_d);
    cudaFree(data_neuron_outputs_d);
    // NOTE(review): avg_error_d is never freed (leak).
    float avg_p = avg_error[0];
    delete [] avg_error;
    return avg_p;
}
/******************************************************************************
* propagate_backwards_gpu_train: propagate training images trough the network
* and save all neuron inputs and outputs for the further training
*
* Arguments:
* data_gt: data ground truth
* data_S: data size
* train_neuron_in_d: pointer to the pointer to the array where neuron
* inputs would be placed on the gpu
* train_neuron_out_d: pointer to the pointer to the array where neuron
* outputs would be placed on the gpu
* layers: vector of model's layers
* weights_d: pointer to the pointer to the weights at gpu
* delta_weights_d: pointer to the pointer to the weights' updates at gpu
* W_sizes: pointer to the kernels' sizes
* biases_d: pointer to the pointer to the biases at gpu
* delta_biases_d: pointer to the pointer to the biases' updates at gpu
* b_sizes: pointer to the biases' sizes
* pool_sizes: pool strides' sizes
* learning_rate: rate by which trainable parameters will be updated
*****************************************************************************/
// Backward pass: computes output deltas from the ground truth, backpropagates
// them through the fcn layers, evaluates the batch error, and applies the
// weight/bias updates. Returns the average error copied from the device.
// NOTE(review): only "fcn" layers are handled in the update loops.
float propagate_backwards_gpu_train(float *data_gt, unsigned *data_S,
                                    float **train_neuron_inputs_d,
                                    float **train_neuron_outputs_d,
                                    std::vector<std::string> layers,
                                    float **weights_d,
                                    float **delta_weights_d,
                                    unsigned **W_sizes,
                                    float **biases_d,
                                    float **delta_biases_d,
                                    unsigned **b_sizes,
                                    unsigned **pool_sizes,
                                    float learning_rate,
                                    unsigned na)
{
    /**************************************************************************
     * 1. Allocate memory for weights and biases on GPU and initialize
     *    them with zeros.
     *************************************************************************/
    // Totals over all fcn/conv layers; later reused as running offsets.
    long weights_N = 0;
    long biases_N = 0;
    long inputs_N = 0;
    long delta_N = 0;
    long outputs_N = data_S[0] * data_S[1] * data_S[2] * data_S[3];
    unsigned idx = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
        if(!strcmp(layers.at(i).c_str(), "fcn") or
           !strcmp(layers.at(i).c_str(), "conv"))
        {
            weights_N += (W_sizes[idx][0] * W_sizes[idx][1] *
                          W_sizes[idx][2] * W_sizes[idx][3]);
            biases_N += b_sizes[idx][0];
            inputs_N += data_S[3] * W_sizes[idx][1];
            idx += 1;
        }
    outputs_N += inputs_N;
    delta_N = biases_N * data_S[3];
    /**************************************************************************
     * 2. Transfer ground truth to gpu
     *************************************************************************/
    float *data_gt_d;
    cudaMalloc((void **)&data_gt_d,
               data_S[3] * W_sizes[layers.size() - 1][1] * sizeof(float));
    cudaMemcpy(data_gt_d, data_gt,
               data_S[3] * W_sizes[layers.size() - 1][1] * sizeof(float),
               cudaMemcpyHostToDevice);
    /**************************************************************************
     * 3. Backpropagate the deltas from the cost layer down to the first layer
     *************************************************************************/
    float *delta_x_d;
    cudaMalloc((void **)&delta_x_d, biases_N * data_S[3] * sizeof(float));
    // Output-layer deltas from the cost derivative.
    cost_function_backprop<<<data_S[3], W_sizes[layers.size() - 1][1]>>>
                            (data_gt_d,
                             train_neuron_inputs_d[0], inputs_N,
                             train_neuron_outputs_d[0], outputs_N,
                             delta_x_d, delta_N,
                             data_S[3], W_sizes[layers.size() - 1][1]);
    unsigned N_tot = delta_N;
    // Walk the layers in reverse, shrinking the running offsets so each
    // kernel sees its own layer's slice of the flat buffers.
    for(unsigned i = (layers.size() - 1); i > 0; i--)
    {
        dim3 th_per_block(BLOCK_SIZE, BLOCK_SIZE);
        // NOTE(review): n_blocks_1 is derived from W_sizes[i][0] but the
        // ceiling test checks W_sizes[i][1] — confirm which is intended.
        unsigned n_blocks_1 = W_sizes[i][0] / BLOCK_SIZE;
        if (W_sizes[i][1] > n_blocks_1 * BLOCK_SIZE)
            n_blocks_1 += 1;
        unsigned n_blocks_2 = data_S[3] / BLOCK_SIZE;
        if (data_S[3] > n_blocks_2 *BLOCK_SIZE)
            n_blocks_2 += 1;
        dim3 blc_per_grid(n_blocks_1, n_blocks_2);
        weights_N -= (W_sizes[i][0] * W_sizes[i][1]);
        delta_N -= (data_S[3] * W_sizes[i][1]);
        inputs_N -= (data_S[3] * W_sizes[i][1]);
        backpropagate_fcn_gpu_train<<<blc_per_grid, th_per_block>>>
                                     (weights_d[0], weights_N,
                                      delta_x_d, delta_N,
                                      train_neuron_inputs_d[0], inputs_N,
                                      data_S[3], W_sizes[i][0], W_sizes[i][1], na);
    }
    /**************************************************************************
     * 4. Compute error
     *************************************************************************/
    float *avg_error = new float[1];
    avg_error[0] = 0;
    float *avg_error_d;
    cudaMalloc((void **)&avg_error_d, sizeof(float));
    cudaMemcpy(avg_error_d, avg_error, sizeof(float), cudaMemcpyHostToDevice);
    evaluate_error<<<data_S[3], W_sizes[layers.size() - 1][1]>>>
                    (data_gt_d, data_S[3], W_sizes[layers.size() - 1][1],
                     train_neuron_outputs_d[0], outputs_N,
                     avg_error_d);
    cudaMemcpy(avg_error, avg_error_d, sizeof(float), cudaMemcpyDeviceToHost);
    /**************************************************************************
     * 5. Update weights and biases
     *************************************************************************/
    // Offsets restart from zero and advance layer by layer (forward order).
    weights_N = 0;
    biases_N = 0;
    outputs_N = 0;
    delta_N = 0;
    for(unsigned int i = 0; i < layers.size(); i++)
    {
        if(!strcmp(layers.at(i).c_str(), "fcn"))
        {
            dim3 th_per_block(BLOCK_SIZE, BLOCK_SIZE);
            unsigned n_blocks_1 = W_sizes[i][1] / BLOCK_SIZE;
            if (W_sizes[i][1] > n_blocks_1 * BLOCK_SIZE)
                n_blocks_1 += 1;
            unsigned n_blocks_2 = W_sizes[i][0] / BLOCK_SIZE;
            if (W_sizes[i][0] > n_blocks_2 *BLOCK_SIZE)
                n_blocks_2 += 1;
            dim3 blc_per_grid(n_blocks_1, n_blocks_2);
            update_weights_and_biases<<<blc_per_grid, th_per_block>>>
                                       (weights_d[0], biases_d[0], weights_N, biases_N,
                                        W_sizes[i][0], W_sizes[i][1],
                                        train_neuron_outputs_d[0], outputs_N,
                                        delta_x_d, delta_N,
                                        data_S[3], learning_rate);
            weights_N += (W_sizes[i][0] * W_sizes[i][1]);
            biases_N += W_sizes[i][1];
            outputs_N += W_sizes[i][0] * data_S[3];
            delta_N += W_sizes[i][1] * data_S[3];
        }
    }
    cudaFree(data_gt_d);
    cudaFree(avg_error_d);
    cudaFree(delta_x_d);
    cudaFree(train_neuron_inputs_d[0]);
    cudaFree(train_neuron_outputs_d[0]);
    // NOTE(review): the two frees below pass HOST pointers (the caller's
    // float**) to cudaFree — likely erroneous; confirm and remove.
    cudaFree(train_neuron_inputs_d);
    cudaFree(train_neuron_outputs_d);
    float avg_p = avg_error[0];
    delete [] avg_error;
    return avg_p;
}
|
22,626 | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <cstdlib>
#include <curand.h>
#include <curand_kernel.h>
#include <ctime>
#include <fstream>
//Función que genera una jewel al azar
// Returns a random jewel id for the given difficulty:
//   difficulty 1 -> 1..4, 2 -> 1..6, 3 -> 1..8; any other value -> -1.
// BUG FIX: the original called srand(time(NULL)) on every invocation, so all
// calls within the same second returned the same jewel (e.g. rowBomb refilled
// a whole row with identical values). The RNG must be seeded once at startup
// (initialTablePopulation already does so).
int createJewel(int difficulty) {
	switch (difficulty) {
	case 1:
		return rand() % 4 + 1;
	case 2:
		return rand() % 6 + 1;
	case 3:
		return rand() % 8 + 1;
	default:
		return -1; // unknown difficulty
	}
}
// Fills the whole board (height*width cells) with random jewels.
// Difficulty selects the jewel range: 1 -> 1..4, 2 -> 1..6, 3 -> 1..8.
// Cells are left untouched for any other difficulty value.
void initialTablePopulation(float *table, int difficulty, int width, int height) {
	srand(time(NULL));
	const int cells = height * width;
	for (int cell = 0; cell < cells; cell++) {
		int range = 0;
		switch (difficulty) {
		case 1: range = 4; break;
		case 2: range = 6; break;
		case 3: range = 8; break;
		}
		if (range != 0)
			table[cell] = rand() % range + 1;
	}
}
// Prints the board to stdout, top row first (row height-1 down to row 0),
// one blank line before each row and each cell as an integer.
void printTable(float* table, int width, int height) {
	for (int row = height - 1; row >= 0; row--) {
		std::cout << std::endl;
		for (int col = 0; col < width; col++) {
			printf("%d ", (int)table[col + row * width]);
		}
	}
	std::cout << std::endl;
}
// Removes a recorded run of matched jewels and refills the vacated cells.
//
// jewelsToErase holds (x, y) coordinate pairs, terminated by a -1 sentinel
// (or filled to max*2 entries, max = max(width, height)). If the first two
// pairs share the same x (jewelsToErase[0] == jewelsToErase[2]) the run is
// treated as vertical, otherwise horizontal. Jewels above the erased run
// shift down and exposed cells get fresh random values in the range picked
// by `difficulty` (1 -> 1..4, 2 -> 1..6, 3 -> 1..8).
// NOTE(review): this function reseeds rand() with the current second on
// every call, and the local `value` below is computed but never used.
void eraseJewels(float* table, float* jewelsToErase, int difficulty, int width, int height) {
	int max = 0;
	if (height >= width) max = height;
	else max = width;
	int end = 0;
	bool altered = false;
	// Find the first unwritten slot (-1 sentinel) so we know how many
	// coordinates were actually recorded.
	for (int i = 0; i < max * 2; i++) {
		if (jewelsToErase[i] < 0) {
			end = i;
			altered = true;
			break;
		}
	}
	// No sentinel found: every slot was written.
	if (!altered) end = max * 2;
	srand(time(NULL));
	if (jewelsToErase[0] != jewelsToErase[2]) {
		// Horizontal run: shift each column of the run down by one row,
		// refilling the row that fell off the top edge.
		for (int y = jewelsToErase[1]; y < height; y++) {
			for (int x = jewelsToErase[0]; x <= jewelsToErase[end - 2]; x++) {
				if (y + 1 < height) {
					table[x + (y)*(width)] = table[x + (y + 1)*width];
					switch (difficulty) {
					case 1: {
						int randomJewel = rand() % 4 + 1;
						table[x + (y+1)*width] = randomJewel;
						break;
					}
					case 2: {
						int randomJewel = rand() % 6 + 1;
						table[x + (y+1)*width] = randomJewel;
						break;
					}
					case 3: {
						int randomJewel = rand() % 8 + 1;
						table[x + (y+1)*width] = randomJewel;
						break;
					}
					}
				}
				else {
					// Top row: nothing above to pull down, generate anew.
					switch (difficulty) {
					case 1: {
						int randomJewel = rand() % 4 + 1;
						table[x + y*width] = randomJewel;
						break;
					}
					case 2: {
						int randomJewel = rand() % 6 + 1;
						table[x + y*width] = randomJewel;
						break;
					}
					case 3: {
						int randomJewel = rand() % 8 + 1;
						table[x + y*width] = randomJewel;
						break;
					}
					}
				}
			}
		}
	}else{
		// Vertical run (all recorded pairs share the same x).
		// NOTE(review): `spot`/`value` look like leftovers — `value` is
		// never read after this point.
		int spot = jewelsToErase[0] + jewelsToErase[1] * width;
		float value = table[spot];
		for (int y = jewelsToErase[1]; y < height; y++) {
			for (int x = jewelsToErase[0]; x <= jewelsToErase[end - 2]; x++) {
				if (y < height) {
					// end/2 = number of erased jewels; cells at or above the
					// run drop by that many rows, the rest are regenerated.
					if (y >= jewelsToErase[end-2]) {
						table[x + (y-end/2)*(width)] = table[x + (y)*width];
						switch (difficulty) {
						case 1: {
							int randomJewel = rand() % 4 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						case 2: {
							int randomJewel = rand() % 6 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						case 3: {
							int randomJewel = rand() % 8 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						}
					}
					else {
						switch (difficulty) {
						case 1: {
							int randomJewel = rand() % 4 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						case 2: {
							int randomJewel = rand() % 6 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						case 3: {
							int randomJewel = rand() % 8 + 1;
							table[x + (y)*width] = randomJewel;
							break;
						}
						}
					}
				}
			}
		}
	}
}
// Manual-mode match detection around cell (x, y): scans the row (left/right)
// and, failing that, the column (below/above) for a run of 3+ equal jewels,
// records their coordinates as (x, y) pairs in jewelsToErase, and lets
// eraseJewels remove and refill them.
// BUG FIX: the sentinel initialization only covered `max` of the `2*max`
// slots, so eraseJewels could read uninitialized memory when searching for
// the -1 terminator.
// NOTE(review): the row scans guard with linear-index bounds (x-1+y*width),
// which can wrap across row boundaries for the first comparison — confirm
// intended behaviour before tightening.
void manualModeTableAnalysis(int difficulty, float* table, int width, int height, int x, int y) {
	int max = 0;
	int size = width*height;
	if (height >= width) max = height;
	else max = width;
	// At most `max` jewels are erased; each uses two slots (x, y).
	float* jewelsToErase = (float*)malloc(2 * max * sizeof(float));
	for (int i = 0; i < 2 * max; i++) {
		jewelsToErase[i] = -1;
	}
	int leftHndPotentialJewels = 0;
	int rightHndPotentialJewels = 0;
	// Scan to the left.
	if ((x - 1 + y*width >= 0) && table[x - 1 + y*width] == table[x + y*width]) {
		int i = 1;
		while ((x - i + y*width >= 0) && (x -i>=0) && table[x - i + y*width] == table[x + y*width]) {
			leftHndPotentialJewels++;
			i++;
		}
	}
	// Scan to the right.
	if ((x + 1 + y*width <= size) && table[x + 1 + y*width] == table[x + y*width]) {
		int i = 1;
		while ((x + i + y*width <= size) && (x + i < width) && table[x + i + y*width] == table[x + y*width]) {
			rightHndPotentialJewels++;
			i++;
		}
	}
	// A horizontal elimination is possible.
	if (1 + leftHndPotentialJewels + rightHndPotentialJewels >= 3) {
		int stride = 0;
		for (int j = leftHndPotentialJewels; j >= (1); j--) {
			jewelsToErase[stride] = x - j;
			jewelsToErase[stride + 1] = y;
			stride += 2;
		}
		jewelsToErase[leftHndPotentialJewels*2] = x;
		jewelsToErase[leftHndPotentialJewels*2+1] = y;
		stride = 2;
		for (int k = 1; k <= rightHndPotentialJewels; k++) {
			jewelsToErase[stride + leftHndPotentialJewels*2] = x + k;
			jewelsToErase[stride + leftHndPotentialJewels*2 + 1] = y;
			stride += 2;
		}
	}
	else { // Column scan.
		int potentialJewelsOver = 0;
		int potentialJewelsBelow = 0;
		// Scan below.
		if ((x + (y - 1)*width >= 0) && table[x + (y - 1)*width] == table[x + y*width]) {
			int i = 1;
			while ((x + (y - i)*width >= 0) && table[x + (y - i)*width] == table[x + y*width]) {
				potentialJewelsBelow++;
				i++;
			}
		}
		// Scan above.
		if ((x + 1 + y*width <= size) && table[x + (y + 1)*width] == table[x + y*width]) {
			int i = 1;
			while ((x + (y + i)*width <= size) && table[x + (y + i)*width] == table[x + y*width]) {
				potentialJewelsOver++;
				i++;
			}
		}
		// A vertical elimination is possible.
		if (1 + potentialJewelsBelow + potentialJewelsOver >= 3) {
			int stride = 0;
			for (int j = potentialJewelsBelow; j >= (1); j--) {
				jewelsToErase[stride] = x;
				jewelsToErase[stride + 1] = y - j;
				stride += 2;
			}
			jewelsToErase[potentialJewelsBelow*2] = x;
			jewelsToErase[potentialJewelsBelow*2+1] = y;
			stride = 2;
			for (int k = 1; k <= potentialJewelsOver; k++) {
				jewelsToErase[stride + potentialJewelsBelow*2] = x;
				jewelsToErase[stride + potentialJewelsBelow*2 + 1] = y + k;
				stride += 2;
			}
		}
	}
	eraseJewels(table, jewelsToErase, difficulty, width, height);
	free(jewelsToErase);
}
// Swaps the jewel at (jewel1X, jewel1Y) with its neighbour in `direction`
// (1 = up, 2 = down, 3 = left, 4 = right). In manual mode (selection == 2)
// the resulting neighbour position is analysed for matches afterwards.
// The caller is responsible for validating that the move stays on the board.
void switchSpots(float* table, int jewel1X, int jewel1Y, int direction, int width, int height, int selection, int difficulty) {
	int jewel2_x = jewel1X;
	int jewel2_y = jewel1Y;
	switch (direction)
	{
	case 1: // Up
	{
		jewel2_y += 1;
		break;
	}
	case 2: // Down
	{
		jewel2_y -= 1;
		break;
	}
	case 3: // Left
	{
		jewel2_x -= 1;
		break;
	}
	case 4: // Right
	{
		jewel2_x += 1;
		break;
	}
	}
	// BUG FIX: the swap temporary was an int, silently truncating the float
	// cell value.
	float aux1;
	aux1 = table[jewel2_x + jewel2_y*width];
	table[jewel2_x + jewel2_y*width] = table[jewel1X + jewel1Y*width];
	table[jewel1X + jewel1Y*width] = aux1;
	if (selection == 2)
		manualModeTableAnalysis(difficulty, table, width, height, jewel2_x, jewel2_y);
}
//Función de análisis del tablero en modo automático en CPU
// Automatic-mode move: for every cell, counts the horizontal run that would
// form if the cell two to the right already matched, picks the best cell,
// swaps it rightwards, and erases the resulting run when it is 3+ long.
// BUG FIX: the sentinel initialization only covered `max` of the `2*max`
// slots, so eraseJewels could read uninitialized memory; the coordinate
// writes are now also bounds-guarded so long runs cannot overflow the array.
// NOTE(review): the inner run scan only checks the linear index against
// `size`, not `x + i < width`, so runs can wrap into the next row — kept
// as-is to preserve behaviour.
void autoModeTableAnalisis(int difficulty, float* table, int width, int height) {
	int max = 0;
	int size = width*height;
	int rightHndPotentialJewels = 0;
	if (height >= width) max = height;
	else max = width;
	// At most `max` jewels are erased; each uses two slots (x, y).
	float* jewelsToErase = (float*)malloc(2 * max * sizeof(float));
	// Auxiliary table scoring the best candidate cell.
	float* auxTable = (float*)malloc(size * sizeof(float));
	for (int i = 0; i < 2 * max; i++) {
		jewelsToErase[i] = -1;
	}
	for (int y = 0; y < height; y++) {
		for (int x = 0; x < width; x++) {
			rightHndPotentialJewels = 0;
			// Can a match form to the right?
			if ((x + 2) < width) {
				if (((x + 2) + y*width <= size) && table[x + 2 + y*width] == table[x + y*width]) {
					int i = 2;
					while ((x + i + y*width <= size) && table[x + i + y*width] == table[x + y*width]) {
						rightHndPotentialJewels++;
						i++;
					}
					auxTable[x + y*width] = rightHndPotentialJewels + 1;
				}
				else {
					auxTable[x + y*width] = 1;
				}
			}
			else {
				auxTable[x + y*width] = 1;
			}
		}
	}
	// Pick the cell with the longest potential run.
	int bestX = 0;
	int bestY = 0;
	int bestValue = 0;
	for (int y = 0; y < height; y++) {
		for (int x = 0; x < width; x++) {
			if (auxTable[x + y*width] > bestValue) {
				bestX = x;
				bestY = y;
				bestValue = auxTable[x + y*width];
			}
		}
	}
	switchSpots(table, bestX, bestY, 4, width, height, 1, difficulty);
	// An elimination is possible.
	if (bestValue >= 3) {
		jewelsToErase[0] = bestX;
		jewelsToErase[1] = bestY;
		int stride = 2;
		for (int j = 1; j <= (bestValue) && stride + 1 < 2 * max; j++) {
			jewelsToErase[stride] = bestX + j;
			jewelsToErase[stride + 1] = bestY;
			stride += 2;
		}
	}
	eraseJewels(table, jewelsToErase, difficulty, width, height);
	free(jewelsToErase);
	free(auxTable);
}
// Reads board metadata (width.txt / height.txt / difficulty.txt) into the
// output parameters and verifies that the board save file `row` exists.
// Returns true when every file could be opened; otherwise reports the
// missing file and returns false.
// CONSISTENCY FIX: errors now all go to std::cerr (the original mixed
// std::cerr for the first check with std::cout for the rest).
bool preloadGame(int& width, int& height, int& difficulty, char* row)
{
	std::ifstream fwidth("width.txt");
	if (!fwidth.is_open())
	{
		std::cerr << "ERROR: Archivo de guardado (width.txt) no encontrado." << std::endl;
		return false;
	}
	fwidth >> width;
	fwidth.close();
	std::ifstream fheight("height.txt");
	if (!fheight.is_open())
	{
		std::cerr << "ERROR: Archivo de guardado (height.txt) no encontrado." << std::endl;
		return false;
	}
	fheight >> height;
	fheight.close();
	std::ifstream fdifficulty("difficulty.txt");
	if (!fdifficulty.is_open())
	{
		std::cerr << "ERROR: Archivo de guardado (difficulty.txt) no encontrado." << std::endl;
		return false;
	}
	fdifficulty >> difficulty;
	fdifficulty.close();
	// Only check the board file for existence here; loadFile reads it.
	std::ifstream fLoad(row);
	if (!fLoad.is_open())
	{
		std::cerr << "ERROR: Archivo de guardado no encontrado." << std::endl;
		return false;
	}
	fLoad.close();
	return true;
}
// Loads a board previously written by saveFile: one line of width*height
// single-ASCII-digit jewel values.
// ROBUSTNESS FIX: the original read from the stream without checking that it
// opened, filling the table from an uninitialized buffer when the file was
// missing; now the table is left untouched in that case.
void loadFile(int width, int height, float* table, char* row)
{
	std::ifstream fLoad(row);
	if (!fLoad.is_open())
		return; // nothing to load; callers normally run preloadGame first
	char* array = (char*)malloc(width*height + 1);
	fLoad.getline(array, width*height + 1);
	for (int i = 0; i < width*height; i++)
	{
		// Each cell is stored as one ASCII digit.
		table[i] = (float)(array[i] - '0');
	}
	free(array);
	fLoad.close();
}
// Persists the game: the board dimensions and difficulty each go to their
// own text file, and the board itself is written to `row` as one digit per
// cell with no separators (matching what loadFile expects).
void saveFile(float* table, int width, int height, int difficulty, char* row)
{
	std::ofstream rowWidth("width.txt");
	rowWidth.clear();
	rowWidth << width;
	rowWidth.close();
	std::ofstream rowHeight("height.txt");
	rowHeight.clear();
	rowHeight << height;
	rowHeight.close();
	std::ofstream rowDifficulty("difficulty.txt");
	rowDifficulty.clear();
	rowDifficulty << difficulty;
	rowDifficulty.close();
	std::ofstream saveF(row);
	saveF.clear();
	for (int cell = 0; cell < width * height; cell++)
		saveF << table[cell];
	saveF.close();
}
// Row bomb: deletes row `row` by shifting every row above it down one step;
// cells whose source would fall off the top edge get fresh random jewels.
// BUG FIX: the source cell's row offset was scaled by `height` instead of
// `width`, reading the wrong cell (and out of bounds) on non-square boards.
void rowBomb(float* table, int width, int height, int difficulty, int row) {
	for (int i = 0; (i + row) < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			if ((i + row + 1) < height)
			{
				table[(i + row)*width + j] = table[(i + row + 1)*width + j];
			}
			else {
				table[(i + row)*width + j] = createJewel(difficulty);
			}
		}
	}
}
// Column bomb: deletes column `column` by shifting the columns to its left
// one position to the right.
// BUG FIX: the source cell's row offset used `i*height` instead of
// `i*width`, reading the wrong cell on non-square boards.
// NOTE(review): the (column - j - 1) < 0 refill branch is unreachable —
// the loop condition guarantees column - j >= 1 — so column 0 keeps its old
// value instead of being refilled; kept as-is pending confirmation.
void columnBomb(float* table, int width, int height, int difficulty, int column) {
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; (column - j) > 0; j++)
		{
			if ((column - j - 1) < 0)
			{
				table[(i*width) + (column - j)] = createJewel(difficulty);
			}
			else {
				table[(i*width) + (column - j)] = table[(i*width) + (column - j - 1)];
			}
		}
	}
}
// 3x3 rotation bomb: rotates the 3x3 neighbourhood centred on (row, column)
// by 90 degrees. No bounds checking — the caller must keep the pivot at
// least one cell away from every board edge.
void pivotBomb(float* table, int width, int height, int row, int column)
{
	float rotated[9];
	int k = 0;
	// Read the block column by column, bottom row first; written back row by
	// row this ordering yields the rotated layout.
	for (int c = column - 1; c <= column + 1; c++)
		for (int r = row + 1; r >= row - 1; r--)
			rotated[k++] = table[r * width + c];
	k = 0;
	for (int r = 0; r < 3; r++)
		for (int c = 0; c < 3; c++)
			table[(row - 1 + r) * width + (column - 1) + c] = rotated[k++];
}
// Game entry point. Configuration comes either interactively (no arguments)
// or from the command line: program -a|-m <difficulty> <width> <height>.
// Runs the menu loop: swap jewels (manual or automatic), save, load, bombs.
// BUG FIXES: the direction and bomb validations used `&&` where `||` was
// needed (so invalid low values slipped through), and the "up" bound check
// compared against `height` (never true) instead of `height - 1`.
int main(int argc, char** argv) {
	// Dynamic board of width*height float cells.
	int width;
	int height;
	int difficulty;
	char mode;
	int size;
	char saveF[9] = "save.txt";
	int selection;
	float *table;
	// Game configuration input.
	if (argc == 1)
	{
		std::cout << "Anchura del tablero: ";
		std::cin >> width;
		std::cout << "Altura del tablero: ";
		std::cin >> height;
		std::cout << "Elija dificultad:"<<std::endl<<"\t1.-\tFacil"<<std::endl<<"\t2.-\tMedia"<<std::endl<<"\t3.-\tDificil\n";
		std::cin >> difficulty;
		std::cout << "Jugar automaticamente?"<<std::endl<<"\t1.-\tSI"<<std::endl<<"\t2.-\tNO"<<std::endl;
		std::cin >> selection;
	}
	else
	{
		mode = argv[1][1];
		difficulty = atoi(argv[2]);
		width = atoi(argv[3]);
		height = atoi(argv[4]);
		switch (mode) {
		case 'a': {selection = 1;
			break; }
		case 'm': {selection = 2;
			break; }
		default: std::cerr<<"ERROR: Valor de modo inválido. Por favor, inserte 'a' o 'm' como modo de juego en la línea de comandos."<<std::endl;
			return -1;
		}
	}
	bool playing = true;
	size = width*height;
	table = (float*)malloc(size * sizeof(float));
	// Initialize the board.
	initialTablePopulation(table, difficulty, width, height);
	// Main game loop.
	while (playing) {
		printTable(table, width, height);
		int jewel1X = 0;
		int jewel1Y = 0;
		int command = 0;
		std::cout << "Acción a realizar:"<<std::endl;
		std::cout << "1.-\tIntercambiar Jewels"<<std::endl;
		std::cout << "2.-\tGuardar partida"<<std::endl;
		std::cout << "3.-\tCargar partida"<<std::endl;
		std::cout << "9.-\tUsar una bomba"<<std::endl;
		std::cout << "0.-\tSalir"<<std::endl;
		std::cout << "Inserte seleccion: ";
		std::cin >> command;
		switch (command) {
		case 0: {
			free(table);
			return 0;
		}
		case 1: {
			if (selection == 2)
			{
				std::cout << "Posicion de la jewel (el primer valor es 0):"<<std::endl;
				std::cout << "\tColumna: ";
				std::cin >> jewel1X;
				std::cout << "\tFila: ";
				std::cin >> jewel1Y;
				if (!((jewel1X < width) && (jewel1X >= 0) && (jewel1Y < height) && (jewel1Y >= 0))) {
					printf("Posicion invalida.\n");
					continue;
				}
				int direction = 0;
				std::cout << "Direccion del movimiento:<<"<<std::endl<<"\t1.-\tArriba"<<std::endl<<"\t2.-\tAbajo"<<std::endl<<"\t3.-\tIzquierda"<<std::endl<<"\t4.-\tDerecha"<<std::endl;
				std::cin >> direction;
				// BUG FIX: was `direction > 4 && direction > 1`, which never
				// rejected values below 1.
				if (direction > 4 || direction < 1) {
					printf("Movimiento invalido.\n");
					continue;
				}
				else {
					switch (direction)
					{
					case 1: // Up
					{
						// BUG FIX: the top row is height - 1; the original
						// compared against height, which can never match.
						if (jewel1Y == height - 1)
						{
							printf("No se puede realizar el intercambio especificado.\n");
							continue;
						}
						break;
					}
					case 2: // Down
					{
						if (jewel1Y == 0)
						{
							printf("No se puede realizar el intercambio especificado.\n");
							continue;
						}
						break;
					}
					case 3: // Left
					{
						if (jewel1X == 0)
						{
							printf("No se puede realizar el intercambio especificado.\n");
							continue;
						}
						break;
					}
					case 4: // Right
					{
						if (jewel1X == width - 1)
						{
							printf("No se puede realizar el intercambio especificado.\n");
							continue;
						}
						break;
					}
					}
				}
				// Swap the jewels as requested.
				switchSpots(table, jewel1X, jewel1Y, direction, width, height, selection, difficulty);
			}
			else if (selection == 1)
			{
				// Automatic mode.
				autoModeTableAnalisis(difficulty, table, width, height);
			}
			break;
		}
		case 2: {
			saveFile(table, width, height, difficulty, saveF);
			std::cout << "Guardado realizado."<<std::endl;
			break;
		}
		case 3: {
			// Preload the board metadata; width/height/difficulty updated.
			int found = preloadGame(width, height, difficulty, saveF);
			size = width*height;
			if (found)
			{
				free(table);
				table = (float*)malloc(size * sizeof(float));
				// Load the board itself.
				loadFile(width, height, table, saveF);
				std::cout << "Juego automatico?"<<std::endl<<"\t1.-\tSI"<<std::endl<<"\t2.-\tNO"<<std::endl;
				std::cin >> selection;
				std::cout << "Se ha cargado el estado del tablero:"<<std::endl;
			}
			else {
				std::cout << "No hay partidas guardadas."<<std::endl;
			}
			break;
		}
		case 9: {
			int bomb = 0;
			int row = 0; int column = 0;
			std::cout << "Seleccione el tipo de bomba:"<<std::endl;
			// Available bombs depend on the difficulty.
			switch (difficulty) {
			case 1: {
				std::cout << "1.-\tBomba sobre fila"<<std::endl;
				std::cout << "\tSeleccion: "<<std::endl;
				std::cin >> bomb;
				if (bomb != 1)
				{
					printf("Tipo de bomba inexistente.\n");
					continue;
				}
				std::cout << "X: ";
				std::cin >> row;
				rowBomb(table, width, height, difficulty, row);
				break;
			}
			case 2: {
				std::cout << "1.-\tBomba sobre fila"<<std::endl;
				std::cout << "2.-\tBomba sobre columna"<<std::endl;
				std::cout << "\tSeleccion: "<<std::endl;
				std::cin >> bomb;
				// BUG FIX: was `bomb < 1 && bomb > 2` (always false).
				if (bomb < 1 || bomb > 2)
				{
					printf("Tipo de bomba inexistente.\n");
					continue;
				}
				switch (bomb) {
				case 1:
				{
					std::cout << "X: ";
					std::cin >> row;
					rowBomb(table, width, height, difficulty, row);
					break;
				}
				case 2:
				{
					std::cout << "Y: ";
					std::cin >> column;
					columnBomb(table, width, height, difficulty, column);
					break;
				}
				}
				break;
			}
			case 3: {
				std::cout << "1.-\tBomba sobre fila"<<std::endl;
				std::cout << "2.-\tBomba sobre columna"<<std::endl;
				std::cout << "3.-\tBomba de rotacion 3x3"<<std::endl;
				std::cout << "\tSeleccion: "<<std::endl;
				std::cin >> bomb;
				// BUG FIX: was `bomb < 1 && bomb > 3` (always false).
				if (bomb < 1 || bomb > 3)
				{
					printf("Tipo de bomba inexistente.\n");
					continue;
				}
				switch (bomb) {
				case 1:
				{
					std::cout << "X: ";
					std::cin >> row;
					rowBomb(table, width, height, difficulty, row);
					break;
				}
				case 2:
				{
					std::cout << "Y: ";
					std::cin >> column;
					columnBomb(table, width, height, difficulty, column);
					break;
				}
				case 3:
				{
					// NOTE(review): `row` iterates against width and
					// `column` against height while the guard compares them
					// the other way round; only consistent on square boards
					// — confirm intended behaviour.
					for (int row = 1; row < width; row += 3)
					{
						for (int column = 1; column < height; column += 3)
						{
							if (!((row - 1) < 0 || (row + 1) >= height || (column - 1) < 0 || (column + 1) >= width))
							{
								pivotBomb(table, width, height, row, column);
							}
						}
					}
					break;
				}
				}
				break;
			}
			}
			break;
		}
		}
	}
	free(table);
	return 0;
}
22,627 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
// Row-major matrix: element (row, col) lives at elements[row * width + col].
typedef struct
{
	int width;       // number of columns
	int height;      // number of rows
	float* elements; // row-major storage, width * height floats
} Matrix;
#define BLOCK_SIZE 2
#define MATRIX_SIZE 2
__global__ void MatMulKernel(const Matrix, const Matrix, const Matrix);
// Computes C = A * B on the GPU: copies A and B to the device, launches
// MatMulKernel, and copies the result back into C.elements (host memory
// allocated by the caller).
// NOTE: the grid uses truncating division, so B.width and A.height must be
// multiples of BLOCK_SIZE for full coverage.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
	Matrix d_A;
	d_A.width = A.width;
	d_A.height = A.height;
	size_t size_a = A.width * A.height * sizeof(float);
	cudaMalloc(&d_A.elements, size_a);
	cudaMemcpy(d_A.elements, A.elements, size_a, cudaMemcpyHostToDevice);
	Matrix d_B;
	d_B.width = B.width;
	d_B.height = B.height;
	size_t size_b = B.width * B.height * sizeof(float);
	cudaMalloc(&d_B.elements, size_b);
	cudaMemcpy(d_B.elements, B.elements, size_b, cudaMemcpyHostToDevice);
	Matrix d_C;
	d_C.width = C.width;
	d_C.height = C.height;
	// BUG FIX: the result buffer was sized from B's dimensions; for
	// non-square operands that mis-sizes C (C is C.height x C.width).
	size_t size_c = C.width * C.height * sizeof(float);
	cudaMalloc(&d_C.elements, size_c);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
	MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy(C.elements, d_C.elements, size_c, cudaMemcpyDeviceToHost);
	printf("MutMul: %f, %f", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
	cudaFree(d_A.elements);
	cudaFree(d_B.elements);
	cudaFree(d_C.elements);
}
// One thread per output element: C[row][col] = dot(row of A, column of B).
// BUG FIX: added a bounds guard so launches whose grid overshoots the
// matrix (any non-exact block fit) no longer write out of bounds.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= C.height || col >= C.width)
		return; // thread falls outside the output matrix
	float Cvalue = 0;
	for (int e=0; e<A.width; ++e)
	{
		Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
	}
	C.elements[row*C.width+col] = Cvalue;
	// Debug-only: device printf is serialized and slow.
	printf("Kernel: Cvalue = %f\n", Cvalue);
}
// Fills ip[0..size) with pseudo-random floats in [0, 25.5]
// (rand() & 0xFF yields 0..255, then divided by 10).
void initialData(float *ip, int size)
{
	time_t now;
	srand((unsigned int) time(&now));
	for (int k = 0; k < size; ++k)
		ip[k] = (float)( rand() & 0xFF )/10.0f;
}
// Builds two random MATRIX_SIZE x MATRIX_SIZE matrices, multiplies them on
// the GPU via MatMul, and prints corner elements of input and result.
int main()
{
	Matrix A;
	A.width = MATRIX_SIZE;
	A.height = MATRIX_SIZE;
	A.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
	initialData(A.elements, MATRIX_SIZE*MATRIX_SIZE);
	Matrix B;
	B.width = MATRIX_SIZE;
	B.height = MATRIX_SIZE;
	B.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
	// BUG FIX: the original re-initialized A.elements here, leaving
	// B.elements uninitialized for the multiplication.
	initialData(B.elements, MATRIX_SIZE*MATRIX_SIZE);
	Matrix C;
	C.width = MATRIX_SIZE;
	C.height = MATRIX_SIZE;
	C.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
	printf("Main1, first element: %f, last element: %f\n", A.elements[0], A.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
	MatMul(A, B, C);
	printf("Main2, first element: %f, last element: %f\n", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
	free(A.elements);
	free(B.elements);
	free(C.elements); // BUG FIX: C.elements was leaked
	return 0;
}
22,628 | #include "includes.h"
// Scales the main diagonal of a width x width row-major matrix by `val`,
// writing into tgtMat's diagonal. Grid-stride loop: any 1D launch shape
// covers all `width` diagonal elements.
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
	const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int stride = blockDim.x * gridDim.x;
	for (unsigned int d = start; d < width; d += stride) {
		const unsigned int diag = width * d + d; // element (d, d)
		tgtMat[diag] = mat[diag] * val;
	}
}
22,629 | #include "includes.h"
// BFS seeding step of a GPU graph-cut (push-relabel style) solver.
// One thread per vertex/pixel. For every vertex still in the active mask:
// interior pixels with remaining source capacity (g_push_reser > 0) get
// BFS height 1; otherwise pixels with remaining sink capacity get height
// -1. Seeded pixels are removed from the active mask.
// NOTE(review): row/col are decomposed with width1 while the border test
// uses width/height — presumably width1/height1 are padded dimensions;
// confirm against the caller before changing.
__global__ void kernel_bfs_t(int *g_push_reser, int *g_sink_weight, int *g_graph_height, bool *g_pixel_mask, int vertex_num, int width, int height, int vertex_num1, int width1, int height1)
{
	int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
	if(thid < vertex_num && g_pixel_mask[thid] == true )
	{
		int col = thid % width1 , row = thid / width1 ;
		// Interior pixel with residual source capacity: source-side seed.
		if(col > 0 && row > 0 && col < width - 1 && row < height - 1 && g_push_reser[thid] > 0 )
		{
			g_graph_height[thid] = 1 ;
			g_pixel_mask[thid] = false ;
		}
		else
			// Otherwise, residual sink capacity marks a sink-side seed.
			if(g_sink_weight[thid] > 0)
			{
				g_graph_height[thid] = -1 ;
				g_pixel_mask[thid] = false ;
			}
	}
}
22,630 | #include "includes.h"
// For each active slice, copies the similarity entry of row `source` into
// row `target`. activeMask maps the i-th active slice to its slice index;
// similarities is row-major with `slices` columns. One thread per active
// slice.
__global__ void copySimilarity(float* similarities, int active_slices, int slices, int* activeMask, int target, int source)
{
	const int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if (tid < active_slices)
	{
		const int slice = activeMask[tid];
		similarities[target * slices + slice] = similarities[source * slices + slice];
	}
}
22,631 | #include "includes.h"
__global__ void _logploss(int nrows, int ncols, float *y, float *dy) {
	/* Similar to softmaxloss, except y is assumed normalized logp and is not overwritten.
	y is layer output, i.e. normalized log probabilities.
	dy is the label matrix: each column is a one-hot vector indicating the correct label.
	On output dy will be the gradient of softmax loss wrt log probabilities.
	*/
	// Grid-stride over columns; each thread processes whole columns.
	int col = threadIdx.x + blockIdx.x * blockDim.x;
	int i0, i1;
	while (col < ncols) {
		i0 = col * nrows;
		i1 = i0 + nrows;
		for (int i=i0; i<i1; i++) {
			// BUG FIX: use expf — the original called the double-precision
			// exp() on float data, forcing a float->double->float round trip
			// per element on the GPU.
			dy[i] = (expf(y[i]) - dy[i]) / ncols;
		}
		col += blockDim.x * gridDim.x;
	}
}
22,632 | #if !defined(_VEICULOS_CU_)
#define _VEICULOS_CU_
// Base vehicle: position (x, y), current speed, length and top speed.
// IDs are offset by 11 (id + 11) in every vehicle type.
class Veiculo{
public:
	// Methods
	__host__ __device__ Veiculo(){};
	__host__ __device__ Veiculo(int id){
		ID = id + 11;
		x = 0;
		y = 0;
		vel = 0;
		tam = 0;   // length, in meters (0 for the generic base)
		vMax = 0;  // top speed, in meters/second
	};
	// Attributes
	int ID, x, y, tam, vel, vMax;
};
// Car: 7 m long, 28 m/s top speed.
class Carro : public Veiculo{
public:
	__host__ __device__ Carro(){};
	__host__ __device__ Carro(int id){
		ID = id+11;
		x = 0;
		y = 0;
		vel = 0;
		tam = 7; // 7 meters long
		vMax = 28; // 28 meters/second top speed
	};
};
// Bus: 14 m long, 23 m/s top speed.
class Onibus : public Veiculo{
public:
	__host__ __device__ Onibus(){};
	__host__ __device__ Onibus(int id){
		ID = id + 11;
		x = 0;
		y = 0;
		vel = 0;
		tam = 14; // 14 meters long
		vMax = 23; // 23 meters/second top speed (original comment wrongly said 28)
	};
};
#endif
|
22,633 | #include <stdio.h>
#include <stdint.h>
#include <string>
#include <cmath>
#include <algorithm>
using namespace std;
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error,\
cudaGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
// CUDA-event stopwatch. Start()/Stop() record events on the default stream;
// Elapsed() synchronizes on the stop event and returns milliseconds.
struct GpuTimer
{
	cudaEvent_t start;
	cudaEvent_t stop;
	GpuTimer()
	{
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
	}
	~GpuTimer()
	{
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
	}
	void Start()
	{
		cudaEventRecord(start, 0);
		cudaEventSynchronize(start);
	}
	void Stop()
	{
		// Asynchronous: the stop event is only waited on in Elapsed().
		cudaEventRecord(stop, 0);
	}
	float Elapsed()
	{
		float elapsed;
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsed, start, stop);
		return elapsed;
	}
};
// Reads a plain-text (P3) PNM image into a freshly malloc'd uchar3 buffer.
// Exits the process on any unsupported input (non-P3 type, max value > 255,
// unreadable file). Caller owns and frees `pixels`.
// NOTE(review): fscanf("%s") into char[3] overflows on a magic token longer
// than 2 characters, and PNM comment lines are not handled — acceptable for
// trusted inputs only.
void readPnm(char * fileName, int &width, int &height, uchar3 * &pixels)
{
	FILE * f = fopen(fileName, "r");
	if (f == NULL)
	{
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	char type[3];
	fscanf(f, "%s", type);
	if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
	{
		fclose(f);
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	fscanf(f, "%i", &width);
	fscanf(f, "%i", &height);
	int max_val;
	fscanf(f, "%i", &max_val);
	if (max_val > 255) // In this exercise, we assume 1 byte per value
	{
		fclose(f);
		printf("Cannot read %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	for (int i = 0; i < width * height; i++)
		fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
	fclose(f);
}
// Writes a plain-text (P3) PNM image. `pixels` may be a carved image stored
// inside a wider buffer: rows are `originalWidth` apart, but only the first
// `width` pixels of each row are emitted. Exits on open failure.
void writePnm(uchar3 *pixels, int width, int height, int originalWidth, char *fileName)
{
	FILE *out = fopen(fileName, "w");
	if (out == NULL)
	{
		printf("Cannot write %s\n", fileName);
		exit(EXIT_FAILURE);
	}
	fprintf(out, "P3\n%i\n%i\n255\n", width, height);
	for (int row = 0; row < height; ++row)
		for (int col = 0; col < width; ++col)
		{
			uchar3 p = pixels[row * originalWidth + col];
			fprintf(out, "%hhu\n%hhu\n%hhu\n", p.x, p.y, p.z);
		}
	fclose(out);
}
int xSobel[3][3] = {{1,0,-1},{2,0,-2},{1,0,-1}};
int ySobel[3][3] = {{1,2,1},{0,0,0},{-1,-2,-1}};
// Follows the minimal seam upward from the cheapest cell of the bottom row,
// storing the chosen column of every row in leastSignificantPixel.
// Ties prefer the directly-above cell, then the left neighbour. Rows of
// `score` are laid out `originalWidth` apart (only `width` columns live).
void trace(int *score, int *leastSignificantPixel, int width, int height, int originalWidth) {
	// Cheapest column of the bottom row is the seam's starting point.
	int row = height - 1;
	int col = 0;
	for (int c = 1; c < width; ++c)
		if (score[row * originalWidth + c] < score[row * originalWidth + col])
			col = c;
	while (row >= 0) {
		leastSignificantPixel[row] = col;
		if (row > 0) {
			const int above = (row - 1) * originalWidth + col;
			const int prevCol = col;
			int best = score[above];
			if (prevCol > 0 && score[above - 1] < best) {
				best = score[above - 1];
				col = prevCol - 1;
			}
			if (prevCol < width - 1 && score[above + 1] < best)
				col = prevCol + 1;
		}
		--row;
	}
}
// Clamped pixel fetch: out-of-range coordinates are clamped to the nearest
// valid row/column, then the value is read from a buffer whose rows are
// `originalWidth` apart.
uint8_t getClosest(uint8_t *pixels, int r, int c, int width, int height, int originalWidth)
{
	const int row = r < 0 ? 0 : (r >= height ? height - 1 : r);
	const int col = c < 0 ? 0 : (c >= width ? width - 1 : c);
	return pixels[row * originalWidth + col];
}
// Energy (priority) of pixel (row, col): |Gx| + |Gy| from the two global
// 3x3 Sobel kernels, sampling neighbours with edge clamping via getClosest.
int computePixelPriority(uint8_t * grayPixels, int row, int col, int width, int height, int originalWidth) {
	int gx = 0, gy = 0;
	for (int dr = 0; dr < 3; ++dr) {
		for (int dc = 0; dc < 3; ++dc) {
			const uint8_t v = getClosest(grayPixels, row - 1 + dr, col - 1 + dc, width, height, originalWidth);
			gx += v * xSobel[dr][dc];
			gy += v * ySobel[dr][dc];
		}
	}
	return abs(gx) + abs(gy);
}
// Luminance conversion, one byte per pixel: 0.299 R + 0.587 G + 0.114 B.
// Input and output are dense width x height buffers (no pitch).
void convertRgb2Gray(uchar3 * inPixels, int width, int height, uint8_t * outPixels) {
	const int total = width * height;
	for (int i = 0; i < total; ++i) {
		outPixels[i] = 0.299f * inPixels[i].x + 0.587f * inPixels[i].y + 0.114f * inPixels[i].z;
	}
}
// Dynamic-programming table for seam carving:
//   score[r][c] = priority[r][c] + min of the three upper neighbours.
// Rows are laid out `originalWidth` apart even when only `width` columns
// are still live.
void computeSeamScoreTable(int *priority, int *score, int width, int height, int originalWidth) {
	// First row has no predecessors.
	for (int c = 0; c < width; ++c)
		score[c] = priority[c];
	for (int r = 1; r < height; ++r) {
		for (int c = 0; c < width; ++c) {
			const int here = r * originalWidth + c;
			const int up = here - originalWidth;
			int best = score[up];
			if (c > 0 && score[up - 1] < best)
				best = score[up - 1];
			if (c < width - 1 && score[up + 1] < best)
				best = score[up + 1];
			score[here] = best + priority[here];
		}
	}
}
// CPU reference seam carving: repeatedly removes the lowest-energy vertical
// seam until the image is targetWidth wide. The result stays inside the
// original-width buffer (rows remain `originalWidth` apart); callers must
// read it with that stride. "Naive" because after each removal the priority
// of every column of the affected row is recomputed, not just the ones the
// seam touched.
void seamCarvingByHostNaive(uchar3 *inPixels, int width, int height, int targetWidth, uchar3* outPixels) {
	GpuTimer timer;
	timer.Start();
	memcpy(outPixels, inPixels, width * height * sizeof(uchar3));
	const int originalWidth = width;
	// allocate memory
	int *priority = (int *)malloc(width * height * sizeof(int));
	int *score = (int *)malloc(width * height * sizeof(int));
	uint8_t *grayPixels= (uint8_t *)malloc(width * height * sizeof(uint8_t));
	// turn input image to grayscale
	convertRgb2Gray(inPixels, width, height, grayPixels);
	// compute pixel priority
	for (int r = 0; r < height; ++r) {
		for (int c = 0; c < width; ++c) {
			priority[r * originalWidth + c] = computePixelPriority(grayPixels, r, c, width, height, width);
		}
	}
	while (width > targetWidth) {
		// compute min seam table
		computeSeamScoreTable(priority, score, width, height, originalWidth);
		// find min index of last row
		int minCol = 0, r = height - 1;
		for (int c = 1; c < width; ++c) {
			if (score[r * originalWidth + c] < score[r * originalWidth + minCol])
				minCol = c;
		}
		// trace and remove seam from last to first row
		for (; r >= 0; --r) {
			// remove seam pixel on row r: shift the remainder left by one
			for (int i = minCol; i < width - 1; ++i) {
				outPixels[r * originalWidth + i] = outPixels[r * originalWidth + i + 1];
				grayPixels[r * originalWidth + i] = grayPixels[r * originalWidth + i + 1];
				priority[r * originalWidth + i] = priority[r * originalWidth + i + 1];
			}
			// update priority of the row processed in the previous iteration
			// (its neighbourhood has just changed)
			if (r < height - 1) {
				for (int affectedCol = 0; affectedCol < width - 1; ++affectedCol) {
					priority[(r + 1) * originalWidth + affectedCol] = computePixelPriority(grayPixels, r + 1, affectedCol, width - 1, height, originalWidth);
				}
			}
			// trace up: continue the seam into the cheapest upper neighbour
			if (r > 0) {
				int aboveIdx = (r - 1) * originalWidth + minCol;
				int min = score[aboveIdx], minColCpy = minCol;
				if (minColCpy > 0 && score[aboveIdx - 1] < min) {
					min = score[aboveIdx - 1];
					minCol = minColCpy - 1;
				}
				if (minColCpy < width - 1 && score[aboveIdx + 1] < min) {
					minCol = minColCpy + 1;
				}
			}
		}
		// top row's priorities were not refreshed inside the loop
		for (int affectedCol = 0; affectedCol < width - 1; ++affectedCol) {
			priority[affectedCol] = computePixelPriority(grayPixels, 0, affectedCol, width - 1, height, originalWidth);
		}
		--width;
	}
	free(grayPixels);
	free(score);
	free(priority);
	timer.Stop();
	float time = timer.Elapsed();
	printf("Processing time (use host): %f ms\n\n", time);
}
// Optimized CPU seam carving: identical to seamCarvingByHostNaive except
// that after removing a seam pixel only the columns within +/-2 of the
// seam's previous column have their priority recomputed (a Sobel window is
// 3 wide, so cells further away are unaffected).
// NOTE: prevMinCol is read only when r < height - 1, which is false on the
// first loop iteration, so its first read always follows the first
// assignment in the trace-up step below.
void seamCarvingByHostOptimized(uchar3 *inPixels, int width, int height, int targetWidth, uchar3* outPixels) {
	GpuTimer timer;
	timer.Start();
	memcpy(outPixels, inPixels, width * height * sizeof(uchar3));
	const int originalWidth = width;
	// allocate memory
	int *priority = (int *)malloc(width * height * sizeof(int));
	int *score = (int *)malloc(width * height * sizeof(int));
	uint8_t *grayPixels= (uint8_t *)malloc(width * height * sizeof(uint8_t));
	// turn input image to grayscale
	convertRgb2Gray(inPixels, width, height, grayPixels);
	// compute pixel priority
	for (int r = 0; r < height; ++r) {
		for (int c = 0; c < width; ++c) {
			priority[r * originalWidth + c] = computePixelPriority(grayPixels, r, c, width, height, width);
		}
	}
	while (width > targetWidth) {
		// compute min seam table
		computeSeamScoreTable(priority, score, width, height, originalWidth);
		// find min index of last row
		int minCol = 0, r = height - 1, prevMinCol;
		for (int c = 1; c < width; ++c) {
			if (score[r * originalWidth + c] < score[r * originalWidth + minCol])
				minCol = c;
		}
		// trace and remove seam from last to first row
		for (; r >= 0; --r) {
			// remove seam pixel on row r
			for (int i = minCol; i < width - 1; ++i) {
				outPixels[r * originalWidth + i] = outPixels[r * originalWidth + i + 1];
				grayPixels[r * originalWidth + i] = grayPixels[r * originalWidth + i + 1];
				priority[r * originalWidth + i] = priority[r * originalWidth + i + 1];
			}
			// update priority only around the seam column of the row below
			if (r < height - 1) {
				for (int affectedCol = max(0, prevMinCol - 2); affectedCol <= prevMinCol + 2 && affectedCol < width - 1; ++affectedCol) {
					priority[(r + 1) * originalWidth + affectedCol] = computePixelPriority(grayPixels, r + 1, affectedCol, width - 1, height, originalWidth);
				}
			}
			// trace up
			if (r > 0) {
				prevMinCol = minCol;
				int aboveIdx = (r - 1) * originalWidth + minCol;
				int min = score[aboveIdx], minColCpy = minCol;
				if (minColCpy > 0 && score[aboveIdx - 1] < min) {
					min = score[aboveIdx - 1];
					minCol = minColCpy - 1;
				}
				if (minColCpy < width - 1 && score[aboveIdx + 1] < min) {
					minCol = minColCpy + 1;
				}
			}
		}
		// refresh the top row around the seam
		for (int affectedCol = max(0, minCol - 2); affectedCol <= minCol + 2 && affectedCol < width - 1; ++affectedCol) {
			priority[affectedCol] = computePixelPriority(grayPixels, 0, affectedCol, width - 1, height, originalWidth);
		}
		--width;
	}
	free(grayPixels);
	free(score);
	free(priority);
	timer.Stop();
	float time = timer.Elapsed();
	printf("Processing time (use host): %f ms\n\n", time);
}
// Mean absolute per-channel difference between two RGB buffers of n pixels.
float computeError(uchar3 * a1, uchar3 * a2, int n)
{
	float total = 0;
	for (int i = 0; i < n; i++)
	{
		total += abs((int)a1[i].x - (int)a2[i].x);
		total += abs((int)a1[i].y - (int)a2[i].y);
		total += abs((int)a1[i].z - (int)a2[i].z);
	}
	return total / (n * 3);
}
/* Concatenate two C strings into a freshly malloc'd buffer.
   Caller owns the returned buffer and must free() it.
   Returns NULL if allocation fails (previously strcpy would have
   dereferenced the NULL and crashed). */
char *concatStr(const char * s1, const char * s2)
{
	char * result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
	if (result == NULL)
		return NULL; // propagate allocation failure to the caller
	strcpy(result, s1);
	strcat(result, s2);
	return result;
}
/* Dump key capacity figures of CUDA device 0 (SM count, memory sizes, ...). */
void printDeviceInfo()
{
	cudaDeviceProp prop;
	CHECK(cudaGetDeviceProperties(&prop, 0));
	printf("**********GPU info**********\n");
	printf("Name: %s\n", prop.name);
	printf("Compute capability: %d.%d\n", prop.major, prop.minor);
	printf("Num SMs: %d\n", prop.multiProcessorCount);
	printf("Max num threads per SM: %d\n", prop.maxThreadsPerMultiProcessor);
	printf("Max num warps per SM: %d\n", prop.maxThreadsPerMultiProcessor / prop.warpSize);
	printf("GMEM: %lu bytes\n", prop.totalGlobalMem);
	printf("CMEM: %lu bytes\n", prop.totalConstMem);
	printf("L2 cache: %i bytes\n", prop.l2CacheSize);
	printf("SMEM / one SM: %lu bytes\n", prop.sharedMemPerMultiprocessor);
	printf("****************************\n\n");
}
/* Usage: <exe> <input.pnm> <output-base> <numSeamsToRemove> [blockDim.x blockDim.y]
   Runs the naive and optimized host seam-carving implementations, compares
   them, and writes both results to PNM files. */
int main(int argc, char ** argv)
{
	if (argc != 4 && argc != 6)
	{
		printf("The number of arguments is invalid\n");
		return EXIT_FAILURE;
	}
	printDeviceInfo();
	// Read input RGB image file
	int width, height;
	uchar3 *inPixels;
	readPnm(argv[1], width, height, inPixels);
	printf("Image size (width x height): %i x %i\n\n", width, height);
	int numSeamRemoved = stoi(argv[3]);
	if (numSeamRemoved <= 0 || numSeamRemoved >= width)
		return EXIT_FAILURE; // invalid ratio
	printf("Number of seam removed: %d\n\n", numSeamRemoved);
	int targetWidth = width - numSeamRemoved;
	// seam carving using host (reference result)
	uchar3 * correctOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	seamCarvingByHostNaive(inPixels, width, height, targetWidth, correctOutPixels);
	// seam carving using optimized host version
	uchar3 * outPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
	seamCarvingByHostOptimized(inPixels, width, height, targetWidth, outPixels);
	// Compute mean absolute error between the two results
	float err = computeError(outPixels, correctOutPixels, width * height);
	printf("Error between device result and host result: %f\n", err);
	// Write results to files
	char *outFileNameBase = strtok(argv[2], "."); // Get rid of extension
	// concatStr mallocs the names; keep them so they can be freed (was leaked)
	char *naiveName = concatStr(outFileNameBase, "_host_naive.pnm");
	char *optimizedName = concatStr(outFileNameBase, "_host_optimized.pnm");
	writePnm(correctOutPixels, targetWidth, height, width, naiveName);
	writePnm(outPixels, targetWidth, height, width, optimizedName);
	free(naiveName);
	free(optimizedName);
	// Free memories
	free(inPixels);
	free(correctOutPixels);
	free(outPixels);
	return 0;
}
|
22,634 | // extern __shared__ uchar3 s_inPixels[];
// int idxR = blockIdx.y * blockDim.y + threadIdx.y;
// int idxC = blockIdx.x * blockDim.x + threadIdx.x;
// int filterPadding = filterWidth/2;
// int shareBlockWidth = blockDim.x + filterPadding;
// int inR = idxR - filterPadding;
// int inC = idxC - filterPadding;
// inR = min(height - 1, max(0, inR));
// inC = min(width - 1, max(0, inC));
// s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x] = inPixels[inR * width + inC];
// if (floorf(threadIdx.x / filterWidth) == 0) {
// inR = idxR;
// inC = idxC + blockDim.x - filterPadding;
// inR = min(height - 1, max(0, inR));
// inC = min(width - 1, max(0, inC));
// s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + (threadIdx.x + blockDim.x)] = inPixels[inR * width + inC];
// }
// if (floorf(threadIdx.y / filterWidth) == 0) {
// inR = idxR + blockDim.y - filterPadding;
// inC = idxC;
// inR = min(height - 1, max(0, inR));
// inC = min(width - 1, max(0, inC));
// s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + threadIdx.x] = inPixels[inR * width + inC];
// }
// if (floorf(threadIdx.y / filterWidth) == 0 && floorf(threadIdx.x / filterWidth) == 0) {
// inR = idxR + blockDim.y - filterPadding;
// inC = idxC + blockDim.x - filterPadding;
// inR = min(height - 1, max(0, inR));
// inC = min(width - 1, max(0, inC));
// s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + (threadIdx.x + blockDim.x)] = inPixels[inR * width + inC];
// }
// __syncthreads();
// if (idxR < height && idxC < width)
// {
// float3 outPixel = make_float3(0, 0, 0);
// for (int fR = 0; fR < filterWidth; fR++){
// for (int fC = 0; fC < filterWidth; fC++){
// float filterVal = filter[fR * filterWidth + fC];
// int inPixelR = threadIdx.y + fR;
// int inPixelC = threadIdx.x + fC;
// uchar3 inPixel = s_inPixels[inPixelR * shareBlockWidth + inPixelC];
// outPixel.x += filterVal * inPixel.x;
// outPixel.y += filterVal * inPixel.y;
// outPixel.z += filterVal * inPixel.z;
// }
// }
// int idx = idxR * width + idxC;
// outPixels[idx] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
// } |
22,635 | /*
CSC501 - Operating System - Spring 2012 - North Carolina State University
HomeWork2 Prob4. See - http://courses.ncsu.edu/csc501/lec/001/hw/hw2/
Author: Salil Kanitkar (sskanitk@ncsu.edu)
For Compiling -
$ make clean ; make a4
For Executing -
$ ./a4 <path-to-log-file> <path-to-process-list-file>
*/
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include <cuda_runtime.h>
#include<sys/types.h>
#include <math.h>
#ifndef _WIN32
#include<sys/time.h>
#endif
/* Uncomment the below line to enable debug prints
*/
//#define VERBOSE 1
#define MAX_LOGFILE_SIZE (1<<20)
#define MAX_LOGLINE_SIZE 100
#define MAX_PROC_NUM 10
#define MAX_PNAME_LEN 50
#define MAX_NUM_THREADS 100*500
#define MAX_NUM_BLOCKS 100
#define MAX_THREADS_PER_BLOCK 400
/* struct to hold process names read from the proclistfile. */
typedef struct _proc_entry_t {
char pname[MAX_PNAME_LEN];
int count;
}proc_entry_t;
/* struct for each thread to put the data calculated by it. */
typedef struct _stats_entry_t {
proc_entry_t proclist[MAX_PROC_NUM];
}stats_entry_t;
/* CUDA device local func for string copy. */
/* CUDA device helper: copy NUL-terminated string s into t (t must be large enough). */
__device__ void dev_mystrcpy(char *t, char *s)
{
	for (; *s != '\0'; ++t, ++s)
		*t = *s;
	*t = '\0';
}
/* CUDA device local func for getting string length. */
/* CUDA device helper: length of a NUL-terminated string, excluding the NUL. */
__device__ int dev_my_strlen(char *src)
{
	const char *p = src;
	while (*p != '\0')
		++p;
	return (int)(p - src);
}
/* CUDA device func for comparing strings. */
/* CUDA device helper: equality comparison only — returns 0 when the strings
   are identical, 1 otherwise (no lexicographic ordering). */
__device__ int dev_my_strcmp(char *s, char *d)
{
	// Fast reject on length mismatch, then compare byte by byte.
	if (dev_my_strlen(s) != dev_my_strlen(d))
		return 1;
	for (int i = 0; s[i] != '\0'; ++i) {
		if (s[i] != d[i])
			return 1;
	}
	return 0;
}
/* The global kernel func.
For the block that a thread is supposed to work with, the below function will calculate the results and populate the corresponding cell
in the dev_stats memory array.
*/
__global__ void dev_calc_stats(char *dev_fileBuf, int *dev_blockStart, int *dev_blockEnd, int numProcs, stats_entry_t *dev_stats, int paddedFileSize, int fileSize)
{
// Each thread scans its own byte range [dev_blockStart[idx], dev_blockEnd[idx]]
// of the log buffer, splits it into '\n'-terminated lines, extracts the name
// field starting at column 16 (up to the first '['), and increments the
// matching per-thread counter in dev_stats[idx].
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int i=0, j=0, k=0, bufSize;
// buf holds this thread's whole byte range in local memory; the host skips any
// configuration whose blockSize is >= 10000, so 10000 bytes suffice.
char buf[10000], logline[MAX_LOGLINE_SIZE], tmp[MAX_PNAME_LEN];
dev_mystrcpy(buf, "");
dev_mystrcpy(logline, "");
dev_mystrcpy(tmp, "");
// Padding threads (empty or out-of-range assignments) do no work.
if (dev_blockStart[idx] > fileSize || dev_blockEnd[idx] > paddedFileSize || dev_blockStart[idx] >= dev_blockEnd[idx])
return;
// Copy the assigned byte range into the thread-local buffer.
for (i=dev_blockStart[idx] ; i <= paddedFileSize && i <= dev_blockEnd[idx] ; i++) {
buf[j++] = dev_fileBuf[i];
}
buf[j] = '\0';
bufSize = j;
i = 0; j = 0;
for (i=0 ; i < bufSize ; i++) {
if (buf[i] == '\n') {
// End of a log line: terminate it; over-long lines are discarded.
// NOTE(review): when j == MAX_LOGLINE_SIZE this writes logline[MAX_LOGLINE_SIZE],
// one element past the end of the array — off-by-one out-of-bounds write.
if (j <= MAX_LOGLINE_SIZE)
logline[j] = '\0';
else {
j = 0;
continue;
}
// The process name starts at offset 16 of the line and ends at the first '['.
k = 0;
while (k+16 < 100 && logline[k+16] != '[') {
tmp[k] = logline[k+16];
k += 1;
}
tmp[k] = '\0';
// Bump the counter of the process whose name matches (reuses j as loop var).
for (j=0 ; j < numProcs ; j++) {
if (dev_my_strcmp(dev_stats[idx].proclist[j].pname, tmp) == 0)
dev_stats[idx].proclist[j].count += 1;
}
j = 0;
}
else {
// Accumulate characters of the current line (silently truncating long lines).
if (j < MAX_LOGLINE_SIZE)
logline[j] = buf[i];
j += 1;
}
}
}
/* One tree-reduction step: thread g folds entries 2*g and 2*g+1 of
   input_stats into output_stats[g] by summing the per-process counts.
   The host relaunches this with halved thread counts until everything
   has been folded into index 0. */
__global__ void reducerFunc(stats_entry_t *input_stats,stats_entry_t *output_stats,int numProcesses,int totalThreads)
{
	unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
	// Only threads whose input pair (2*gid, 2*gid+1) is fully in range take part.
	if (totalThreads > 1 && (gid * 2 + 1) < totalThreads)
	{
		for (int p = 0; p < numProcesses; p++)
		{
			output_stats[gid].proclist[p].count =
				input_stats[2 * gid].proclist[p].count +
				input_stats[2 * gid + 1].proclist[p].count;
		}
	}
}
int main(int argc, char *argv[])
{
// Driver: reads a log file and a process-name list, then sweeps over
// power-of-two (numBlocks, numThreadsPerBlock) configurations. For each
// configuration it partitions the log into per-thread byte ranges on
// newline boundaries, runs dev_calc_stats to count log lines per process,
// reduces the per-thread counts on the GPU with repeated reducerFunc
// launches, prints the counts once (pflag), and reports CPU/GPU timings.
FILE *fp_logfile, *fp_proclist;
char *fileBuf=(char *)malloc(sizeof(char)*MAX_LOGFILE_SIZE);
char *procBuf=(char *)malloc(sizeof(char)*MAX_PROC_NUM*MAX_PNAME_LEN);
int numThreads=0, numBlocks=0, numThreadsPerBlock=0, paddedFileSize=0, blockSize=0;
long fileSize = 0,i = 0;
int *blockStart=0, *blockEnd=0;
int numProcs, count, tot_count, pflag=0,done = 0;
int reducerBlocks, reducerThreadsPerBlock;
int j, k, start,blockLoop,threadLoop;
stats_entry_t *stats=0;
char *pname, proclist[MAX_PROC_NUM][MAX_PNAME_LEN];
char *dev_fileBuf;
int *dev_blockStart, *dev_blockEnd;
stats_entry_t *dev_stats,*dev_reducer_stats;
#ifndef _WIN32
struct timeval t_start, t_end;
#endif
cudaEvent_t dev_t_start, dev_t_end;
float time_elapsed;
if (argc != 3) {
printf("Usage: ./log_stats path-to-log-file path-to-process-list-file\n");
exit(1);
}
if (!(fp_logfile = fopen(argv[1], "r"))) {
printf("Error opening Log File!\n");
exit(1);
}
if (!(fp_proclist = fopen(argv[2], "r"))) {
printf("Error opening Process listing file!\n");
exit(1);
}
/* Read up the proclistfile in a local buffer in memory. */
i = 0;
for (i=0 ; !feof(fp_proclist) ; ) {
i += fread(&(procBuf[i]), 1, 1, fp_proclist);
}
/* Read up the entire logfile in a local buffer in memory. */
i = 0;
for (i=0 ; !feof(fp_logfile) ; ) {
i += fread(&(fileBuf[i]), 1, 1, fp_logfile);
}
fileSize = i;
#ifdef VERBOSE
printf("procListFile:\n%s", procBuf);
#endif
/* Extract out all the process names from proclistfile and populate the proclist array. */
i = 0;
pname = strtok(procBuf, "\n");
while (pname) {
strcpy(proclist[i], pname);
i++;
pname = strtok(NULL, "\n");
}
numProcs = i;
#ifdef VERBOSE
printf("numProcs:%d\n", numProcs);
for (i=0 ; i < numProcs ; i++) {
printf("%s\n", proclist[i]);
}
#endif
// For small inputs, allocate device/host buffers once, outside the
// configuration sweep; large inputs (re)allocate inside the loop below.
if (fileSize < 65536) {
cudaMalloc((void **)&dev_fileBuf, sizeof(char)*MAX_LOGFILE_SIZE);
cudaMemset((void *)dev_fileBuf, 0, sizeof(char)*MAX_LOGFILE_SIZE);
cudaMalloc((void **)&dev_blockStart, sizeof(int)*MAX_NUM_THREADS);
cudaMemset((void *)dev_blockStart, 0, sizeof(int)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_blockEnd, sizeof(int)*MAX_NUM_THREADS);
cudaMemset((void *)dev_blockEnd, 0, sizeof(int)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_stats, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMemset((void *)dev_stats, 0, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_reducer_stats, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMemset((void *)dev_reducer_stats, 0, sizeof(stats_entry_t)*MAX_NUM_THREADS);
blockStart = (int *)malloc(sizeof(int)*MAX_NUM_THREADS);
blockEnd = (int *)malloc(sizeof(int)*MAX_NUM_THREADS);
stats = (stats_entry_t *)malloc(sizeof(stats_entry_t)*(MAX_NUM_THREADS));
}
// Sweep numBlocks = 2, 4, ... < MAX_NUM_BLOCKS.
for (blockLoop = 1; pow((float)2,blockLoop) < MAX_NUM_BLOCKS; blockLoop++)
{
numBlocks = pow((float)2,blockLoop);
/* Vary the number of threads per block by some offset. */
for (threadLoop = 1; pow((float)2,threadLoop) < MAX_THREADS_PER_BLOCK; threadLoop++)
{
numThreadsPerBlock = pow((float)2,threadLoop);
//numBlocks = 25 ; numThreadsPerBlock = 324;
/* The actual number of threads to be used for this run of the program. */
numThreads = numBlocks * numThreadsPerBlock;
if (fileSize > 65535) {
blockStart = (int *)malloc(sizeof(int)*MAX_NUM_THREADS);
blockEnd = (int *)malloc(sizeof(int)*MAX_NUM_THREADS);
stats = (stats_entry_t *)malloc(sizeof(stats_entry_t)*(MAX_NUM_THREADS));
}
for (i=0 ; i < MAX_NUM_THREADS ; i++) {
blockStart[i] = 0;
}
for (i=0 ; i < MAX_NUM_THREADS ; i++) {
blockEnd[i] = 0;
}
for (i=0 ; i < MAX_NUM_THREADS ; i++) {
for (j=0 ; j < numProcs ; j++) {
strcpy(stats[i].proclist[j].pname, "");
stats[i].proclist[j].count = 0;
}
}
// Seed each active thread's stats entry with the process names so the
// kernel can match names without a separate name table.
for (i=0 ; i < numThreads ; i++) {
for (j=0 ; j < numProcs ; j++) {
strcpy(stats[i].proclist[j].pname, proclist[j]);
stats[i].proclist[j].count = 0;
}
}
/* Do padding etc. Adjust the length. */
paddedFileSize = fileSize;
blockSize = (int)fileSize/numThreads;
if ( fileSize%numThreads != 0 ) {
paddedFileSize = fileSize + (numThreads - (fileSize%numThreads));
blockSize = (int)paddedFileSize/numThreads;
memset(&(fileBuf[fileSize]), 0, paddedFileSize - fileSize);
}
// Upper bound 10000 matches the kernel's fixed local buffer size.
if (blockSize < 20 || blockSize >= 10000) { ;
/* If the blockSize falls below 20, then no single block can contain any process name. So skip this invocation.
Uncomment the below line to display the corresponding message in the program output.
*/
/* printf("blockSize:%d numThreads:%d - No legal processing possible for this configuration.!\n", blockSize, numThreads);*/
continue;
}
#ifdef VERBOSE
printf("LogFile:\n%s\n", fileBuf);
printf("fileSize:%d paddedFileSize:%d blockSize:%d\n\n", fileSize, paddedFileSize, blockSize);
#endif
int x;
//int activeThreads;
/* Build up blockStart and blockEnd arrays. They will keep track of start and end of every block for this run.
Each range is extended to the next '\n' so no log line straddles two threads;
leftover threads get empty [paddedFileSize, paddedFileSize] ranges. */
for (i=0, k=0, start=0 ; i < numThreads; i++, j++) {
blockStart[i] = start;
k = 0;
if (start+blockSize >= paddedFileSize) {
blockEnd[i] = paddedFileSize;
//activeThreads = i;
for (x = i+1 ; x < numThreads ; x++) {
blockStart[x] = paddedFileSize;
blockEnd[x] = paddedFileSize;
}
break;
}
if (fileBuf[(start+blockSize)] != '\n') {
k = 1;
while (((start+blockSize+k) <= paddedFileSize) && (fileBuf[start+blockSize+k] != '\n'))
k += 1;
blockEnd[i] = start + blockSize + k;
} else {
blockEnd[i] = start + blockSize;
}
if (blockEnd[i] > paddedFileSize)
blockEnd[i] = paddedFileSize;
if ((blockEnd[i]+1) <= paddedFileSize)
start = blockEnd[i] + 1;
else
start = paddedFileSize;
}
#ifdef VERBOSE
printf("Initialized Data as follows:\n");
for (i=0 ; i < numThreads ; i++) {
printf("Block %d\n", i);
printf("blockStart:%d blockEnd:%d\n", blockStart[i], blockEnd[i]);
for (j=blockStart[i] ; j<blockEnd[i] ; j++) { ;
printf("%c", fileBuf[j]);
}
printf("\nStats:\n");
for (j=0 ; j < numProcs ; j++) {
printf("%s %d\n", stats[i].proclist[j].pname, stats[i].proclist[j].count);
}
printf("\n\n");
}
#endif
if (fileSize > 65536) {
cudaMalloc((void **)&dev_fileBuf, sizeof(char)*MAX_LOGFILE_SIZE);
cudaMemset((void *)dev_fileBuf, 0, sizeof(char)*MAX_LOGFILE_SIZE);
cudaMalloc((void **)&dev_blockStart, sizeof(int)*MAX_NUM_THREADS);
cudaMemset((void *)dev_blockStart, 0, sizeof(int)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_blockEnd, sizeof(int)*MAX_NUM_THREADS);
cudaMemset((void *)dev_blockEnd, 0, sizeof(int)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_stats, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMemset((void *)dev_stats, 0, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMalloc((void **)&dev_reducer_stats, sizeof(stats_entry_t)*MAX_NUM_THREADS);
cudaMemset((void *)dev_reducer_stats, 0, sizeof(stats_entry_t)*MAX_NUM_THREADS);
}
cudaEventCreate(&dev_t_start);
cudaEventCreate(&dev_t_end);
cudaThreadSynchronize();
#ifndef _WIN32
gettimeofday(&t_start, NULL);
#endif
/* Copy the data over to Device's Global Memory. */
cudaMemcpy(dev_fileBuf, fileBuf, sizeof(char)*paddedFileSize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_blockStart, blockStart, sizeof(int)*numThreads, cudaMemcpyHostToDevice);
cudaMemcpy(dev_blockEnd, blockEnd, sizeof(int)*numThreads, cudaMemcpyHostToDevice);
cudaMemcpy(dev_stats, stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyHostToDevice);
// Time only the counting kernel with CUDA events.
cudaEventRecord(dev_t_start, 0);
dev_calc_stats <<< numBlocks, numThreadsPerBlock >>> (dev_fileBuf, dev_blockStart, dev_blockEnd, numProcs, dev_stats, paddedFileSize, fileSize);
cudaEventRecord(dev_t_end, 0);
cudaEventSynchronize(dev_t_end);
cudaEventElapsedTime(&time_elapsed, dev_t_start, dev_t_end );
cudaEventDestroy(dev_t_start);
cudaEventDestroy(dev_t_end);
cudaThreadSynchronize();
//cudaMemcpy(stats, dev_stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyDeviceToHost);
#ifdef VERBOSE
printf("Final Data as follows:\n");
for (i=0 ; i < numThreads ; i++) {
printf("Block %d\n", i);
printf("blockStart:%d blockEnd:%d\n", blockStart[i], blockEnd[i]);
for (j=blockStart[i] ; j<blockEnd[i] ; j++) { ;
printf("%c", fileBuf[j]);
}
printf("\nStats:\n");
for (j=0 ; j < numProcs ; j++) { ;
printf("%s %d\n", stats[i].proclist[j].pname, stats[i].proclist[j].count);
}
printf("\n\n");
}
#endif
// Pairwise GPU reduction: halve the launch size each pass until the totals
// have been folded into dev_stats[0].
done = 0;
reducerBlocks = numBlocks;
reducerThreadsPerBlock = numThreadsPerBlock;
cudaMemcpy(dev_reducer_stats, dev_stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyDeviceToDevice);
while (done == 0)
{
cudaMemcpy(stats, dev_stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyDeviceToHost);
/*for (i=0 ; i < numThreads ; i++) {
printf("\nBefore Reduction: Thread %d - ",i);
for (j=0 ; j < numProcs ; j++) {
printf("%s %d\n", stats[i].proclist[j].pname, stats[i].proclist[j].count);
}
}*/
if (reducerThreadsPerBlock == 1)
{
reducerBlocks = reducerBlocks / 2;
}
else
{
reducerThreadsPerBlock = reducerThreadsPerBlock / 2;
}
if (reducerThreadsPerBlock == 1 && reducerBlocks == 1)
{
done = 1;
}
//printf("Reducing %d,%d\n",reducerBlocks,reducerThreadsPerBlock);
reducerFunc <<< reducerBlocks,reducerThreadsPerBlock >>> (dev_stats,dev_reducer_stats,numProcs,numBlocks * numThreadsPerBlock);
cudaMemcpy(dev_stats, dev_reducer_stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(stats, dev_stats, sizeof(stats_entry_t)*numThreads, cudaMemcpyDeviceToHost);
/*for (i=0 ; i < numThreads ; i++) {
printf("\nAfter Reduction Thread %d - ",i);
for (j=0 ; j < numProcs ; j++) {
printf("%s %d\n", stats[i].proclist[j].pname, stats[i].proclist[j].count);
}
}*/
/* Aggregate the results calculated by each block.
tot_count = 0;
for (j=0 ; j < numProcs ; j++) {
count = 0;
for (i=0 ; i < numThreads ; i++) {
count += stats[i].proclist[j].count;
}
if (!pflag)
printf("pName: %s count: %d\n", stats[0].proclist[j].pname, count);
tot_count += count;
}
if (!pflag)
printf("Total Number of loglines: %d\n", tot_count);*/
// After the reduction, stats[0] holds the grand totals; print them once.
tot_count = 0;
for (j=0 ; j < numProcs ; j++) {
if (!pflag)
printf("pName: %s count: %d\n", stats[0].proclist[j].pname, stats[0].proclist[j].count);
tot_count += stats[0].proclist[j].count;
}
if (!pflag)
printf("Total Number of loglines: %d\n", tot_count);
#ifndef _WIN32
gettimeofday(&t_end, NULL);
printf("blockSize:%d numThreads:%d totalCount:%d CPUTime:%8ld GPUTime:%f %d %d\n", blockSize, numThreads, tot_count, t_end.tv_usec - t_start.tv_usec + (t_end.tv_sec*1000000 - t_start.tv_sec*1000000),time_elapsed, numBlocks, numThreadsPerBlock);
#else
printf("blockSize:%d numThreads:%d totalCount:%d GPUTime:%f\n", blockSize, numThreads, tot_count,time_elapsed);
#endif
if (!pflag)
pflag = 1;
if (fileSize > 65536) {
cudaFree(dev_stats);
cudaFree(dev_blockStart);
cudaFree(dev_blockEnd);
cudaFree(dev_fileBuf);
}
if (fileSize > 65535) {
free(blockStart);
free(blockEnd);
free(stats);
}
}
}
return 0;
}
|
22,636 | #include "includes.h"
__global__ void reductionKernel(float* vec, int width, double* sumUp){
// Sums vec[0..width-1] into *sumUp. Stage 1 reduces each block's tile in
// shared memory (launch with blockDim.x * sizeof(float) dynamic shared mem;
// presumably blockDim.x is a power of two — confirm at the launch site).
//shared memory instantiation
extern __shared__ float partialSum[];
//index for global memory
int g_idx = blockDim.x * blockIdx.x + threadIdx.x;
//index for shared memory
int b_idx = threadIdx.x;
//load shared memory from global memory (zero-pad past the end of vec)
partialSum[b_idx] = g_idx < width ? vec[g_idx] : 0;
//reduction inside blocks
for(int stride = blockDim.x/2; stride >= 1 ; stride = stride/2){
__syncthreads();
if(b_idx < stride ){
partialSum[b_idx] = partialSum[b_idx] + partialSum[b_idx + stride];
}
}
//reduction for grid using just thread 0 of each block
// NOTE(review): the inter-block stage below is unsafe. (1) The
// __syncthreads() inside this branch is reached only by b_idx == 0 —
// a barrier in divergent control flow is undefined behavior. (2) CUDA has
// no grid-wide barrier outside cooperative launches, so blocks race on
// vec[]: a block may read another block's slot before its partial is
// written. Conventional fixes: a second kernel launch over the block
// partials, or one atomicAdd per block.
if(b_idx == 0){
//copy the block partial back to global memory
vec[g_idx] = partialSum[b_idx];
//reduction
for(int stride = (gridDim.x * blockDim.x)/2; stride>=blockDim.x; stride = stride/2){
__syncthreads();
if(g_idx < stride){
vec[g_idx] = vec[g_idx] + vec[g_idx + stride];
}
}
}
//save result in output variable
if(g_idx == 0)
(*sumUp) = vec[g_idx];
}
// Vector addition (device code): c[i] = a[i] + b[i] for i in [0, n).
// extern "C" keeps the symbol unmangled so the host program can load it by name.
extern "C" __global__ void Sum(int *a, int *b, int *c, int n)
{
	int gid = threadIdx.x + blockIdx.x * blockDim.x;
	if (gid >= n)
		return; // tail guard: grid may overshoot n
	c[gid] = a[gid] + b[gid];
}
|
22,638 | #include "includes.h"
/* For each batch item i < nBatch: evaluate fx = sum_k DA[.,k] * CA[k] at each
   of the rbs*rScale sample points and store the sum of squared residuals
   against EA into SA[i]. One thread per batch item. */
extern "C"
__global__ void sumSquareError (int nBatch, int rbs, int rScale, int nCoeff, float *DA, float *CA, float *EA, float *SA)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= nBatch)
		return;
	const int nSamples = rbs * rScale;
	const int daOffset = i * nSamples * nCoeff;
	const int caOffset = i * nCoeff;
	const int eaOffset = i * nSamples;
	SA[i] = 0;
	for (int j = 0; j < nSamples; j++) {
		// Dot product of the j-th design row with this item's coefficients.
		float fx = 0.0f;
		for (int k = 0; k < nCoeff; k++)
			fx += DA[daOffset + nSamples * k + j] * CA[caOffset + k];
		float error = EA[eaOffset + j] - fx;
		SA[i] += error * error; // sum square error
	}
}
22,639 | #include <iostream>
#include <random>
using namespace std;
// Matrices are stored in row-major order:
// M(row, column) = *(M.elements + row * M.width + col)
typedef struct
{
int width;
int height;
float * elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Forward declaration of sequential CPU function
void sequential_cpu(Matrix A, Matrix B, Matrix C);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code: C = A * B on the GPU, printing the
// kernel time. Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// (the grid below has no remainder blocks).
void MatMu(const Matrix A, const Matrix B, Matrix C)
{
	// Load A and B to device memory
	Matrix d_A;
	d_A.width = A.width;
	d_A.height = A.height;
	size_t size = A.width * A.height * sizeof(float);
	cudaMalloc(&d_A.elements, size);
	cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
	Matrix d_B;
	d_B.width = B.width;
	d_B.height = B.height;
	size = B.width * B.height * sizeof(float);
	cudaMalloc(&d_B.elements, size);
	cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
	// Allocate C in device memory
	Matrix d_C;
	d_C.width = C.width;
	d_C.height = C.height;
	size = C.width * C.height * sizeof(float);
	cudaMalloc(&d_C.elements, size);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// Invoke kernel
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y);
	cudaEventRecord(start);
	MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
	cudaEventRecord(stop);
	// Read C from device memory (blocking copy also synchronizes with the kernel)
	cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	cout << "Kernel call took " << milliseconds << " milliseconds" << endl;
	// Destroy the timing events — they were previously leaked, and this
	// function is called in a loop from main().
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Free device memory
	cudaFree(d_A.elements);
	cudaFree(d_B.elements);
	cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Each thread computes one element of C = A * B by accumulating into Cvalue.
// Generalized: a bounds guard makes the kernel safe even when the matrix
// dimensions are not multiples of the block size (extra threads just exit).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= A.height || col >= B.width)
		return;
	float Cvalue = 0;
	for (int e = 0; e < A.width; ++e)
		Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
	C.elements[row * C.width + col] = Cvalue;
}
// Sequential CPU version is given for comparison
// Sequential CPU reference: C = A * B.
// BUG FIX: the previous version nested a loop over br inside the loop over
// ac, summing A[i][ac] * B[br][j] over ALL (ac, br) pairs — i.e. it computed
// (row-sum of A) * (column-sum of B) instead of a dot product. The inner
// index must be shared between A's column and B's row.
void sequential_cpu(Matrix A, Matrix B, Matrix C)
{
	for(int i = 0; i < C.height; ++i)
	{
		for(int j = 0; j < C.width; ++j)
		{
			float sum = 0.0f;
			for(int k = 0; k < A.width; ++k)
				sum += A.elements[i*A.width + k] * B.elements[k*B.width + j];
			C.elements[i*C.width + j] = sum;
		}
	}
}
int main()
{
	// Fill A and B (dimensions are BLOCK_SIZE multiples, as MatMu requires)
	// with uniform random values in [-1, 1], then time 5 GPU multiplies.
	int n;
	size_t size;
	std::default_random_engine generator;
	std::uniform_real_distribution<float> distribution(-1.0,1.0);
	Matrix A;
	A.width = BLOCK_SIZE*150;
	A.height = BLOCK_SIZE*100;
	n = A.width * A.height;
	size = n * sizeof(float);
	A.elements = (float*)malloc(size);
	for(int i = 0; i < n; ++i)
		A.elements[i] = distribution(generator);
	Matrix B;
	B.width = BLOCK_SIZE*200;
	B.height = A.width; // inner dimensions must match
	n = B.width * B.height;
	size = n * sizeof(float);
	B.elements = (float*)malloc(size);
	for(int i = 0; i < n; ++i)
		B.elements[i] = distribution(generator);
	Matrix C;
	C.width = B.width;
	C.height = A.height;
	n = C.width * C.height;
	size = n * sizeof(float);
	C.elements = (float*)malloc(size);
	for(int i = 0; i < 5; ++i)
	{
		printf("i=%d\n",i);
		MatMu(A, B, C);
	}
	// Release host matrices (previously leaked).
	free(A.elements);
	free(B.elements);
	free(C.elements);
	return 0;
}
|
22,640 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#define FLT_MAX 1e35
#include <cassert>
// True iff (y, x) lies inside an h-by-w grid.
__device__ inline bool isvalidxy(const int h, const int w,const int y,const int x)
{
	return 0 <= y && y < h && 0 <= x && x < w;
}
// Exchange two floats in place.
__device__ inline void swapf(float & a, float & b)
{
	const float t = a;
	a = b;
	b = t;
}
// Exchange two ints in place.
__device__ inline void swap(int & a, int & b)
{
	const int t = a;
	a = b;
	b = t;
}
__device__ inline void bitonic_sort(float* dist, int* idx, int tmpn)
{
// Block-cooperative in-place bitonic sort of dist[0..tmpn-1] ascending;
// idx carries two payload ints per element (a row/col pair) that are swapped
// alongside their key. Must be called by all threads of the block.
// NOTE(review): bitonic sort assumes tmpn is a power of two — confirm that
// callers always pass a power-of-two tmpn.
//Bitonic Sort
for (unsigned int t = 2; t <= tmpn ; t <<= 1)
{
// Bitonic merge:
for (unsigned int j = t >> 1; j>0; j >>= 1)
{
// Each thread strides across the compare-exchange pairs of this step.
for (unsigned int tid = threadIdx.x ; tid < tmpn ; tid += blockDim.x )
{
unsigned int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & t) == 0)
{
// Ascending half of the bitonic sequence.
if (dist[tid] > dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[2*tid], idx[2*ixj]);
swap(idx[2*tid+1], idx[2*ixj+1]);
}
}
else
{
// Descending half.
if (dist[tid] < dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[2*tid], idx[2*ixj]);
swap(idx[2*tid+1], idx[2*ixj+1]);
}
}
}
}
// Barrier after each merge step; reached uniformly by the whole block.
__syncthreads();
}
}
}
__device__ inline void oddeven_sort(float* dist, int* idx , int tmpn)
{
// Block-cooperative odd-even transposition sort of dist[0..tmpn-1] ascending,
// swapping the paired idx payload (two ints per element) alongside each key.
// (tmpn+1)/2 rounds of one odd phase plus one even phase sort any tmpn.
// Must be called by all threads of the block.
for ( int cnt = 0 ; cnt < ( tmpn + 1 ) / 2 ; ++cnt )
{
// Odd phase: compare-exchange (j-1, j) for odd j.
for ( int j = 2*threadIdx.x + 1 ; j < tmpn ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[2*j], idx[2*(j-1)]);
swap(idx[2*j+1], idx[2*(j-1)+1]);
}
}
__syncthreads();
// Even phase: compare-exchange (j-1, j) for even j.
for ( int j = 2*threadIdx.x + 2 ; j < tmpn ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[2*j], idx[2*(j-1)]);
swap(idx[2*j+1], idx[2*(j-1)+1]);
}
}
__syncthreads();
}
}
/* KNN over a (2*dh+1) x (2*dw+1) search window per pixel.
   Grid mapping: blockIdx.x encodes (batch, row), blockIdx.y is the column;
   the block's threads cooperatively fill dist/idx scratch (tmpd/tmpi), sort
   by squared feature distance, and emit the k nearest distances plus their
   (batch, row, col) indices. tmpn must be >= (2*dh+1)*(2*dw+1) and a power
   of two for bitonic_sort. */
__global__ void KnnKernel1(const int b,const int h,const int w,const int d,const int dh,const int dw,const int tmpn,const int k,const float * f,float* tmpd,int* tmpi,float * result,int * result_i)
{
	int bi = blockIdx.x / h ;
	int y = blockIdx.x % h ;
	int x = blockIdx.y ;
	const float* fcurrent = f + ((bi*h + y)*w+x)*d;
	float* dist = tmpd + ((bi*h + y)*w+x)*tmpn;
	int* idx = tmpi + ((bi*h + y)*w+x)*tmpn*2;
	float dif;
	float * r = result + ((bi*h + y)*w+x)*k;
	int * ri = result_i + ((bi*h + y)*w+x)*k*3;
	for( int j = threadIdx.x ; j < tmpn ; j += blockDim.x )
	{
		if( j >= (2*dh+1)*(2*dw+1) )
		{
			// Padding slot beyond the window: sort it to the very end.
			dist[j] = FLT_MAX;
			idx[2*j] = INT_MAX;
			idx[2*j+1] = INT_MAX;
			continue;
		}
		// BUG FIX: the window is (2*dh+1) rows by (2*dw+1) columns, so a
		// row-major decomposition of j must divide/mod by the row LENGTH
		// (2*dw+1). The old code used (2*dh+1) for both, which is only
		// correct when dh == dw.
		int dy = j / (2*dw+1) - dh;
		int dx = j % (2*dw+1) - dw;
		if( (dx==0) && (dy==0) )
		{
			// The pixel itself: distance zero.
			dist[j] = 0.0;
			idx[2*j] = y;
			idx[2*j+1] = x;
			continue;
		}
		if( ! isvalidxy(h,w, y+dy , x+dx ) )
		{
			// Neighbour falls outside the image: exclude via infinite distance.
			dist[j] = FLT_MAX;
			idx[2*j] = y + dy;
			idx[2*j+1] = x + dx;
			continue;
		}
		// Squared Euclidean distance in d-dimensional feature space.
		dist[j] = 0.0;
		for ( int di = 0 ; di < d ; ++di )
		{
			dif = fcurrent[di] - f[((bi*h+(y+dy))*w+(x+dx))*d+di];
			dist[j] += dif*dif;
		}
		idx[2*j] = y + dy;
		idx[2*j+1] = x + dx;
	}
	__syncthreads();
	bitonic_sort(dist,idx,tmpn);
	// Copy the k nearest candidates to the output.
	for ( int ki = threadIdx.x ; ki < k ; ki += blockDim.x )
	{
		r[ki] = dist[ki];
		ri[3*ki+0] = bi;
		ri[3*ki+1] = idx[2*ki];
		ri[3*ki+2] = idx[2*ki+1];
	}
}
/* Same computation as KnnKernel1 but with the alternate grid mapping used
   when b*h would exceed the grid-dimension limit: blockIdx.x is the batch
   index and blockIdx.y encodes (row, column). */
__global__ void KnnKernel2(const int b,const int h,const int w,const int d,const int dh,const int dw,const int tmpn,const int k,const float * f,float* tmpd,int* tmpi,float * result,int * result_i)
{
	int bi = blockIdx.x ;
	int y = blockIdx.y / w ;
	int x = blockIdx.y % w ;
	const float* fcurrent = f + ((bi*h + y)*w+x)*d;
	float* dist = tmpd + ((bi*h + y)*w+x)*tmpn;
	int* idx = tmpi + ((bi*h + y)*w+x)*tmpn*2;
	float dif;
	float * r = result + ((bi*h + y)*w+x)*k;
	int * ri = result_i + ((bi*h + y)*w+x)*k*3;
	for( int j = threadIdx.x ; j < tmpn ; j += blockDim.x )
	{
		if( j >= (2*dh+1)*(2*dw+1) )
		{
			// Padding slot beyond the window: sort it to the very end.
			dist[j] = FLT_MAX;
			idx[2*j] = INT_MAX;
			idx[2*j+1] = INT_MAX;
			continue;
		}
		// BUG FIX: row-major decomposition over a (2*dh+1) x (2*dw+1) window
		// must divide/mod by the row length (2*dw+1); the old code used
		// (2*dh+1) for both, which is only correct when dh == dw.
		int dy = j / (2*dw+1) - dh;
		int dx = j % (2*dw+1) - dw;
		if( (dx==0) && (dy==0) )
		{
			// The pixel itself: distance zero.
			dist[j] = 0.0;
			idx[2*j] = y;
			idx[2*j+1] = x;
			continue;
		}
		if( ! isvalidxy(h,w, y+dy , x+dx ) )
		{
			// Neighbour falls outside the image: exclude via infinite distance.
			dist[j] = FLT_MAX;
			idx[2*j] = y + dy;
			idx[2*j+1] = x + dx;
			continue;
		}
		// Squared Euclidean distance in d-dimensional feature space.
		dist[j] = 0.0;
		for ( int di = 0 ; di < d ; ++di )
		{
			dif = fcurrent[di] - f[((bi*h+(y+dy))*w+(x+dx))*d+di];
			dist[j] += dif*dif;
		}
		idx[2*j] = y + dy;
		idx[2*j+1] = x + dx;
	}
	__syncthreads();
	bitonic_sort(dist,idx,tmpn);
	// Copy the k nearest candidates to the output.
	for ( int ki = threadIdx.x ; ki < k ; ki += blockDim.x )
	{
		r[ki] = dist[ki];
		ri[3*ki+0] = bi;
		ri[3*ki+1] = idx[2*ki];
		ri[3*ki+2] = idx[2*ki+1];
	}
}
/* Launch the KNN kernel with whichever grid mapping fits CUDA's 65536-per-
   dimension grid limits; trips an assert if neither mapping fits. */
void KnnKernelLauncher(const int b,const int h,const int w,const int d,const int dh,const int dw,const int tmpn,const int k,const float * f,float* tmpd,int* tmpi,float * result,int * result_i){
	const bool fitsBHbyW = (b*h<=65536) && (w<=65536);
	const bool fitsBbyHW = (b<=65536) && (h*w<=65536);
	if (fitsBHbyW)
	{
		// grid.x = (batch, row), grid.y = column
		KnnKernel1<<<dim3(b*h,w,1),512>>>(b,h,w,d,dh,dw,tmpn,k,f,tmpd,tmpi,result,result_i);
	} else if (fitsBbyHW) {
		// grid.x = batch, grid.y = (row, column)
		KnnKernel2<<<dim3(b,h*w,1),512>>>(b,h,w,d,dh,dw,tmpn,k,f,tmpd,tmpi,result,result_i);
	} else {
		assert( fitsBHbyW || fitsBbyHW );
	}
}
#endif |
22,641 | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void sample(int *A)
{
// Demo kernel (launched as <<<1, 2>>>) that interleaves writes into A
// through a shared cursor i, apparently to illustrate thread scheduling.
__shared__ int i;
// NOTE(review): every thread writes i = 0 with no barrier before the
// atomicAdds below — this is a data race on i.
i = 0;
if(threadIdx.x == 0)
{
// Busy-wait delay (no side effects; the compiler may remove it entirely).
for(int j = 0; j < 10000000; j++);
// NOTE(review): reading i for the index and atomicAdd'ing it separately is
// not atomic as a pair, so which slot each value lands in is nondeterministic.
A[i] = 1;
atomicAdd(&i, 1);
// NOTE(review): __syncthreads() appears in BOTH arms of a divergent branch;
// a barrier in divergent control flow is undefined behavior (and can
// deadlock on pre-Volta hardware).
__syncthreads();
for(int j = 0; j < 1000000; j++);
A[i] = 2;
atomicAdd(&i, 1);
}
else
{
A[i] = 3;
atomicAdd(&i, 1);
__syncthreads();
A[i] = 4;
atomicAdd(&i, 1);
}
}
int main()
{
	// Managed allocation: visible to both the sample<<<1,2>>> kernel and host.
	int *D_A;
	cudaMallocManaged((void**) &D_A, 4 * sizeof(int));
	sample<<<1, 2>>>(D_A);
	// Must synchronize before the host touches managed memory.
	cudaDeviceSynchronize();
	for(int i = 0; i < 4; i++)
		printf("%d , ", D_A[i]);
	printf("\n");
	cudaFree(D_A); // previously leaked
	return 0;
}
|
22,642 | #include<thrust/reduce.h>
|
22,643 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/* Print each thread's globally-unique id in a 2D grid of 2D blocks, together
   with the element of input it owns. Blocks are numbered row-major: all
   threads of a block are consecutive, then blocks along gridDim.x, then
   grid rows along gridDim.y. */
__global__ void unique_gid_calculation_2D_2D(int *input){
	int tid = threadIdx.x + blockDim.x * threadIdx.y;        // id within the block
	int threadsPerBlock = blockDim.x * blockDim.y;
	int threadsPerGridRow = threadsPerBlock * gridDim.x;
	int gid = tid + blockIdx.x * threadsPerBlock + blockIdx.y * threadsPerGridRow;
	printf("blockIdx.x: %d, blockIdx.y: %d, threadIdx.x: %d, gid: %d, value: %d\n", blockIdx.x, blockIdx.y, tid, gid, input[gid]);
}
int main(){
	// 16 host ints, printed on the host then handed to the kernel.
	int array_size = 16;
	int array_bit_size = sizeof(int) * array_size; // byte count, despite the name
	int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 3, 92, 41, 54, 68, 11, 45, 21};
	for(int i = 0; i < array_size; i++){
		printf("%d ", h_data[i]);
	}
	printf("\n\n");
	int *d_data;
	cudaMalloc((void **)&d_data, array_bit_size);
	cudaMemcpy(d_data, h_data, array_bit_size, cudaMemcpyHostToDevice);
	// 2x2 grid of 2x2 blocks: 16 threads total, one per array element.
	dim3 block(2, 2);
	dim3 grid(2, 2);
	unique_gid_calculation_2D_2D <<<grid, block>>>(d_data);
	cudaDeviceSynchronize();
	cudaFree(d_data); // release explicitly (was leaked) before resetting
	cudaDeviceReset();
	return 0;
}
22,644 | // nlm algorithm using shared memory
// also uses transpose *cube
// furthermore uses transpose shared array
// if blockSize=16 or 32 then avoids bank conflicts
__global__ void nlmSharedT(float *out, const float *in, const float *cube,
const int N, const int M, const int window,
const float filtSigma){
// Non-local means: one thread per pixel i (row-major over the N x M image).
// cube holds the window*window neighbourhood "patches" transposed so that
// patch-line l of pixel i is at cube[l*N*M + i]. The block stages its own
// patches (sh_cubeSelf) once, then streams blockSize-wide slices of the image
// (sh_in) and their patches (sh_cubeElse) through shared memory, accumulating
// the Gaussian-weighted average. Dynamic shared memory must be
// (blockSize + 2*blockSize*window*window) floats.
const int blockSize = blockDim.x;
const int tid = threadIdx.x;
const int i = blockSize * blockIdx.x + tid; // which pixel, row major
const int winSize = window*window;
const int picSize = N*M;
float sum = 0;
float Dsum = 0;
float maxD = 0;
float tempOut = 0;
float D;
extern __shared__ float sh_mem[];
// Shared-memory layout: [ sh_in | sh_cubeSelf | sh_cubeElse ].
float *sh_in = &sh_mem[0];
float *sh_cubeSelf = &sh_mem[blockSize];
float *sh_cubeElse = &sh_mem[blockSize+ blockSize*winSize];
// NOTE(review): this early return happens BEFORE the __syncthreads() calls in
// the loop below; if N*M is not a multiple of blockSize, the last block's
// exited threads skip those barriers (undefined behavior). Confirm callers
// guarantee blockSize divides N*M.
if( i >= N*M) return;
const float inI = in[i];
// Stage this pixel's own patch, transposed so lane tid owns column tid
// (stride blockSize avoids bank conflicts when blockSize is 16 or 32).
for(int cubeLine=0; cubeLine < winSize; cubeLine++){
sh_cubeSelf[cubeLine*blockSize + tid] = cube[cubeLine*picSize + i];
}
int inIndex;
float temp;
// Stream the image through shared memory, blockSize pixels at a time.
for(int p=0; p<picSize/blockSize; p++){
//inIndex = (i+p*blockSize)%(picSize); // mod % to cycle arround to the start
inIndex = tid+p*blockSize; // this also works, every block starts from the same point
// sync before writing to make sure everyone has read
__syncthreads();
sh_in[tid] = in[inIndex];
//#pragma unroll 8
for(int cubeLine=0; cubeLine < winSize; cubeLine++){
sh_cubeElse[cubeLine*blockSize + tid] = cube[cubeLine*picSize + inIndex];
}
// sync before reading to make sure everyone has writen
__syncthreads();
// Compare this pixel's patch against each staged candidate patch.
for(int j=0; j<blockSize; j++){
#pragma unroll 8
for(int k=0; k<winSize; k++){
temp = sh_cubeSelf[k*blockSize + tid]-sh_cubeElse[k*blockSize + j];
sum += temp*temp;
}
D = expf(-sum/filtSigma);
// D == 1 means the candidate is the pixel itself (zero distance);
// it is excluded here and weighted with maxD at the end instead.
if(D!=1){
Dsum += D;
tempOut += D*sh_in[j];
if( D > maxD){
maxD = D;
}
}
sum = 0;
}
}
// Standard NLM self-weight trick: weight the pixel itself by the maximum
// weight seen among its neighbours.
tempOut += maxD*inI;
Dsum += maxD;
out[i] = tempOut/Dsum;
}
|
22,645 | #include <stdio.h>
#include <stdlib.h>
#include <string>
#include <iostream>
#include <algorithm>
using namespace std;
#define CAFFE_CUDA_NUM_THREADS 196
// Number of thread blocks needed to cover N work items with
// CAFFE_CUDA_NUM_THREADS threads per block (ceiling division).
inline int CAFFE_GET_BLOCKS(const int N) {
  const int per_block = CAFFE_CUDA_NUM_THREADS;
  return (N + per_block - 1) / per_block;
}
// Direct convolution forward pass: each thread computes one output element
// via a grid-stride loop over `nthreads` outputs.
//
// Layouts (row-major NCHW): bottom_data is num x channels x height x width,
// top_data is num x channels x conved_height x conved_width.
// The filter pointer is offset by c * kernel_h * kernel_w only, i.e. each
// channel is convolved with its own single 2-D filter (no summation across
// input channels — depthwise-style).
// khstart/kwstart shift the filter indices for windows clipped at the image
// border (hend < kernel_h near the top edge, wend < kernel_w near the left).
template <typename Dtype>
__global__ void ConvForward(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width,const int conved_height,
    const int conved_width,const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data,const Dtype* const weight,const Dtype* const bias,const bool bias_term_) {
    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x){
        const int pw = index % conved_width; // width position of output
        const int ph = (index / conved_width) % conved_height; // height position of output
        const int c = (index / conved_width / conved_height) % channels; // channel
        const int n = index / conved_width / conved_height / channels;   // batch item
        int hstart = ph * stride_h - pad_h; // input pointer starting point
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h); // boundary
        int wend = min(wstart + kernel_w, width + pad_w);
        // Clamp the window to the valid image area.
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        hend = min(hend, height); // height=output hight
        wend = min(wend, width);
        Dtype aveval = 0;
        const Dtype* const bottom_slice =
            bottom_data + (n * channels + c) * height * width;
        const Dtype* const weight_slice =
            weight + c * kernel_h * kernel_w;
        // Filter offsets for border-clipped windows.
        int khstart=hend<kernel_h?kernel_h-hend:0;
        int kwstart=wend<kernel_w?kernel_w-wend:0;
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                aveval += bottom_slice[h * width + w]*weight_slice[(khstart+h-hstart) * kernel_w + (kwstart+w-wstart)]; // (h-hstart)=>0~kernel_h
            }
        }
        if(bias_term_) {
            aveval+=bias[c]; // one bias per channel
        }
        top_data[index] = aveval;
    }
}
// Shared-memory convolution variant: each thread computes a 2x2 output patch,
// so it is launched with a quarter of the threads of ConvForward (see main:
// blockDim.x = CAFFE_CUDA_NUM_THREADS/4, nthreads = n/4).
// One block stages its 3x3-ish filter (kernel_h*kernel_w weights) and a slab
// of blockDim.x*4 input elements into shared memory, then convolves from
// there.
// NOTE(review): s_bottom/s_weight are dimensioned CAFFE_CUDA_NUM_THREADS
// (196) while blockDim.x*4 elements are loaded — the sizes only line up for
// the specific launch in this file (49 threads/block, 14x14 planes). The
// ph/c index arithmetic likewise assumes that geometry; confirm before
// reusing with other shapes.
template <typename Dtype>
__global__ void ConvForwardShared(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width,const int conved_height,
    const int conved_width,const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    Dtype* const top_data,const Dtype* const weight,const Dtype* const bias,const bool bias_term_) {
    __shared__ Dtype s_bottom[CAFFE_CUDA_NUM_THREADS], s_weight[CAFFE_CUDA_NUM_THREADS];
    /*for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads * 4; index += blockDim.x * gridDim.x){
    s_bottom[index%CAFFE_CUDA_NUM_THREADS]= bottom_data[index];
    s_weight[index%CAFFE_CUDA_NUM_THREADS]= weight[index%CAFFE_CUDA_NUM_THREADS + blockIdx.x * kernel_h * kernel_w * 1];
    }*/
    //Dtype l_weight [9];
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // First kernel_h*kernel_w threads stage this block's filter.
    if ( index % blockDim.x < kernel_h * kernel_w){
        s_weight[index % (kernel_h * kernel_w)]= weight[index % (kernel_h * kernel_w) + blockIdx.x * kernel_h * kernel_w * 1];
    }
    // Cooperative load of blockDim.x*4 input elements for this block.
    #pragma unroll
    for (int i=index % blockDim.x ; i < blockDim.x * 4 ; i += blockDim.x){
        s_bottom[i]= bottom_data[blockIdx.x * blockDim.x * 4 +i];
    }
    __syncthreads();
    //for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads ; index += blockDim.x * gridDim.x){
    /*#pragma unroll
    for(int i=0; i<9;i++)
    l_weight[i]=s_weight[i];*/
    if (index < nthreads){
        Dtype out[4]={0}; //local output: the 2x2 patch accumulators
        const int pw = (index * 2) % conved_width; // width position of output
        const int ph = (index * 2 / conved_width) * 2 % conved_height; // height position
        const int c = (index * 4 / conved_width / conved_height) % channels; // channel
        const int n = index / conved_width / conved_height / channels;// =0
        /*const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;*/
        /*const Dtype* const weight_slice =
        weight + c * kernel_h * kernel_w;*/
        // Convolve each of the 2x2 output positions of this thread's patch.
        #pragma unroll
        for(int j=0; j<2; j++)
        #pragma unroll
        for(int i=0; i<2; i++)
        {
            //Dtype aveval=0;
            // Window clamped to the image; kh/kw offsets compensate for
            // border clipping (same scheme as ConvForward).
            const int hstart = (ph + j )* stride_h - pad_h >0? (ph + j )* stride_h - pad_h :0;
            const int wstart = (pw + i) * stride_w - pad_w >0? (pw + i) * stride_w - pad_w :0;
            const int hend = (ph + j )* stride_h - pad_h + kernel_h< height? (ph + j )* stride_h - pad_h + kernel_h : height;
            const int wend = (pw + i) * stride_w - pad_w + kernel_w< width? (pw + i) * stride_w - pad_w + kernel_w : width;
            const int khstart=hend<kernel_h?kernel_h-hend:0;
            const int kwstart=wend<kernel_w?kernel_w-wend:0;
            #pragma unroll
            for (int h = hstart; h < hend; ++h) {
                #pragma unroll
                for (int w = wstart; w < wend; ++w) {
                    //aveval += s_bottom[h * width + w ] * s_weight[(khstart+ h -hstart) * kernel_w + (kwstart + w -wstart)];
                    out[j*2+i]+= s_bottom[h * width + w ] * s_weight[(khstart+ h -hstart) * kernel_w + (kwstart + w -wstart)];
                }
            }
            //if(bias_term_) aveval+=bias[c];
            //top_data[(c * conved_height + ph + j) * conved_width + pw + i] = aveval;
        }
        // Write the 2x2 patch back to global memory.
        #pragma unroll
        for(int j=0; j<2; j++)
        #pragma unroll
        for(int i=0; i<2; i++)
            top_data[(c * conved_height + ph + j) * conved_width + pw + i] = out[j*2+i];//hard code numbers here will increase speed
    }
}
// Hand-specialized ("cheat") version of ConvForwardShared with all shapes
// hard-coded: 512 channels of 14x14 spatial size, 3x3 kernel, stride 1,
// pad 1. Each thread computes a 2x2 output patch; the block's filter and a
// 196-element input slab are staged in shared memory.
// NOTE(review): correctness depends on the matching launch geometry
// (blockDim.x*4 == 196 elements per block) — confirm before reuse.
template <typename Dtype>
__global__ void ConvForwardCheat(const Dtype* const bottom_data, Dtype* const top_data,const Dtype* const weight) {
    __shared__ Dtype s_bottom[196], s_weight[196];
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // First 9 threads of the block stage this block's 3x3 filter.
    if ( index % blockDim.x < 9){
        s_weight[index % 9]= weight[index % 9 + blockIdx.x *9];
    }
    // Cooperative load of 4*blockDim.x input elements for this block.
    #pragma unroll
    for (int i=index % blockDim.x ; i < blockDim.x * 4 ; i += blockDim.x){
        s_bottom[i]= bottom_data[blockIdx.x * blockDim.x * 4 +i];
    }
    __syncthreads();
    if (index < 512*14*14){
        Dtype out[4]={0}; // 2x2 output patch accumulators
        const int pw = (index * 2) % 14; // width position of output
        const int ph = (index * 2 / 14)*2 % 14; // height position of output
        const int c = (index /49) % 512; // output channel
        //const int n = index / conved_width / conved_height / channels;// =0
        #pragma unroll
        for(int j=0; j<2; j++)
        #pragma unroll
        for(int i=0; i<2; i++)
        {
            Dtype aveval = 0;
            // Clamp the 3x3 window to the 14x14 image (pad == 1).
            const int hstart = (ph + j ) - 1 >0? (ph + j ) - 1 :0;
            const int wstart = (pw + i) - 1 >0? (pw + i) - 1 :0;
            const int hend = (ph + j )< 12? (ph + j) +2 : 14;
            const int wend = (pw + i) < 12? (pw + i) +2 : 14;
            // Filter offset for windows clipped at the top/left border.
            const int khstart=hend<3?3-hend:0;
            const int kwstart=wend<3?3-wend:0;
            #pragma unroll
            for (int h = hstart; h < hend; ++h) {
                #pragma unroll
                for (int w = wstart; w < wend; ++w) {
                    //aveval += s_bottom[h * 14 + w ] * s_weight[(khstart+ h -hstart) * 3 + (kwstart + w -wstart)];
                    out[j*2+i]+= s_bottom[h * 14 + w ] * s_weight[(khstart+ h -hstart) * 3 + (kwstart + w -wstart)];
                }
            }
            //top_data[(c * 14 + ph + j) * 14 + pw + i] = aveval;
        }
        // Write the 2x2 patch back to global memory.
        #pragma unroll
        for(int j=0; j<2; j++)
        #pragma unroll
        for(int i=0; i<2; i++)
            top_data[(c * 14 + ph + j) * 14 + pw + i] = out[j*2+i];
    }
}
// 1x1 convolution: one block per spatial position (blockIdx = (x, y)), 256
// threads each multiplying two input channels by the weight for the current
// output channel, followed by a shared-memory tree reduction.
// Requires blockDim.x == 256 and channels == 512 (each thread reads channel
// tid and channel tid+256).
template <typename Dtype>
__global__ void GPU1x1Conv(const Dtype* const in, const Dtype* const weight, Dtype* const out, int const height, int const width, int const channels, int const out_channels)
{
    const int blockSize = 256;
    volatile __shared__ Dtype s_in[blockSize]; // per-thread partial products
    unsigned int tid = threadIdx.x;
    unsigned int startId = tid*width*height;      // start of channel `tid`'s plane
    unsigned int stride = blockSize*width*height; // offset to channel tid+256
    //w map to block.x; h map to block.y
    const int pos = blockIdx.y*width + blockIdx.x;
    for(int oc=0; oc< out_channels; oc++ )
    {
        // Each thread accumulates the contribution of two input channels.
        s_in[tid] = in[startId+pos]*weight[oc*channels+tid] + in[startId+pos+stride]*weight[oc*channels+tid+blockSize];
        __syncthreads();
        // Tree reduction down to one warp.
        if (tid < 128) { s_in[tid] += s_in[tid + 128]; }
        __syncthreads();
        if (tid < 64) { s_in[tid] += s_in[tid + 64]; }
        __syncthreads();
        if (tid < 32) {
            // NOTE(review): warp-synchronous reduction on a volatile shared
            // array — a pre-Volta idiom. On Volta+ this needs __syncwarp()
            // between steps; confirm the target architecture.
            s_in[tid] += s_in[tid + 32];
            s_in[tid] += s_in[tid + 16];
            s_in[tid] += s_in[tid + 8];
            s_in[tid] += s_in[tid + 4];
            s_in[tid] += s_in[tid + 2];
            s_in[tid] += s_in[tid + 1];
        }
        if (tid == 0) out[oc*width*height+pos] = s_in[0];
        // BUGFIX: barrier before the next iteration rewrites s_in. Without
        // it, threads outside warp 0 could start the next oc iteration and
        // overwrite slots that warp 0 is still reading during its reduction.
        __syncthreads();
    }
}
// Alternative 1x1 convolution: one block per output channel (blockIdx.x),
// one thread per spatial position; the output channel's weights are staged
// in shared memory, then each thread computes a straight dot product over
// the input channels at its pixel.
// NOTE(review): the channel count is hard-coded to 512 (tmp_weight size,
// the load-loop bound, the rc accumulation bound) and the load loop strides
// by 192 rather than blockDim.x. The call sites in this file launch
// <<<512,196>>> with 14x14 planes, for which every index is still covered,
// but `channels`/`out_channels`/`height`/`width` are not fully honored —
// confirm before reusing with other shapes.
template <typename Dtype>
__global__ void GPU1x1Conv2(const Dtype* const in, const Dtype* const weight, Dtype* const out, int const height, int const width, int const channels, int const out_channels)
{
    __shared__ Dtype tmp_weight[512]; // weights of this block's output channel
    unsigned int tid = threadIdx.x;
    // Cooperative load of the 512 weights for output channel blockIdx.x.
    for(int i=tid; i<512;i+=192)
        tmp_weight[i]=weight[channels*blockIdx.x+i];
    __syncthreads();
    float local_out=0;
    // Dot product across input channels at this thread's pixel (tid).
    for(int rc=0; rc< 512; rc++)
        local_out+= in[tid+rc*width*height]*tmp_weight[rc];
    out[blockIdx.x*width*height+tid]=local_out;
}
// CPU reference for a 1x1 convolution.
// in:  channels x height x width (row-major planes), weight: out_channels x channels.
// Accumulates into `out` (caller must zero-initialize it first); the running
// sum is kept in double precision while each product is computed in float,
// matching the original accumulation order.
void CPU1x1Conv(float *in, float *weight, double *out, int const height, int const width, int const channels, int const out_channels)
{
    const int plane = height * width;
    for (int oc = 0; oc < out_channels; oc++) {
        for (int pix = 0; pix < plane; pix++) {
            double acc = out[oc*plane + pix];
            for (int c = 0; c < channels; c++) {
                acc += in[c*plane + pix] * weight[oc*channels + c];
            }
            out[oc*plane + pix] = acc;
        }
    }
}
// Self-test driver: runs the plain and shared-memory 3x3 conv kernels on
// random data and reports mismatches, then cross-checks the GPU 1x1
// convolution against the CPU reference.
int main(int argc, char* argv[])
{
    const int channels = 512;
    const int height = 14;
    const int width = 14;
    const int kernel_h = 3;
    const int kernel_w = 3;
    const int stride_h = 1;
    const int stride_w = 1;
    const int pad_h = 1;
    const int pad_w = 1;
    const int conved_height = height;
    const int conved_weight = width;
    const bool bias_term = false;
    const int n = channels * height * width;   // elements per tensor
    const int wn = channels * channels;        // 1x1 weight count
    float *d_weight, *d_bottom, *d_top1, *d_top2, *d_weight1x1, *d_saparable_out;
    cudaMallocManaged(&d_weight, n*sizeof(float));
    cudaMallocManaged(&d_weight1x1, wn*sizeof(float));
    cudaMallocManaged(&d_bottom, n*sizeof(float));
    cudaMallocManaged(&d_top1, n*sizeof(float));
    cudaMallocManaged(&d_top2, n*sizeof(float));
    // Random inputs in [0, 0.1].
    for(int i=0;i<n;i++)
        d_weight[i]=((double) rand() / (RAND_MAX)/10);
    for(int i=0;i<n;i++)
        d_bottom[i]=((double) rand() / (RAND_MAX)/10);
    for(int i=0;i<wn;i++)
        d_weight1x1[i]=((double) rand() / (RAND_MAX)/10);
    printf("numblocks=%d", CAFFE_GET_BLOCKS(n));
    // Reference kernel: one thread per output element.
    ConvForward<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
        n, d_bottom, n, channels,
        height, width,conved_height,conved_weight,kernel_h,
        kernel_w, stride_h, stride_w, pad_h, pad_w, d_top1,d_weight,0,bias_term);
    // Shared-memory kernel: each thread produces a 2x2 output patch, so it
    // runs with a quarter of the threads and outputs.
    int nb=CAFFE_GET_BLOCKS(n);
    int bs=CAFFE_CUDA_NUM_THREADS/4;
    int nt=n/4;
    ConvForwardShared<float><<<nb, bs>>>(
        nt, d_bottom, n, channels,
        height, width,conved_height,conved_weight,kernel_h,
        kernel_w, stride_h, stride_w, pad_h, pad_w, d_top2,d_weight,0,bias_term);
    //ConvForwardCheat<float><<<nb, bs>>>(d_bottom, d_top2,d_weight);
    float *out1 = new float[n];
    float *out2 = new float[n];
    cudaMemcpy(out1, d_top1, n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(out2, d_top2, n*sizeof(float), cudaMemcpyDeviceToHost);
    // Report up to 20 mismatches between the two conv kernels.
    int c=0;
    for(int i=0;i<n;i++)
        if(out1[i]!=out2[i]&&c<20)
        {printf("top1[%d]=%f, top2[%d]=%f", i, out1[i], i, out2[i]);
        c++;}
    cudaFree(d_top2);
    // Separable (1x1) convolution: CPU reference vs GPU kernel.
    cudaMallocManaged(&d_saparable_out, n*sizeof(float));
    float *weight1x1 = new float[wn];
    double *saparable_out = new double[n];
    cudaMemcpy(weight1x1, d_weight1x1, wn*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i=0; i<n; i++) saparable_out[i]=0; // CPU1x1Conv accumulates
    CPU1x1Conv(out1, weight1x1, saparable_out, height, width, channels, channels);
    // NOTE(review): launched twice with identical arguments (warm-up for
    // timing?); the second launch overwrites the first with the same values.
    GPU1x1Conv2<float><<<512,196>>>(d_top1, d_weight1x1, d_saparable_out, height, width, channels, channels);
    GPU1x1Conv2<float><<<512,196>>>(d_top1, d_weight1x1, d_saparable_out, height, width, channels, channels);
    float *outc = new float[n];
    cudaMemcpy(outc, d_saparable_out, n*sizeof(float), cudaMemcpyDeviceToHost);
    // Report up to 20 CPU/GPU discrepancies beyond the 0.1 tolerance.
    c=0;
    for(int i=0;i<n;i++)
        if(abs(outc[i]-saparable_out[i])>0.1&&c<20)
        {printf("outc[%d]=%f, saparable_out[%d]=%f", i, outc[i], i, saparable_out[i]);
        c++;}
    // BUGFIX: release everything (all buffers were previously leaked; the
    // unused d_bottom_padded/pcount/numBlocks locals were removed).
    delete[] out1;
    delete[] out2;
    delete[] weight1x1;
    delete[] saparable_out;
    delete[] outc;
    cudaFree(d_weight);
    cudaFree(d_weight1x1);
    cudaFree(d_bottom);
    cudaFree(d_top1);
    cudaFree(d_saparable_out);
    return 0;
}
|
22,646 | #include <iostream>
#include <stdio.h>
#include <iomanip>
#include <cuda_runtime.h>
using namespace std;
// Fill a rows x cols row-major matrix with random +/-1.0f values
// (each element is +1 when rand()/RAND_MAX exceeds 0.5, else -1).
void MatrixRandBin(float *mat, int rows, int cols) {
    const int total = rows * cols;
    for (int idx = 0; idx < total; idx++) {
        const float u = (float)rand()/RAND_MAX;
        mat[idx] = (u > 0.5) ? 1.0f : -1.0f;
    }
}
// Print a rows x cols row-major float matrix to stdout, one row per line,
// each value right-aligned in a width-2 field, followed by a blank line.
void MatrixPrint(float *mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            std::cout << std::setw(2) << mat[r*cols + c] << " ";
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Print a rows x cols row-major int matrix to stdout, one row per line,
// each value right-aligned in a width-2 field, followed by a blank line.
void MatrixPrintD(int *mat, int rows, int cols) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            std::cout << std::setw(2) << mat[r*cols + c] << " ";
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Sum of absolute element-wise differences between two rows x cols matrices
// (L1 distance); used to compare GPU and CPU results.
float MatrixCompare(float *a, float *b, int rows, int cols) {
    float total = 0;
    const int count = rows * cols;
    for (int idx = 0; idx < count; idx++) {
        total += abs(a[idx]-b[idx]);
    }
    return total;
}
// Naive triple-loop CPU matrix multiply: c = a * b.
// a is a_rows x a_cols, b is b_rows x b_cols (caller ensures a_cols ==
// b_rows); all matrices row-major, c is a_rows x b_cols.
void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) {
    for (int row = 0; row < a_rows; row++) {
        for (int col = 0; col < b_cols; col++) {
            float acc = 0;
            for (int k = 0; k < b_rows; k++)
                acc += a[row*a_cols+k] * b[k*b_cols+col];
            c[row*b_cols+col] = acc;
        }
    }
}
// Horizontal bit-packing of matrix `a`: each group of BINSIZE consecutive
// floats in a row becomes one int in a_bin, with a value of +1.0f setting
// the bit and the first element of the group landing in the most
// significant of the BINSIZE bits. Grid-stride loop over all
// a_rows*MaxBlocks packed words; both pitches are in elements, not bytes.
__global__ void AMatrix2Bin(float *a, int *a_bin, int pitch_a, int Pitch_a_bin, int a_rows, int MaxBlocks, int BINSIZE) {
    int tix = threadIdx.x;
    int bix = blockIdx.x;
    int bdx = blockDim.x;
    int gdx = gridDim.x;
    int maxThreads = MaxBlocks*a_rows; // total packed words to produce
    for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
        int rid = id/MaxBlocks; // row in a
        int cid = id%MaxBlocks; // packed-word index within the row
        int Integer = 0;
        int base = 1;
        // Walk the group backwards so element (cid*BINSIZE + k) maps to bit
        // (BINSIZE-1-k).
        for (int i = 0; i < BINSIZE; i++) {
            if (a[rid*pitch_a+(cid+1)*BINSIZE-1-i] == 1.f) {
                Integer += base;
            }
            base = base<<1;
        }
        a_bin[rid*Pitch_a_bin+cid] = Integer;
    }
}
// Vertical bit-packing of matrix `b`: each group of BINSIZE consecutive
// floats in a column becomes one int in b_bin, with +1.0f setting the bit
// and the first element of the group landing in the most significant of the
// BINSIZE bits. Grid-stride loop over all MaxBlocks*b_cols packed words;
// both pitches are in elements, not bytes.
__global__ void BMatrix2Bin(float *b, int *b_bin, int pitch_b, int Pitch_b_bin, int b_cols, int MaxBlocks, int BINSIZE) {
    int tix = threadIdx.x;
    int bix = blockIdx.x;
    int bdx = blockDim.x;
    int gdx = gridDim.x;
    int maxThreads = MaxBlocks*b_cols; // total packed words to produce
    for (int id = bix*bdx+tix; id < maxThreads; id += gdx*bdx) {
        int cid = id/MaxBlocks; // column in b
        int rid = id%MaxBlocks; // packed-word index within the column
        int Integer = 0;
        int base = 1;
        // Walk the group backwards so element (rid*BINSIZE + k) maps to bit
        // (BINSIZE-1-k).
        for (int i=0; i < BINSIZE; i++) {
            if (b[((rid+1)*BINSIZE-1-i)*pitch_b+cid] == 1.f) {
                Integer += base;
            }
            base = base<<1;
        }
        b_bin[rid*Pitch_b_bin+cid] = Integer;
    }
}
// __device__ unsigned char __popcount_tab_copy[256];//__constant__ is slower than __device__
// __device__ int popcount (int x) {
// return __popcount_tab_copy[(x >> 0) & 0xff]
// + __popcount_tab_copy[(x >> 8) & 0xff]
// + __popcount_tab_copy[(x >> 16) & 0xff]
// + __popcount_tab_copy[(x >> 24) & 0xff];
// }
// XNOR/popcount matrix multiply for +/-1 matrices packed into 32-bit words
// (see AMatrix2Bin/BMatrix2Bin for the packing).
//
// Launch: blockDim = (32, 32); each block computes one 32x32 tile of
// `result`. For every 32-word segment of the inner dimension the block
// stages a 32x32 tile of `a` and of `b` in shared memory, XORs packed words
// and counts bits via a 256-entry byte lookup table (also staged in shared
// memory). For 32 packed {+1,-1} pairs, dot = BINSIZE - 2*popcount(xor).
// Padding columns beyond RealMidSize each contribute +1 and are removed via
// `rest` at the end. All pitches are in elements.
__global__ void MatrixMulXnor(int *a, int *b, float *result, unsigned char *__popcount_tab,
                              int pitch_a, int pitch_b, int pitch_result,
                              int midBlocks, int BINSIZE, int RealMidSize) {
    int tiy = threadIdx.x;        // column within the tile
    int tix = threadIdx.y;        // row within the tile
    int bix = blockIdx.x;         // tile row
    int biy = blockIdx.y;         // tile column
    int RectSize = blockDim.x;    // tile edge (32)
    // Correction for the zero-packed padding of the inner dimension.
    int rest = BINSIZE*RectSize*midBlocks-RealMidSize;
    __shared__ unsigned char pop_tab[256];
    __shared__ int a_tile[32][32];
    __shared__ int b_tile[32][32];
    // Stage the 256-entry popcount table (8 rows of 32 threads).
    if (tix < 8) {
        pop_tab[tix*RectSize+tiy] = __popcount_tab[tix*RectSize+tiy];
    }
    __syncthreads();
    int sum = 0;
    for (int blk = 0; blk < midBlocks; blk++) {
        // Load one 32x32 tile of each packed operand.
        a_tile[tix][tiy] = a[(bix*RectSize+tix)*pitch_a + blk*RectSize+tiy];
        b_tile[tix][tiy] = b[(blk*RectSize+tix)*pitch_b + biy*RectSize+tiy];
        __syncthreads();
        // Accumulate the packed dot product over the 32 words of this tile.
        #pragma unroll
        for (int k = 0; k < 32; k++) {
            int bin = a_tile[tix][k]^b_tile[k][tiy];
            int ones = pop_tab[(bin >>  0) & 0xff]
                     + pop_tab[(bin >>  8) & 0xff]
                     + pop_tab[(bin >> 16) & 0xff]
                     + pop_tab[(bin >> 24) & 0xff];
            sum += BINSIZE - 2*ones; // matches minus mismatches
        }
        __syncthreads();
    }
    result[(bix*RectSize+tix)*pitch_result + biy*RectSize+tiy] = sum-rest;
}
// Multiply two +/-1 matrices on the GPU via XNOR + popcount.
// a (a_rows x a_cols) and b (a_cols x b_cols) are device pointers holding
// +1.0f/-1.0f values; the float product is written to `result` (device,
// a_rows x b_cols).
// Pipeline: zero-pad both operands so the inner dimension is a multiple of
// 32, bit-pack them with AMatrix2Bin/BMatrix2Bin, run the tiled
// MatrixMulXnor kernel (timed with CUDA events) and copy the tile-padded
// result back into `result`.
void MatrixMul_device(float *a, float *b, int a_rows, int a_cols, int b_cols, float *result) {
    int BINSIZE = 32; // bits packed per int (one 32-element +/-1 group)
    int MaxBlocks = (a_cols-1)/BINSIZE+1; // packed words along the inner dimension
    int Copysize = MaxBlocks*BINSIZE;     // padded inner dimension
    // Zero-padded staging copies; padding packs to 0 bits and is corrected
    // for inside the kernel via RealMidSize.
    float *a_copy;//a_rows * Copysize
    float *b_copy;//Copysize * b_cols
    size_t Pitch_a_copy, Pitch_b_copy;
    cudaMallocPitch((void**)&a_copy, &Pitch_a_copy, sizeof(float)*Copysize, a_rows);
    cudaMallocPitch((void**)&b_copy, &Pitch_b_copy, sizeof(float)*b_cols, Copysize);
    cudaMemset(a_copy, 0, Pitch_a_copy*a_rows);
    cudaMemset(b_copy, 0, Pitch_b_copy*Copysize);
    cudaMemcpy2D(a_copy, Pitch_a_copy, a, sizeof(float)*a_cols, sizeof(float)*a_cols, a_rows, cudaMemcpyDeviceToDevice);
    cudaMemcpy2D(b_copy, Pitch_b_copy, b, sizeof(float)*b_cols, sizeof(float)*b_cols, a_cols, cudaMemcpyDeviceToDevice);
    // Bit-packed operands, rounded up to whole 32x32 tiles for the kernel.
    int RectBlockSize = 32;
    dim3 RectBlockNum_a_bin((a_rows-1)/RectBlockSize+1, (MaxBlocks-1)/RectBlockSize+1, 1);//with block multiply
    dim3 RectBlockNum_b_bin((MaxBlocks-1)/RectBlockSize+1, (b_cols-1)/RectBlockSize+1, 1);
    int *a_bin;
    int *b_bin;
    size_t Pitch_a_bin, Pitch_b_bin;
    cudaMallocPitch((void**)&a_bin , &Pitch_a_bin , sizeof(int)*RectBlockSize*RectBlockNum_a_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
    cudaMallocPitch((void**)&b_bin , &Pitch_b_bin , sizeof(int)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_b_bin.x);
    cudaMemset(a_bin, 0, Pitch_a_bin*RectBlockSize*RectBlockNum_a_bin.x);
    cudaMemset(b_bin, 0, Pitch_b_bin*RectBlockSize*RectBlockNum_b_bin.x);
    dim3 BS_BIN(512,1,1);
    dim3 GS_BIN(6,1,1);
    AMatrix2Bin<<< GS_BIN, BS_BIN >>>(a_copy, a_bin,
        Pitch_a_copy/sizeof(float), Pitch_a_bin/sizeof(int), a_rows, MaxBlocks, BINSIZE);
    BMatrix2Bin<<< GS_BIN, BS_BIN >>>(b_copy, b_bin,
        Pitch_b_copy/sizeof(float), Pitch_b_bin/sizeof(int), b_cols, MaxBlocks, BINSIZE);
    cudaFree(a_copy);
    cudaFree(b_copy);
    // Tile-padded result buffer.
    float *result_bin;//a_rows * b_cols
    size_t Pitch_result_bin;
    cudaMallocPitch((void**)&result_bin , &Pitch_result_bin , sizeof(float)*RectBlockSize*RectBlockNum_b_bin.y, RectBlockSize*RectBlockNum_a_bin.x);
    // 256-entry byte-popcount lookup table, staged once in device global
    // memory; the kernel caches it in shared memory.
    const unsigned char __popcount_tab[] = {
        0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        1,2,2,3,2,3,3,4,2,3,3,4,3,4,4,5,2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        2,3,3,4,3,4,4,5,3,4,4,5,4,5,5,6,3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,
        3,4,4,5,4,5,5,6,4,5,5,6,5,6,6,7,4,5,5,6,5,6,6,7,5,6,6,7,6,7,7,8,
    };
    unsigned char *__popcount_tab_copy;
    cudaMalloc((void**)&__popcount_tab_copy, sizeof(__popcount_tab));
    cudaMemcpy(__popcount_tab_copy, __popcount_tab, sizeof(__popcount_tab), cudaMemcpyHostToDevice);
    // Time just the multiply kernel with CUDA events.
    cudaEvent_t start_device, stop_device;
    float time_device;
    cudaEventCreate(&start_device);
    cudaEventCreate(&stop_device);
    cudaEventRecord(start_device, 0);
    dim3 BS_MM(RectBlockSize, RectBlockSize, 1);
    dim3 GS_MM(RectBlockNum_a_bin.x, RectBlockNum_b_bin.y, 1);
    MatrixMulXnor<<< GS_MM, BS_MM >>>(a_bin, b_bin, result_bin, __popcount_tab_copy,
        Pitch_a_bin/sizeof(int), Pitch_b_bin/sizeof(int), Pitch_result_bin/sizeof(float),
        RectBlockNum_a_bin.y, BINSIZE, a_cols);
    cudaEventRecord( stop_device, 0 );
    cudaEventSynchronize( stop_device );
    cudaEventElapsedTime( &time_device, start_device, stop_device );
    cudaEventDestroy( start_device );
    cudaEventDestroy( stop_device );
    cout<<"gputime="<<time_device<<"ms"<<endl;
    // Strip the tile padding while copying into the caller's result buffer.
    cudaMemcpy2D(result,sizeof(float) *b_cols, result_bin,Pitch_result_bin,sizeof(float) *b_cols , a_rows ,cudaMemcpyDeviceToDevice);
    cudaFree(a_bin);
    cudaFree(b_bin);
    cudaFree(result_bin);
    cudaFree(__popcount_tab_copy); // BUGFIX: was leaked
}
// Host driver: builds two random 2000x2000 +/-1 matrices, multiplies them on
// the GPU via MatrixMul_device and copies the result back. The commented
// tail shows how to validate against the CPU reference.
int main(){
    //simulate pytorch param
    int x=2000;
    int n=2000;
    int y=2000;
    float *a_host;
    float *b_host;
    float *result_host;
    a_host = (float*) malloc(sizeof(float) * x * n);
    b_host = (float*) malloc(sizeof(float) * n * y);
    result_host = (float*) malloc(sizeof(float) * x * y);
    srand(0); // deterministic inputs
    MatrixRandBin(a_host,x,n);
    MatrixRandBin(b_host,n,y);
    float *a_copy;
    float *b_copy;
    float *result_device;
    cudaMalloc((void**)&a_copy,sizeof(float) *x * n);
    cudaMalloc((void**)&b_copy,sizeof(float) *n * y);
    cudaMalloc((void**)&result_device,sizeof(float) *x * y);
    cudaMemcpy(a_copy,a_host,sizeof(float) *x * n,cudaMemcpyHostToDevice);
    cudaMemcpy(b_copy,b_host,sizeof(float) *n * y,cudaMemcpyHostToDevice);
    //run in gpu warp in C code
    MatrixMul_device(a_copy,b_copy,x,n,y,result_device);
    cudaMemcpy(result_host, result_device,sizeof(float) *x * y,cudaMemcpyDeviceToHost);
    cudaFree(a_copy);
    cudaFree(b_copy);
    cudaFree(result_device);
    // Optional CPU cross-check:
    // float *result_cpu = (float*) malloc(sizeof(float) * x * y);
    // clock_t start_host = clock();
    // MatrixMul_host(a_host,x,n,b_host,n,y,result_cpu);
    // cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl;
    // float err=MatrixCompare(result_cpu,result_host,x,y);
    // cout<<"err in gpu and cpu = "<<err<<endl;
    // BUGFIX: host buffers were leaked.
    free(a_host);
    free(b_host);
    free(result_host);
    return 0;
}
22,647 | #include <stdio.h>
#include <stdlib.h>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
// Element-wise vector addition on the device: c = a + b, one thread per
// element, guarded against the grid overshooting `len`.
__global__ void
vecAdd(float *a,float *b, float *c, int len)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < len) {
        c[idx] = a[idx] + b[idx];
    }
}
// Host reference implementation of vector addition: c = a + b.
// Note: `len` is declared float (historical signature kept for callers);
// it is only used as the loop bound.
void vecAdd_CPU(float *a,float *b, float *c, float len)
{
    for (int idx = 0; idx < len; idx++) {
        c[idx] = a[idx] + b[idx];
    }
}
/* Load newline-separated float values from FileInput into input[0..len-1].
 * Exits the process if the file cannot be opened (original behavior).
 * BUGFIX: fgets was bounded by `len` (the vector length) instead of the
 * buffer size, overflowing buff[100] whenever len > 100; the loop also had
 * no bound on writes into `input`, overrunning the caller's array when the
 * file holds more than `len` lines. */
void loadValue(char *FileInput,int len,float *input)
{
    FILE *file;
    int i=0;
    char buff[100];
    file = fopen(FileInput,"r");
    if(!file)
    {
        printf("\nNo file found!");
        system("pause");
        exit(0);
    }
    /* Bound the read by the buffer size and stop after len values. */
    while(i < len && fgets(buff,(int)sizeof(buff),file))
    {
        input[i] = (float)atof(buff);
        i++;
    }
    fclose(file);
}
/* Write the result vector to fileOutput: first line is `len`, then one
 * value per line formatted "%1.1f". Exits the process if the file cannot
 * be created. */
void storeResult(char *fileOutput,float *arr,unsigned int len)
{
    FILE *file = fopen(fileOutput,"w");
    if(file == NULL)
    {
        printf("\nCannot create file!");
        system("pause");
        exit(0);
    }
    fprintf(file,"%d\n",len);
    for(unsigned int idx = 0; idx < len; idx++)
        fprintf(file,"%1.1f\n",arr[idx]);
    fclose(file);
}
// Two-stream vector addition: the input vector is processed two segments at
// a time (one per stream) so host<->device copies in one stream can overlap
// with the kernel in the other.
// Usage: <input1> <input2> <output> <length>
int main(int argc, char* argv[])
{
    /*
    arg 1 inputFile 1
    arg 2 inputFile 2
    arg 3 outputFile
    arg 4 vector length
    */
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float *d_A0,*d_B0,*d_C0;//device memory for stream0
    float *d_A1,*d_B1,*d_C1;//device memory for stream1
    // BUGFIX: validate the command line before touching argv[1..4].
    if (argc < 5)
    {
        printf("Usage: %s <input1> <input2> <output> <length>\n", argv[0]);
        return 1;
    }
    int inputLength = (int) (atoi)(argv[4]); // number of elements in the input list
    int SegSize = inputLength/2;             // each stream handles half per iteration
    cudaStream_t stream0,stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);
    // BUGFIX: pinned (page-locked) host buffers — cudaMemcpyAsync only
    // overlaps with kernels when the host memory is pinned; pageable malloc
    // memory silently degrades to synchronous staging copies.
    cudaMallocHost((void**)&hostInput1, inputLength*sizeof(float));
    cudaMallocHost((void**)&hostInput2, inputLength*sizeof(float));
    cudaMallocHost((void**)&hostOutput, inputLength*sizeof(float));
    //cuda memory allocation on the device, one segment-sized set per stream
    cudaMalloc((void**)&d_A0,SegSize*sizeof(float));
    cudaMalloc((void**)&d_B0,SegSize*sizeof(float));
    cudaMalloc((void**)&d_C0,SegSize*sizeof(float));
    cudaMalloc((void**)&d_A1,SegSize*sizeof(float));
    cudaMalloc((void**)&d_B1,SegSize*sizeof(float));
    cudaMalloc((void**)&d_C1,SegSize*sizeof(float));
    printf("Loading values to the array...\n");
    loadValue(argv[1],inputLength,hostInput1);
    loadValue(argv[2],inputLength,hostInput2);
    // Pipeline: copy in, add, copy out — alternating between the two streams.
    for(int i=0;i<inputLength;i+=SegSize*2)
    {
        cudaMemcpyAsync(d_A0, hostInput1+i, SegSize*sizeof(float),cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(d_B0, hostInput2+i, SegSize*sizeof(float),cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(d_A1, hostInput1+i+SegSize, SegSize*sizeof(float),cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync(d_B1, hostInput2+i+SegSize, SegSize*sizeof(float),cudaMemcpyHostToDevice,stream1);
        vecAdd<<<(SegSize-1)/256+1, 256, 0, stream0>>>(d_A0, d_B0, d_C0, SegSize);
        vecAdd<<<(SegSize-1)/256+1, 256, 0, stream1>>>(d_A1, d_B1,d_C1,SegSize);
        cudaMemcpyAsync(hostOutput+i, d_C0, SegSize*sizeof(float),cudaMemcpyDeviceToHost, stream0);
        cudaMemcpyAsync(hostOutput+i+SegSize, d_C1, SegSize*sizeof(float),cudaMemcpyDeviceToHost,stream1);
    }
    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);
    cudaDeviceSynchronize();
    storeResult(argv[3],hostOutput,inputLength);
    // BUGFIX: release host buffers and streams (previously leaked).
    cudaFreeHost(hostInput1);
    cudaFreeHost(hostInput2);
    cudaFreeHost(hostOutput);
    cudaFree(d_A0);
    cudaFree(d_B0);
    cudaFree(d_C0);
    cudaFree(d_A1);
    cudaFree(d_B1);
    cudaFree(d_C1);
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
    return 0;
}
22,648 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <math.h>
#include <curand_kernel.h>
#include <time.h>
#include <string.h>
// Host-side puzzle state shared by the setup and solver routines below.
int sudoku[81];   // current 9x9 board, row-major
int state[81];    // 1 = clue cell (fixed), 0 = free cell
int len = 81;     // number of cells on the board
__constant__ int mstate_d[81];  // device copy of `state`, set via cudaMemcpyToSymbol
// Wrap CUDA API calls: on failure prints the error string plus file/line and
// (by default) terminates the process with the error code.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
__device__ int compute_score_d(int puzzle[9][9]){
    // Score a 9x9 board: 162 minus the number of distinct values seen across
    // all rows and columns (0 means every row and column holds 9 unique digits).
    int distinct = 0;
    for(int line = 0; line < 9; line++){
        int row_hits[10] = {0,0,0,0,0,0,0,0,0,0};
        int col_hits[10] = {0,0,0,0,0,0,0,0,0,0};
        for(int k = 0; k < 9; k++){
            row_hits[puzzle[line][k]] += 1;
            col_hits[puzzle[k][line]] += 1;
        }
        for(int v = 0; v < 10; v++){
            if(row_hits[v] > 0)
                distinct++;
            if(col_hits[v] > 0)
                distinct++;
        }
    }
    return (162 - distinct);
}
int compute_score_h(int puzzle[81]){
    // Host twin of compute_score_d, on a flat row-major board: 162 minus the
    // number of distinct values seen across all rows and columns.
    int distinct = 0;
    for(int line = 0; line < 9; line++){
        int row_hits[10] = {0,0,0,0,0,0,0,0,0,0};
        int col_hits[10] = {0,0,0,0,0,0,0,0,0,0};
        for(int k = 0; k < 9; k++){
            row_hits[puzzle[(line * 9) + k]] += 1;
            col_hits[puzzle[(k * 9) + line]] += 1;
        }
        for(int v = 0; v < 10; v++){
            if(row_hits[v] > 0)
                distinct++;
            if(col_hits[v] > 0)
                distinct++;
        }
    }
    return (162 - distinct);
}
// Kernel: initialise one curand state per thread in each of the three state
// arrays (one independent seed per array).
__global__ void initCurand(curandState *rstate_b1,curandState *rstate_b2,curandState *rstate_3, unsigned long seed1,unsigned long seed2,unsigned long seed3){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed1, idx , 0, &rstate_b1[idx]);
    curand_init(seed2, idx , 0, &rstate_b2[idx]);
    curand_init(seed3, idx , 0, &rstate_3[idx]);
}
// Kernel: one greedy random-swap attempt per candidate board. Launched with
// a 9x9 thread block per puzzle: block 0 mutates sudoku_db1 (via rstate_b1),
// block 1 mutates sudoku_db2 (via rstate_b2). Each block stages its board in
// shared memory; thread (0,0) picks a random 3x3 sub-grid, swaps two
// non-clue cells (mstate_d == 0), keeps the swap only if it lowers the
// score, and every thread writes its cell back.
// NOTE(review): there is no __syncthreads() between the shared-memory load,
// the single-thread mutation, and the write-back, so other threads may write
// back stale values — confirm intended.
__global__ void init_rsudoku(curandState *rstate_b1, curandState *rstate_b2, int * sudoku_db1, int * sudoku_db2){
    __shared__ int shared_puzzle[9][9];
    int thread_x = threadIdx.x;
    int thread_y = threadIdx.y;
    int thread_block_id = threadIdx.x*blockDim.x + threadIdx.y;
    int block_num = blockIdx.x * blockDim.x + blockIdx.y;
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;   // NOTE(review): unused
    int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;  // NOTE(review): unused
    shared_puzzle[thread_x][thread_y] = sudoku_db1[(thread_x * 9) + thread_y];
    if(thread_block_id == 0){
        int block_x;
        int block_y;
        int x1, y1, x2, y2;
        int temp;
        int new_score = 1000;
        int current_score=compute_score_d(shared_puzzle);
        if(block_num == 0){
            // top-left corner of a random 3x3 sub-grid (0, 3 or 6)
            block_x = 3*(int)(3.0*curand_uniform(&rstate_b1[block_num]));
            block_y = 3*(int)(3.0*curand_uniform(&rstate_b1[block_num]));
            // re-draw until both picked cells are non-clue cells
            do
            {
                x1=(int)3.0*curand_uniform(&rstate_b1[block_num]);
                y1=(int)3.0*curand_uniform(&rstate_b1[block_num]);
            }while(mstate_d[((block_x+x1) * 9 )+(block_y+y1)]==1);
            do{
                x2=(int)3.0*curand_uniform(&rstate_b1[block_num]);
                y2=(int)3.0*curand_uniform(&rstate_b1[block_num]);
            }while(mstate_d[((block_x+x2) * 9)+ (block_y+y2)]==1);
            temp=shared_puzzle[block_x+x1][block_y+y1];
            shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
            shared_puzzle[block_x+x2][block_y+y2]=temp;
            new_score=compute_score_d(shared_puzzle);
            // swap back unless the score strictly improved
            if(new_score >= current_score){
                temp=shared_puzzle[block_x+x1][block_y+y1];
                shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
                shared_puzzle[block_x+x2][block_y+y2]=temp;
                new_score = current_score;
            }
        }else{
            if(block_num == 1){
                // same procedure for the second board, with its own RNG states
                block_x = 3*(int)(3.0*curand_uniform(&rstate_b2[block_num]));
                block_y = 3*(int)(3.0*curand_uniform(&rstate_b2[block_num]));
                do
                {
                    x1=(int)3.0*curand_uniform(&rstate_b2[block_num]);
                    y1=(int)3.0*curand_uniform(&rstate_b2[block_num]);
                }while(mstate_d[((block_x+x1) * 9 )+(block_y+y1)]==1);
                do{
                    x2=(int)3.0*curand_uniform(&rstate_b2[block_num]);
                    y2=(int)3.0*curand_uniform(&rstate_b2[block_num]);
                }while(mstate_d[((block_x+x2) * 9)+ (block_y+y2)]==1);
                temp=shared_puzzle[block_x+x1][block_y+y1];
                shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
                shared_puzzle[block_x+x2][block_y+y2]=temp;
                new_score=compute_score_d(shared_puzzle);
                if(new_score >= current_score){
                    temp=shared_puzzle[block_x+x1][block_y+y1];
                    shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
                    shared_puzzle[block_x+x2][block_y+y2]=temp;
                    new_score = current_score;
                }
            }
        }
    }
    // write the (possibly updated) board back to the block's own buffer
    if(block_num == 0){
        sudoku_db1[(thread_x * 9) + thread_y] = shared_puzzle[thread_x][thread_y];
    }else{
        if (block_num == 1){
            sudoku_db2[(thread_x * 9) + thread_y] = shared_puzzle[thread_x][thread_y];
        }
    }
}
// Kernel: per-line uniqueness counts for both boards. Runs with 36 threads:
// thread idx scores one row (row == 0) or one column (row == 1) of
// sudoku_db1 (array == 0) or sudoku_db2 (array == 1), writing the number of
// distinct values (at most 9) to d_score[idx]. The host sums these and
// subtracts from 162 to get each board's score.
__global__ void compute_score(int *sudoku_db1, int* sudoku_db2,int * d_score){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int count[10] = {0,0,0,0,0,0,0,0,0,0};
    int row = (idx / 9) % 2;    // 0 = score a row, 1 = score a column
    int array = idx / 18;       // 0 = first board, 1 = second board
    for(int i = 0; i < 9; i++){
        if((row == 0) && (array == 0)){
            count[sudoku_db1[(idx * 9) + i]] += 1;
        }else if ((row == 1) && (array == 0)){
            count[sudoku_db1[(idx - 9) + (i * 9)]] += 1;
        }else if ((row == 0) && (array == 1)){
            count[sudoku_db2[((idx - 18) * 9) + i]] += 1;
        }else if ((row == 1) && (array == 1)){
            count[sudoku_db2[(idx - 27) + (i * 9)]] += 1;
        }
    }
    int num_unique = 0;
    for(int i = 0; i < 10; i++){
        if(count[i] > 0){
            num_unique++;
        }
    }
    d_score[idx] = num_unique;
}
// Kernel (27 threads): crossover step mixing the two candidate boards.
// Copies row band r (rows 3r..3r+2, flattened as 27 consecutive ints) from
// rsrc into rdest, and column stack c (columns 3c..3c+2 of all 9 rows) from
// csrc into cdest.
__global__ void crossover(int* rsrc, int *rdest, int * csrc, int * cdest, int r, int c){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    rdest[(r * 27) + idx] = rsrc[(r * 27) + idx];
    cdest[(3 * c) + ((idx / 3) * 9) + (idx % 3)] = csrc[(3 * c) + ((idx / 3) * 9) + (idx % 3)];
}
// Kernel: simulated-annealing refinement. One 9x9 thread block per candidate
// (launched on a 1x6 grid); each block copies sudoku_src into shared memory,
// then thread (0,0) performs up to 1000 random swaps of non-clue cells
// within a random 3x3 sub-grid, accepting worsening swaps with probability
// exp((old - new)/divisor). The refined board goes to the output buffer
// selected by block_num, its score to score_block[block_num].
// NOTE(review): no __syncthreads() between the shared-memory load and the
// single-thread annealing loop — confirm intended.
__global__ void mos(int* sudoku_src, curandState * r3state,int current_score,int * score_block,int * block0,int * block1, int * block2, int * block3, int * block4, int * block5){
    __shared__ int shared_puzzle[9][9];
    int thread_x=threadIdx.x;
    int thread_y=threadIdx.y;
    int thread_block_id = threadIdx.x*blockDim.x + threadIdx.y;
    int block_num= blockIdx.x*blockDim.x + blockIdx.y;
    int block_x;
    int block_y;
    int x1, y1, x2, y2;
    int temp;
    int new_score = 1000;
    float divisor = 0.4;   // annealing temperature (fixed inside the kernel)
    shared_puzzle[thread_x][thread_y]=sudoku_src[(thread_x * 9) + thread_y];
    if(thread_block_id == 0){
        for(int i = 0; i < 1000; i++){
            // random 3x3 sub-grid corner (0, 3 or 6)
            block_x = 3*(int)(3.0*curand_uniform(&r3state[block_num]));
            block_y = 3*(int)(3.0*curand_uniform(&r3state[block_num]));
            // re-draw until both cells are non-clue cells
            do
            {
                x1=(int)3.0*curand_uniform(&r3state[block_num]);
                y1=(int)3.0*curand_uniform(&r3state[block_num]);
            }while(mstate_d[((block_x+x1) * 9 )+(block_y+y1)] == 1);
            do{
                x2=(int)3.0*curand_uniform(&r3state[block_num]);
                y2=(int)3.0*curand_uniform(&r3state[block_num]);
            }while(mstate_d[((block_x+x2) * 9)+ (block_y+y2)] == 1);
            temp=shared_puzzle[block_x+x1][block_y+y1];
            shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
            shared_puzzle[block_x+x2][block_y+y2]=temp;
            new_score=compute_score_d(shared_puzzle);
            if(new_score < current_score){
                current_score = new_score;
            }else{
                // Metropolis criterion: sometimes accept a worse board
                if((exp((float)(current_score - new_score)/divisor)) > (curand_uniform(&r3state[block_num])))
                {
                    current_score = new_score;
                }
                else{
                    // reject: undo the swap
                    temp=shared_puzzle[block_x+x1][block_y+y1];
                    shared_puzzle[block_x+x1][block_y+y1]=shared_puzzle[block_x+x2][block_y+y2];
                    shared_puzzle[block_x+x2][block_y+y2]=temp;
                }
            }
            if(new_score == 0){
                break;   // solved
            }
        }
        // copy the refined board into this block's dedicated output buffer
        for(int i=0;i<9;i++)
        {
            for(int j=0;j<9;j++)
            {
                if(block_num==0)
                    block0[9*i+j]=shared_puzzle[i][j];
                if(block_num==1)
                    block1[9*i+j]=shared_puzzle[i][j];
                if(block_num==2)
                    block2[9*i+j]=shared_puzzle[i][j];
                if(block_num==3)
                    block3[9*i+j]=shared_puzzle[i][j];
                if(block_num==4)
                    block4[9*i+j]=shared_puzzle[i][j];
                if(block_num==5)
                    block5[9*i+j]=shared_puzzle[i][j];
            }
        }
        score_block[block_num] = new_score;
    }
}
// Reads a 9-line puzzle file (one digit per cell, 0 = empty) into the global
// `sudoku` array, records the clue positions in `state`, prints the board,
// then fills every empty cell so that each 3x3 sub-grid contains each of the
// missing digits exactly once (rows/columns may still conflict — that is
// what the solver minimises).
void init_sudoku(char const * filename ){
    FILE* file = fopen(filename, "r");
    char line[256];
    int a = 0;    // row being parsed
    int b = 0;    // column being parsed
    if(file == NULL){
        perror("error while opening the file. \n");
        exit(EXIT_FAILURE);
    }
    while (fgets(line, sizeof(line), file)) {
        if( a == 9){
            break;
        }
        for(b = 0; b < 9; b++){
            // parse one digit character per column
            char str[2];
            str[0] = line[b];
            str[1] = '\0';
            sudoku[a * 9 + b] = (int)strtol(str, (char **)NULL, 10);
            if(sudoku[a * 9 + b] != 0){
                state[a * 9 + b] = 1;   // given clue: fixed
            }else{
                state[a * 9 + b] = 0;   // empty: free to mutate
            }
        }
        a++;
    }
    printf("input puzzle \n");
    int row = 0;
    int column = 0;
    for(row=0;row<9;row++)
    {
        for(column=0;column<9;column++)
            printf("%d ",sudoku[row * 9 + column]);
        printf("\n");
    }
    int x, y;
    int p, q;
    int idx;
    // nums_1: digits 1..9 with the ones already used in the sub-grid zeroed;
    // nums_2: the remaining (unused) digits, compacted.
    int nums_1[9],nums_2[9];
    for(int block_i=0;block_i<3;block_i++)
    {
        for(int block_j=0;block_j<3;block_j++)
        {
            for(int k=0;k<9;k++)
                nums_1[k]=k+1;
            // mark the digits that already appear in this 3x3 sub-grid
            for(int i=0;i<3;i++)
            {
                for(int j=0;j<3;j++)
                {
                    x = block_i*3 + i;
                    y = block_j*3 + j;
                    if(sudoku[x*9+y]!=0){
                        p = sudoku[x*9+y];
                        nums_1[p-1]=0;
                    }
                }
            }
            // compact the unused digits into nums_2
            q = -1;
            for(int k=0;k<9;k++)
            {
                if(nums_1[k]!=0)
                {
                    q+=1;
                    nums_2[q] = nums_1[k];
                }
            }
            // place the unused digits into the empty cells, in order
            idx = 0;
            for(int i=0;i<3;i++)
            {
                for(int j=0;j<3;j++)
                {
                    x = block_i*3 + i;
                    y = block_j*3 + j;
                    if(sudoku[x*9+y]==0)
                    {
                        sudoku[x*9+y] = nums_2[idx];
                        idx+=1;
                    }
                }
            }
        }
    }
}
// Drives the GPU sudoku solver on the globals prepared by init_sudoku() and
// writes the solution next to the input file with a ".sol" extension.
// Two candidate boards (sudoku_db1/2) evolve in parallel through rounds of:
//   1. init_rsudoku - one greedy random swap per board,
//   2. crossover    - exchange of the best-scoring row band / column stack,
//   3. mos          - six simulated-annealing searches per board,
// with a host-side random reshuffle whenever progress stalls, until a board
// scores 0 (solved).
void sudoku_call_gpu(char const * filename1){
    // Fix: use whichever device is current instead of hard-coding device 1
    // (which does not exist on single-GPU machines).
    int device = 0;
    cudaGetDevice(&device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,device);
    bool solved = false;
    int * sudoku_db1;          // candidate board 1 (device)
    int * sudoku_db2;          // candidate board 2 (device)
    int * block10_d;           // annealer output boards for board 1
    int * block11_d;
    int * block12_d;
    int * block13_d;
    int * block14_d;
    int * block15_d;
    int * block20_d;           // annealer output boards for board 2
    int * block21_d;
    int * block22_d;
    int * block23_d;
    int * block24_d;
    int * block25_d;
    int * score1_block;        // per-annealer scores (device)
    int * score2_block;
    int score1_host[6];        // fix: was int[4] while 6 ints are copied in (stack overrun)
    int score2_host[6];
    int * block_num_val;
    int block_num_val_h[162];
    int sudoku_hb1[81];        // best-known host copies of the boards
    int sudoku_hb2[81];
    int new_score;
    int puzz1_score = 0;
    int puzz2_score = 0;
    int puzz1_prev_score = 0;
    int puzz2_prev_score = 0;
    int min_score1 = 162;      // NOTE(review): never reset between rounds - confirm intended
    int min_score2 = 162;
    int min1_id = 0;
    int min2_id = 0;
    int itr2 = 4;              // outer-loop stagnation budget
    int itr3 = 5;              // annealing-loop stagnation budget
    int copy = 0;              // 1 => sudoku_hb1 holds the solved board
    float divisor = 0.4;       // annealing temperature (grows on each restart)
    int row = 0;
    int column = 0;
    size_t size = len * sizeof(int);
    gpuErrchk(cudaMalloc((void**) &sudoku_db1,size));
    gpuErrchk(cudaMemcpy(sudoku_db1,sudoku,size,cudaMemcpyHostToDevice));
    gpuErrchk(cudaMalloc((void**) &sudoku_db2,size));
    gpuErrchk(cudaMemcpy(sudoku_db2,sudoku,size,cudaMemcpyHostToDevice));
    // Fix: removed the invalid cudaMalloc on the __constant__ symbol; a
    // __constant__ array has fixed storage and only needs the symbol copy.
    gpuErrchk(cudaMemcpyToSymbol(mstate_d,state,size));
    gpuErrchk(cudaMalloc((void**) &block_num_val,(162*sizeof(int))));
    gpuErrchk(cudaMalloc((void**) &block10_d,size));
    gpuErrchk(cudaMalloc((void**) &block11_d,size));
    gpuErrchk(cudaMalloc((void**) &block12_d,size));
    gpuErrchk(cudaMalloc((void**) &block13_d,size));
    gpuErrchk(cudaMalloc((void**) &block14_d,size));
    gpuErrchk(cudaMalloc((void**) &block15_d,size));
    gpuErrchk(cudaMalloc((void**) &block20_d,size));
    gpuErrchk(cudaMalloc((void**) &block21_d,size));
    gpuErrchk(cudaMalloc((void**) &block22_d,size));
    gpuErrchk(cudaMalloc((void**) &block23_d,size));
    gpuErrchk(cudaMalloc((void**) &block24_d,size));
    gpuErrchk(cudaMalloc((void**) &block25_d,size));
    gpuErrchk(cudaMalloc((void**) &score1_block,sizeof(int)*6));
    gpuErrchk(cudaMalloc((void**) &score2_block,sizeof(int)*6));
    dim3 dimGrid(1,1);
    dim3 dimBlock(9,9);
    curandState *d_rstate_b1;
    curandState *d_rstate_b2;
    curandState *d_rstate_3;
    gpuErrchk(cudaMalloc(&d_rstate_b1, dimBlock.x* dimBlock.y * dimGrid.x * dimGrid.y * sizeof(curandState)));
    gpuErrchk(cudaMalloc(&d_rstate_b2, dimBlock.x* dimBlock.y * dimGrid.x * dimGrid.y * sizeof(curandState)));
    gpuErrchk(cudaMalloc(&d_rstate_3, dimBlock.x* dimBlock.y * dimGrid.x * dimGrid.y * sizeof(curandState)));
    int * d_score;
    int h_score[36];
    gpuErrchk(cudaMalloc((void**) &d_score,(36 * sizeof(int))));
    time_t t;
    srand((unsigned) time(&t));
    if(compute_score_h(sudoku) == 0){
        solved = true;   // the block-fill already solved it
    }
    while(itr2 > 0){
        if(solved == true){
            break;
        }
        // fresh RNG streams each round
        int seed1 = rand() % 10000;
        int seed2 = rand() % 10000;
        int seed3 = rand() % 10000;
        initCurand<<<dimGrid.x * dimGrid.y, dimBlock.x* dimBlock.y>>>(d_rstate_b1,d_rstate_b2,d_rstate_3 ,seed1,seed2,seed3);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        // one greedy random swap per board
        dim3 dimGrid1(1,2);
        dim3 dimBlock1(9,9);
        init_rsudoku<<<dimGrid1, dimBlock1>>>(d_rstate_b1,d_rstate_b2,sudoku_db1,sudoku_db2);
        gpuErrchk(cudaMemcpy(sudoku_hb1,sudoku_db1,size,cudaMemcpyDeviceToHost));
        gpuErrchk(cudaMemcpy(sudoku_hb2,sudoku_db2,size,cudaMemcpyDeviceToHost));
        gpuErrchk(cudaMemcpy(block_num_val_h,block_num_val,(162*sizeof(int)),cudaMemcpyDeviceToHost));
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        // score both boards line by line and locate the best band / stack
        dim3 dimGrid2(1,2);
        dim3 dimBlock2(2,9);
        int summed_score[12];
        int j = 0;
        int largest_grid_col = 0;
        int c = 0;
        int largest_grid_col_puzzle = 0;
        int largest_grid_row = 0;
        int largest_grid_row_puzzle = 0;
        int r = 0;
        compute_score<<<dimGrid2.x * dimGrid2.y,dimBlock2.x* dimBlock2.y>>>(sudoku_db1,sudoku_db2,d_score);
        gpuErrchk(cudaMemcpy(h_score,d_score,(36*sizeof(int)),cudaMemcpyDeviceToHost));
        puzz1_prev_score = puzz1_score;
        puzz2_prev_score = puzz2_score;
        puzz1_score = 0;
        puzz2_score = 0;
        for(int i = 0; i < 36; i++){
            if((i % 3) == 0){
                if(i != 0){
                    j++;
                }
                summed_score[j] = 0;
            }
            if(i < 18){
                puzz1_score += h_score[i];
            }else{
                puzz2_score += h_score[i];
            }
            summed_score[j] += h_score[i];
        }
        puzz1_score = 162 - puzz1_score;
        puzz2_score = 162 - puzz2_score;
        for(int i = 0; i < 12; i++){
            if(((i / 3) % 2) == 0){
                if(largest_grid_row < summed_score[i]){
                    largest_grid_row = summed_score[i];
                    r = (i % 3);
                    largest_grid_row_puzzle = (i / 6);
                }
            }else{
                if(largest_grid_col < summed_score[i]){
                    largest_grid_col = summed_score[i];
                    c = (i % 3);
                    largest_grid_col_puzzle = (i / 6);
                }
            }
        }
        // exchange the best band / stack between the two boards
        dim3 dimGrid3(1,1);
        dim3 dimBlock3(3,9);
        if((largest_grid_row_puzzle == 0) && (largest_grid_col_puzzle == 0)) {
            crossover<<<dimGrid3.x * dimGrid3.y,dimBlock3.x* dimBlock3.y>>>(sudoku_db1,sudoku_db2,sudoku_db1,sudoku_db2,r,c);
        }else if((largest_grid_row_puzzle == 0) && (largest_grid_col_puzzle == 1)){
            crossover<<<dimGrid3.x * dimGrid3.y,dimBlock3.x* dimBlock3.y>>>(sudoku_db1,sudoku_db2,sudoku_db2,sudoku_db1,r,c);
        }else if((largest_grid_row_puzzle == 1) && (largest_grid_col_puzzle == 0)){
            crossover<<<dimGrid3.x * dimGrid3.y,dimBlock3.x* dimBlock3.y>>>(sudoku_db2,sudoku_db1,sudoku_db1,sudoku_db2,r,c);
        }else if((largest_grid_row_puzzle == 1) && (largest_grid_col_puzzle == 1)){
            crossover<<<dimGrid3.x * dimGrid3.y,dimBlock3.x* dimBlock3.y>>>(sudoku_db2,sudoku_db1,sudoku_db2,sudoku_db1,r,c);
        }
        // re-score; keep the crossover only if it improved the board
        compute_score<<<dimGrid2.x * dimGrid2.y,dimBlock2.x* dimBlock2.y>>>(sudoku_db1,sudoku_db2,d_score);
        gpuErrchk(cudaMemcpy(h_score,d_score,(36*sizeof(int)),cudaMemcpyDeviceToHost));
        puzz1_prev_score = puzz1_score;
        puzz2_prev_score = puzz2_score;
        puzz1_score = 0;
        puzz2_score = 0;
        j = 0;
        for(int i = 0; i < 36; i++){
            if((i % 3) == 0){
                if(i != 0){
                    j++;
                }
                summed_score[j] = 0;
            }
            if(i < 18){
                puzz1_score += h_score[i];
            }else{
                puzz2_score += h_score[i];
            }
            summed_score[j] += h_score[i];
        }
        puzz1_score = 162 - puzz1_score;
        puzz2_score = 162 - puzz2_score;
        if(puzz1_score < puzz1_prev_score){
            gpuErrchk(cudaMemcpy(sudoku_hb1,sudoku_db1,size,cudaMemcpyDeviceToHost));
        }else
        {
            gpuErrchk(cudaMemcpy(sudoku_db1,sudoku_hb1,size,cudaMemcpyHostToDevice));
            puzz1_score = puzz1_prev_score;
        }
        if(puzz2_score < puzz2_prev_score){
            gpuErrchk(cudaMemcpy(sudoku_hb2,sudoku_db2,size,cudaMemcpyDeviceToHost));
        }else{
            gpuErrchk(cudaMemcpy(sudoku_db2,sudoku_hb2,size,cudaMemcpyHostToDevice));
            puzz2_score = puzz2_prev_score;
        }
        // annealing phase: six independent searches per board, keep the best
        dim3 dimGrid4(1,6);
        dim3 dimBlock4(9,9);
        while(itr3 > 0){
            mos<<<dimGrid4,dimBlock4>>>(sudoku_db1, d_rstate_3,puzz1_score,score1_block,block10_d,block11_d,block12_d,block13_d,block14_d, block15_d);
            gpuErrchk(cudaDeviceSynchronize());
            mos<<<dimGrid4,dimBlock4>>>(sudoku_db2, d_rstate_3,puzz2_score,score2_block,block20_d,block21_d,block22_d,block23_d,block24_d, block25_d);
            gpuErrchk(cudaDeviceSynchronize());
            cudaMemcpy(score1_host,score1_block,sizeof(int)*6,cudaMemcpyDeviceToHost);
            gpuErrchk(cudaDeviceSynchronize());
            cudaMemcpy(score2_host,score2_block,sizeof(int)*6,cudaMemcpyDeviceToHost);
            int g = 0;
            for(g=0;g<6;g++)
            {
                if(score1_host[g] < min_score1)
                {
                    min_score1 = score1_host[g];
                    min1_id = g;
                }
                if(score2_host[g] < min_score2)
                {
                    min_score2 = score2_host[g];
                    min2_id = g;
                }
            }
            puzz1_prev_score = puzz1_score;
            puzz2_prev_score = puzz2_score;
            if(min1_id==0)
            {
                cudaMemcpy(sudoku_db1,block10_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min1_id==1)
            {
                cudaMemcpy(sudoku_db1,block11_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min1_id==2)
            {
                cudaMemcpy(sudoku_db1,block12_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min1_id==3)
            {
                cudaMemcpy(sudoku_db1,block13_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min1_id==4)
            {
                cudaMemcpy(sudoku_db1,block14_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min1_id==5)
            {
                cudaMemcpy(sudoku_db1,block15_d,size,cudaMemcpyDeviceToDevice);
                puzz1_score=min_score1;
            }
            if(min2_id==0)
            {
                cudaMemcpy(sudoku_db2,block20_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(min2_id==1)
            {
                cudaMemcpy(sudoku_db2,block21_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(min2_id==2)
            {
                cudaMemcpy(sudoku_db2,block22_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(min2_id==3)
            {
                cudaMemcpy(sudoku_db2,block23_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(min2_id==4)
            {
                cudaMemcpy(sudoku_db2,block24_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(min2_id==5)
            {
                cudaMemcpy(sudoku_db2,block25_d,size,cudaMemcpyDeviceToDevice);
                puzz2_score=min_score2;
            }
            if(puzz1_score == 0){
                copy = 1;
                break;
            }else if(puzz2_score == 0){
                copy = 1;
                cudaMemcpy(sudoku_db1,sudoku_db2,size,cudaMemcpyDeviceToDevice);
                break;
            }
            if((puzz1_score == puzz1_prev_score) || (puzz2_score == puzz2_prev_score)){
                itr3--;
            }else{
                itr3 = 5;
            }
        }
        if((puzz1_score == puzz1_prev_score) || (puzz2_score == puzz2_prev_score)){
            itr2--;
        }else{
            itr2 = 4;
        }
        // Fix: the restart branch tested itr2 < 0, which the decrement above
        // can never produce (it bottoms out at 0 and the loop exits), so the
        // reshuffle below was dead code.
        if(itr2 == 0){
            // Fix: both pointers are host arrays; the original used
            // cudaMemcpy with cudaMemcpyDeviceToHost here.
            memcpy(sudoku,sudoku_hb1,size);
            int array[3]={0,3,6};
            int tmp;
            int random1=random()%3;
            int random2=random()%3;
            int x1,y1,x2,y2;
            int block_x,block_y;
            // randomly swap a few free cells inside one sub-grid to escape
            // the local minimum
            for(int suffle=0;suffle<random()%10;suffle++)
            {
                block_x = array[random1];
                block_y = array[random2];
                do{
                    x1=random()%3;
                    y1=random()%3;
                }while(state[((block_x+x1)*9)+(block_y+y1)]==1);
                do{
                    x2=random()%3;
                    y2=random()%3;
                }while(state[((block_x+x2)*9)+(block_y+y2)]==1);
                tmp = sudoku[((block_x+x1)*9)+(block_y+y1)];
                sudoku[((block_x+x1)*9)+(block_y+y1)]=sudoku[((block_x+x2)*9)+(block_y+y2)];
                sudoku[((block_x+x2)*9)+(block_y+y2)]=tmp;
            }
            cudaMemcpy(sudoku_db1,sudoku,size,cudaMemcpyHostToDevice);
            cudaMemcpy(sudoku_db2,sudoku,size,cudaMemcpyHostToDevice);
            new_score = compute_score_h(sudoku);
            if(new_score == 0){
                solved = true;
            }
            puzz1_score = new_score;
            puzz2_score = new_score;
            itr2 = 4;
            divisor = divisor + 0.1;   // raise the temperature after a restart
        }
    }
    gpuErrchk(cudaMemcpy(sudoku_hb1,sudoku_db1,size,cudaMemcpyDeviceToHost));
    printf("\n");
    printf("Solved Puzzle \n");
    for(row=0;row<9;row++)
    {
        for(column=0;column<9;column++)
            printf("%d ",sudoku_hb1[row * 9 + column]);
        printf("\n");
    }
    cudaFree(sudoku_db1);
    cudaFree(sudoku_db2);
    cudaFree(block10_d);
    cudaFree(block11_d);
    cudaFree(block12_d);
    cudaFree(block13_d);
    cudaFree(block14_d);
    cudaFree(block15_d);
    cudaFree(block20_d);
    cudaFree(block21_d);
    cudaFree(block22_d);
    cudaFree(block23_d);
    cudaFree(block24_d);
    cudaFree(block25_d);
    cudaFree(score1_block);
    cudaFree(score2_block);
    // Fix: these allocations were leaked in the original.
    cudaFree(block_num_val);
    cudaFree(d_score);
    cudaFree(d_rstate_b1);
    cudaFree(d_rstate_b2);
    cudaFree(d_rstate_3);
    // Fix: resstr was an uninitialised pointer passed to strcpy (undefined
    // behaviour); use a bounded stack buffer instead.
    char resstr[1024];
    char * lastdot;
    snprintf(resstr, sizeof(resstr), "%s", filename1);
    lastdot = strrchr (resstr, '.');
    if (lastdot != NULL)
        *lastdot = '\0';
    strncat(resstr, ".sol", sizeof(resstr) - strlen(resstr) - 1);
    FILE *f = fopen(resstr, "w+");
    if (f == NULL)
    {
        printf("Error opening outputfile!\n");
        exit(1);
    }
    for(row = 0; row < 9; row++){
        for(column = 0; column < 9; column++){
            if(copy == 1){
                fprintf(f,"%d",sudoku_hb1[row * 9 + column]);
            }
            else{
                fprintf(f,"%d",sudoku[row * 9 + column]);
            }
        }
        fprintf(f,"\n");
    }
    fclose(f);
}
int main(int argc, char *argv[])
{
    // Expects a single argument: the path of the puzzle file. The solution
    // path is derived from it ("<stem>.sol"), so no output path is taken.
    if(argc < 2){
        // Fix: the old message asked for an output path the program never reads.
        printf("Input puzzle file path required.\n");
        exit(-1);
    }
    init_sudoku(argv[1]);
    sudoku_call_gpu(argv[1]);
    return 0;
}
|
22,649 | #include "includes.h"
// Kernel: unpack frame `ifrm` of a packed sinogram. Each 32-bit word of
// dsino holds four bytes; counting from the LSB they are routed as
// (p1sino, d1sino, p1sino, d1sino), producing two output bytes per word in
// each destination array. One thread per packed word, tail-guarded by nele.
__global__ void sino_uncmprss(unsigned int * dsino, unsigned char * p1sino, unsigned char * d1sino, int ifrm, int nele)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx<nele) {
        // bits 8-15 and 24-31 go to d1sino
        d1sino[2 * idx] = (unsigned char)((dsino[ifrm*nele + idx] >> 8) & 0x000000ff);
        d1sino[2 * idx + 1] = (unsigned char)((dsino[ifrm*nele + idx] >> 24) & 0x000000ff);
        // bits 0-7 and 16-23 go to p1sino
        p1sino[2 * idx] = (unsigned char)(dsino[ifrm*nele + idx] & 0x000000ff);
        p1sino[2 * idx + 1] = (unsigned char)((dsino[ifrm*nele + idx] >> 16) & 0x000000ff);
    }
}
22,650 | #include <iostream>
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
// #define N (8*8)
// #define THREADS_PER_BLOCK 8
__global__ void add(int *a, int *b, int *c) {
    // One thread per element: c = a + b. There is no bounds guard, so the
    // launch must cover exactly the data size (true here: N is a multiple of
    // THREADS_PER_BLOCK).
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    c[index] = a[index] + b[index];
}
void random_ints(int *a, int size) {
    // Fill a[0..size-1] with values from rand(); the caller controls the seed.
    int idx = 0;
    while (idx < size) {
        a[idx] = rand();
        ++idx;
    }
}
int main(void) {
    // Element-wise vector addition demo: c = a + b over N random ints.
    srand(100);  // fixed seed so runs are reproducible
    int *a, *b, *c; // host copies of a, b, c
    int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
    int size = N * sizeof(int);
    // allocate device memory for a, b, c
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    random_ints(a, N);
    random_ints(b, N);
    // copy the inputs to the device
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // launch the add() kernel; N divides evenly by THREADS_PER_BLOCK, so the
    // grid covers the data exactly (the kernel has no bounds guard)
    add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);
    // copy the result back to the host (copy c); this cudaMemcpy blocks
    // until the kernel has finished, so no explicit sync is needed
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        std::cout << c[i] << " ";
    }
    std::cout << "\n";
    free(a); free(b); free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
22,651 | #include<stdio.h>
#define NUM_BLOCKS 8
#define BLOCK_WIDTH 5
__global__ void hello(){
    // Each of the NUM_BLOCKS * BLOCK_WIDTH threads prints its own identity.
    printf("\nHello from Thread [%d] inside Block [%d]", threadIdx.x, blockIdx.x);
}
int main(){
    // Launch the kernel, then block until the device finishes so that all
    // device-side printf output is flushed before the final message.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
    cudaDeviceSynchronize();
    printf("\nDONE");
    return 0;
}
22,652 | /*
============================================================================
Filename : algorithm.c
Author : Your name goes here
SCIPER : Your SCIPER number
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU Baseline
void array_process(double *input, double *output, int length, int iterations)
{
    // Jacobi-style 3x3 box average over the interior of a length x length
    // grid, repeated `iterations` times with the two buffers ping-ponged
    // between passes. The four central "heat" cells are pinned to 1000 after
    // every pass. Note the pointer swap is local: after an even number of
    // iterations the freshest data resides in the caller's `input` array.
    for (int step = 0; step < iterations; step++)
    {
        for (int row = 1; row < length - 1; row++)
        {
            for (int col = 1; col < length - 1; col++)
            {
                double acc = 0.0;
                for (int dr = -1; dr <= 1; dr++)
                    for (int dc = -1; dc <= 1; dc++)
                        acc += input[(row + dr) * length + (col + dc)];
                output[row * length + col] = acc / 9;
            }
        }
        int mid = length / 2;
        output[(mid - 1) * length + (mid - 1)] = 1000;
        output[mid * length + (mid - 1)] = 1000;
        output[(mid - 1) * length + mid] = 1000;
        output[mid * length + mid] = 1000;
        double *spare = input;
        input = output;
        output = spare;
    }
}
__global__
void gpu_calculation(double* input, double* output, int length)
{
    // One thread per grid cell: 3x3 box average over the interior, matching
    // the CPU baseline. x indexes the column, y the row.
    //
    // Fixes vs. the original: the body was stubbed out (it wrote 0 and the
    // real stencil was commented away), the interior test used `x > 1`,
    // wrongly excluding the first interior row/column (the CPU loops start
    // at index 1), and the four central heat cells were never pinned.
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int index = y * length + x;
    if (x >= 1 && x < length - 1 && y >= 1 && y < length - 1) {
        unsigned int mid = length / 2;
        if ((x == mid - 1 || x == mid) && (y == mid - 1 || y == mid)) {
            // the heat source stays fixed at 1000 every pass
            output[index] = 1000;
        } else {
            output[index] = (input[(y-1)*length + (x-1)] +
                             input[(y-1)*length + (x)]   +
                             input[(y-1)*length + (x+1)] +
                             input[(y)*length   + (x-1)] +
                             input[(y)*length   + (x)]   +
                             input[(y)*length   + (x+1)] +
                             input[(y+1)*length + (x-1)] +
                             input[(y+1)*length + (x)]   +
                             input[(y+1)*length + (x+1)]) / 9;
        }
    }
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
cudaEventCreate(&cpy_H2D_start);
cudaEventCreate(&cpy_H2D_end);
cudaEventCreate(&cpy_D2H_start);
cudaEventCreate(&cpy_D2H_end);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_end);
/* Preprocessing goes here */
size_t size = length*length*sizeof(double);
double* gpu_input;
double* gpu_output;
cout<<cudaSuccess<<endl;
cudaEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
cout<<cudaMalloc( (void**)&gpu_input, size)<<endl;
cout<<cudaMalloc( (void**)&gpu_output, size)<<endl;
cout<<cudaMemcpy((void*)gpu_input, (void*)input, size, cudaMemcpyHostToDevice)<<endl;
cout<<cudaMemcpy((void*)gpu_output, (void*)output, size, cudaMemcpyHostToDevice)<<endl;
cudaDeviceSynchronize();
cudaEventRecord(cpy_H2D_end);
cudaEventSynchronize(cpy_H2D_end);
//Copy array from host to device
cudaEventRecord(comp_start);
/* GPU calculation goes here */
int thrsPerBlock(64);
int nBlks(length/64 + 1);
/*for(int i = 0; i < iterations-1; i++){
gpu_calculation <<< 2048, 2048 >>>(gpu_input, gpu_output, length);
cudaDeviceSynchronize();
cout<<cudaGetLastError()<<endl;
double * temp = gpu_output;
gpu_output = gpu_input;
gpu_input = temp;
}*/
gpu_calculation <<< 2048, 2048 >>>(gpu_input, gpu_output, length);
cudaDeviceSynchronize();
cudaEventRecord(comp_end);
cudaEventSynchronize(comp_end);
cudaEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
cout<<cudaMemcpy((void*)output, (void*)gpu_output, size, cudaMemcpyDeviceToHost)<<endl;
cudaDeviceSynchronize();
cudaEventRecord(cpy_D2H_end);
cudaEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
cudaFree(gpu_input);
cudaFree(gpu_output);
float time;
cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
} |
22,653 | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#define N 2048 * 2048 // Number of elements in each vector
/*
* Optimize this already-accelerated codebase. Work iteratively
* and use profiler to check your progress
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 25us.
*
* Some bugs have been placed in this codebase for your edification.
*/
__global__ void saxpy(int n, float a, float * x, float * y)
{
    // y[i] = a * x[i] + y[i], one element per thread, tail-guarded.
    // Fixes: the original computed the global index with '*' instead of '+'
    // (blockIdx.x * blockDim.x * threadIdx.x), which scrambles and skips
    // elements, and guarded against the macro N instead of the parameter n.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < n )
        y[tid] = a * x[tid] + y[tid];
}
void initWith(float num, float *a, int size)
{
    // Fill the first `size` entries of a[] with the constant `num`.
    int idx = 0;
    while (idx < size)
        a[idx++] = num;
}
int main()
{
    // SAXPY driver: y = 2*x + y over N elements; prints the first and last
    // five results as a sanity check (all should equal 5.0).
    float *x, *y, *d_x, *d_y;
    int size = N * sizeof (float); // The total number of bytes per vector
    int deviceId;
    cudaGetDevice(&deviceId);
    x = (float*)malloc(size);
    y = (float*)malloc(size);
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    // Initialize memory
    initWith(2., x, N);
    initWith(1., y, N);
    cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
    int threads_per_block = 256;
    // Fix: the block count was hard-coded to 4092, covering only ~1M of the
    // 4M elements; derive it from N with ceiling division instead.
    int number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
    saxpy <<< number_of_blocks, threads_per_block >>> ( N, 2.0, d_x, d_y);
    cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaError_t asyncErr;
    asyncErr = cudaDeviceSynchronize();
    if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
    // Print out the first and last 5 values of c for a quality check.
    // Fix: %d on float arguments is undefined behaviour; use %f.
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %f, ", i, y[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %f, ", i, y[i]);
    printf ("\n");
    // Fix: host and device memory were leaked in the original.
    free(x);
    free(y);
    cudaFree(d_x);
    cudaFree(d_y);
}
|
22,654 | /*
*
* Carlos Roman Rivera - A01700820
*
* Programming Languages - Cuda Lab 2
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
// Kernel: dense matrix product (m x n) * (n x p) = (m x p), one thread per
// element of the result, tail-guarded against over-sized launches.
__global__ void matrix_multiplication(int *matrix_1, int *matrix_2, int *matrix_r, int m, int n, int p){
    int row = threadIdx.y + blockIdx.y * blockDim.y; // Multiply this row...
    int col = threadIdx.x + blockIdx.x * blockDim.x; // with this column.
    // Matrix multiplication as follows:
    // (m x n) x (n x p) = (m x p)
    int id = row * p + col; // Index of the result matrix in which we will write.
    int sum = 0;
    if (row < m && col < p) {
        for(int i = 0; i < n; i++) {
            // In matrix_1 we keep the row and advance in the columns.
            // In matrix_2 we keep the column and advance in the rows.
            sum = sum + matrix_1[row * n + i] * matrix_2[i * p + col];
            // row * n stays in the same row and "+ i" advances 1 column each cicle.
            // i * p advances one row each cicle and "+ col" keeps the same col.
        }
        matrix_r[id] = sum;
    }
}
// Display a matrix of the given dimensions.
void print_matrix(int *mat, int rows, int cols){
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
printf("%d\t", mat[i * cols + j]);
}
printf("\n");
}
printf("\n");
}
// User gives the value of each element of the matrix.
void user_matrix(int *mat, int rows, int cols){
int aux;
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
printf("[%d][%d] = ", i, j);
scanf("%i%*c", &aux);
mat[i * cols + j] = aux;
}
}
}
// "Randomly" generate the value of each element of the matrix.
void fill_matrix(int *mat, int rows, int cols){
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
mat[i * cols + j] = (rand() % 99) + 1;
}
}
}
// Interactive driver: reads the shapes of A (m x n) and B (n x p), fills
// them from stdin or pseudo-randomly, multiplies them on the GPU with
// matrix_multiplication, and prints the m x p result.
int main(){
srand(time(0));
// Matrices
int *h_matrix_1, *h_matrix_2, *h_matrix_r;
int *d_matrix_1, *d_matrix_2, *d_matrix_r;
// Dimensions
int matrix_1_rows, matrix_1_cols;
int matrix_2_rows, matrix_2_cols;
// Memory size
int matrix_1_size, matrix_2_size, matrix_r_size;
// User input for whether randomly or manually initialize matrices.
int respuesta;
printf("Matrix 1 rows: ");
scanf("%d%*c", &matrix_1_rows);
printf("Matrix 1 cols: ");
scanf("%d%*c", &matrix_1_cols);
printf("\nMatrix 2 rows: ");
scanf("%d%*c", &matrix_2_rows);
printf("Matrix 2 cols: ");
scanf("%d%*c", &matrix_2_cols);
// Matrices must be (m x n) and (n x p)
if (matrix_1_cols != matrix_2_rows) {
printf("\nLas dimensiones introducidas no son aceptables.\n");
return 0;
}
// Calculate memory given dimensions.
matrix_1_size = sizeof(int) * matrix_1_rows * matrix_1_cols;
matrix_2_size = sizeof(int) * matrix_2_rows * matrix_2_cols;
matrix_r_size = sizeof(int) * matrix_1_rows * matrix_2_cols;
// Allocate memory.
// NOTE(review): malloc/cudaMalloc return codes are not checked; large or
// bogus dimensions fail silently.
h_matrix_1 = (int *)malloc(matrix_1_size);
h_matrix_2 = (int *)malloc(matrix_2_size);
h_matrix_r = (int *)malloc(matrix_r_size);
// Select how to initialize matrices.
printf("\nDeseas:\n1. Introducir matrices manualmente.\n2. Generar matrices aleatoriamente.\nR = ");
scanf("%d%*c", &respuesta);
if(respuesta == 1) {
// User wants to initialize matrix.
printf("\nMatriz A: \n");
user_matrix(h_matrix_1, matrix_1_rows, matrix_1_cols);
printf("\nMatriz B: \n");
user_matrix(h_matrix_2, matrix_2_rows, matrix_2_cols);
} else {
// User wants random initialization.
if (respuesta != 2) {
// Invalid answer, therefore, randomly initialized.
printf("\nOpcion invalida, generando aleatorias.\n");
}
fill_matrix(h_matrix_1, matrix_1_rows, matrix_1_cols);
fill_matrix(h_matrix_2, matrix_2_rows, matrix_2_cols);
}
// Display matrix for interactive purpose.
printf("\nMatrix A:\n");
print_matrix(h_matrix_1, matrix_1_rows, matrix_1_cols);
// Display matrix for interactive purpose.
printf("Matrix B:\n");
print_matrix(h_matrix_2, matrix_2_rows, matrix_2_cols);
// Allocate memory on device.
cudaMalloc((void**)&d_matrix_1, matrix_1_size);
cudaMalloc((void**)&d_matrix_2, matrix_2_size);
cudaMalloc((void**)&d_matrix_r, matrix_r_size);
// Copy initialized matrices from host to device.
cudaMemcpy(d_matrix_1, h_matrix_1, matrix_1_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrix_2, h_matrix_2, matrix_2_size, cudaMemcpyHostToDevice);
// Each thread will calculate each element of the result matrix.
int ThreadsPerBlock = matrix_2_cols;
int NumBlocks = matrix_1_rows;
// NOTE(review): this is a SQUARE launch — NumBlocks^2 blocks of
// ThreadsPerBlock^2 threads each. It silently requires
// matrix_2_cols^2 <= 1024 (threads-per-block limit); confirm the kernel's
// 2D indexing actually expects a square grid rather than (rows x cols).
dim3 Blocks(NumBlocks, NumBlocks);
dim3 Threads(ThreadsPerBlock, ThreadsPerBlock);
// Display for interactive purpose.
printf("Blocks: %d\n", NumBlocks);
printf("Threads/Block: %d\n", ThreadsPerBlock);
// Execute on device. (No cudaGetLastError() check: an invalid launch
// configuration would go unnoticed and R would print garbage.)
matrix_multiplication<<<Blocks, Threads>>>(d_matrix_1, d_matrix_2, d_matrix_r, matrix_1_rows, matrix_1_cols, matrix_2_cols);
// Retrieve result from device and copy to host.
// (This blocking copy also synchronizes with the kernel launch above.)
cudaMemcpy(h_matrix_r, d_matrix_r, matrix_r_size, cudaMemcpyDeviceToHost);
// Display results for illustrative purposes.
printf("\n");
printf("Matrix R:\n");
print_matrix(h_matrix_r, matrix_1_rows, matrix_2_cols);
// Free host memory.
free(h_matrix_1);
free(h_matrix_2);
free(h_matrix_r);
// Free device memory.
cudaFree(d_matrix_1);
cudaFree(d_matrix_2);
cudaFree(d_matrix_r);
}
|
22,655 | #include <stdlib.h>
#include <math.h>
#include <stdio.h>
#define N 512
#define NTPB 1024
// Merge-path style parallel merge of two sorted int arrays a (length sizeA)
// and b (length sizeB) into m: thread i binary-searches the i-th output
// "diagonal" for the unique (a,b) split point and emits exactly one element.
// Launch contract: one block with blockDim.x >= sizeA + sizeB, and
// sizeA, sizeB <= N (the shared staging arrays hold N ints each).
__global__ void mergeSmall_k(int *a, int *b, int *m, int sizeA, int sizeB){
int K[2]; // low end of the search range on diagonal i (x = b-index, y = a-index)
int P[2]; // high end of the search range
int Q[2]; // current midpoint probe
int i = threadIdx.x;// + blockIdx.x * blockDim.x;
__shared__ int sA[N];
__shared__ int sB[N];
if(i<2*N){
// Threads i and i+N write the same value to the same slot (i%N): a benign
// duplicate store, since both read the identical a[i%N] / b[i%N].
sA[i%N] = a[i%N];
sB[i%N] = b[i%N];
// NOTE(review): __syncthreads() inside a conditional is only safe when the
// condition is block-uniform; here NTPB == 2*N makes every thread take this
// branch — re-check if the launch configuration ever changes.
__syncthreads();
// Initialize the diagonal's search interval, clamped to valid indices.
if(i>sizeA){
K[0] = i - sizeA;
K[1] = sizeA;
P[0] = sizeA;
P[1] = i - sizeA;
}
else{
K[0] = 0; K[1] = i;
P[0] = i; P[1] = 0;
}
// Binary search along the diagonal for the split point where
// sA[Q[1]-1] <= sB[Q[0]] and sA[Q[1]] > sB[Q[0]-1] (boundaries included).
while(1){
int offset = (K[1]-P[1])/2;
Q[0] = K[0] + offset;
Q[1] = K[1] - offset;
if(Q[1] >= 0 && Q[0] <= sizeB && (Q[1] == sizeA || Q[0] == 0 || sA[ Q[1] ] > sB[ Q[0]-1 ]) ){
if(Q[0] == sizeB || Q[1] == 0 || sA[ Q[1]-1 ] <= sB[ Q[0] ]){
// Split point found: output the smaller of the two head elements.
if(Q[1] < sizeA && (Q[0] == sizeB || sA[ Q[1] ] <= sB[ Q[0] ])){
m[i] = sA[Q[1]] ;
}
else{
m[i] = sB[Q[0]];
}
break;
}
else{
// Probe was too far into a: raise the low bound.
K[0] = Q[0] + 1;
K[1] = Q[1] - 1;
}
}
else{
// Probe was too far into b: lower the high bound.
P[0] = Q[0] - 1;
P[1] = Q[1] + 1;
}
}
}
}
// Host driver: builds two sorted arrays (odds in A, evens in B), merges them
// on the GPU with mergeSmall_k, and reports the kernel time via CUDA events.
int main(void){
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int *hA = (int*) malloc(N * sizeof(int));
    int *hB = (int*) malloc(N * sizeof(int));
    int *hM = (int*) calloc(2 * N, sizeof(int));   // zero-initialized result
    for (int i = 0; i < N; i++){
        hA[i] = 2 * i + 1;   // 1, 3, 5, ... (sorted)
        hB[i] = 2 * i;       // 0, 2, 4, ... (sorted)
    }

    int *dA, *dB, *dM;
    cudaMalloc(&dA, N * sizeof(int));
    cudaMalloc(&dB, N * sizeof(int));
    cudaMalloc(&dM, 2 * N * sizeof(int));
    cudaMemcpy(dA, hA, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, N * sizeof(int), cudaMemcpyHostToDevice);

    float elapsedMs = 0.f;
    cudaEventRecord(start);
    mergeSmall_k<<<1, NTPB>>>(dA, dB, dM, N, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedMs, start, stop);
    printf("mergeSmall_k: temps écoulé = %f secs\n", elapsedMs / 1000);

    cudaMemcpy(hM, dM, 2 * N * sizeof(int), cudaMemcpyDeviceToHost);

    free(hA);
    free(hB);
    free(hM);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dM);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
22,656 | /*
Template code for convolution. CS6023, IITM */
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define W 1024 // Input DIM
#define OW (W-4) // Output DIM
#define D 8 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 128 // Number of kernels
// Deterministically fill the W x W x D input volume: each byte is a fixed
// polynomial of its (row, col, channel) coordinates, taken modulo 255.
void fillMatrix(unsigned char *matrix){
    unsigned char (*m)[W][D] = (unsigned char (*)[W][D])matrix;
    for (int i = 0; i < W; i++)
        for (int j = 0; j < W; j++)
            for (int k = 0; k < D; k++)
                m[i][j][k] = (i*j + j*k + i*k + i*2 + j*3 + k*4) % 255;
}
// Deterministically fill the N convolution kernels (each T x T x D) with
// values in (-1, 1): the fmod-1.0 fractional part of a fixed linear
// combination of the (kernel, row, col, channel) coordinates.
void fillKernel(float *kernel){
    float (*t)[T][T][D] = (float (*)[T][T][D])kernel;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < T; j++)
            for (int k = 0; k < T; k++)
                for (int l = 0; l < D; l++)
                    t[i][j][k][l] = fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1, 1.0);
}
// Dump the N x OW x OW output volume to "assignment4_out": all OW*OW values
// of one kernel's output plane space-separated on a single line, one line
// per kernel.
void print_matrix_to_file(float *m){
    const char *fname = "assignment4_out";
    FILE *f = fopen(fname, "w");
    float (*mat)[OW][OW] = (float (*)[OW][OW])m;
    for (unsigned filt = 0; filt < N; filt++) {
        for (unsigned r = 0; r < OW; r++)
            for (unsigned c = 0; c < OW; c++)
                fprintf(f, "%4f ", mat[filt][r][c]);
        fprintf(f, "\n");
    }
    fclose(f);
}
// 2D convolution of a W x W x D input with N kernels of size T x T x D,
// "valid" borders (output OW = W-4 per side).
// Launch contract: grid = (N filters, W rows), block = W threads (one per
// column). Each j iteration stages one input row in shared memory, and every
// interior thread atomically accumulates its partial row-sum into the output.
//
// NOTE(review): because the T partial sums are accumulated with atomicAdd,
// 'output' MUST be zeroed before launch — cudaMalloc does not zero memory,
// and the caller currently launches without a cudaMemset.
__global__ void conv(unsigned char *matrix,float *tile,float *output){
int filter=blockIdx.x;
int eX=blockIdx.y;
int eY=threadIdx.x;
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
float (*t)[T][T][D]=(float (*)[T][T][D])tile;
float (*o)[OW][OW]=(float (*)[OW][OW])output;
__shared__ unsigned char slice[W][D];
float psum;
// Border rows produce no output. eX == blockIdx.y is uniform across the
// block, so this early return is safe w.r.t. the __syncthreads() below.
if(eX<2||eX>W-3) return;
for(int j=0;j<T;j++){
// Stage input row (eX + j - 2), all D channels, into shared memory.
for(int i=0;i<D;i++){
slice[eY][i]=m[(eX+j-2)][eY][i];
}
__syncthreads();
psum=0.0f;
if(!(eY<2||eY>W-3)){
// Partial sum of this kernel row j over the T x D column/channel window.
for(int k=0;k<T;k++){
for(int l=0;l<D;l++){
psum+=t[filter][j][k][l]*slice[eY+k-2][l];
}
}
atomicAdd(&o[filter][(eX-2)][eY-2],psum);
}
// Keep slower threads from overwriting slice[] before all reads finish.
__syncthreads();
}
}
// Host driver: fills the input volume and the N kernels, runs the tiled
// convolution on the GPU, times it with CUDA events, and writes the result
// with print_matrix_to_file.
int main()
{
    unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
    float *kernel=(float*)malloc(sizeof(float)*T*T*D*N);
    float *output=(float *)malloc(sizeof(float)*N*OW*OW);
    fillMatrix(matrix);
    fillKernel(kernel);
    unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
    float *Dkernel;cudaMalloc(&Dkernel,sizeof(float)*N*T*T*D);
    float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW);
    cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice);
    cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,cudaMemcpyHostToDevice);
    // BUG FIX: conv() accumulates its T partial sums into Doutput with
    // atomicAdd, but cudaMalloc does not zero memory — without this memset
    // the result contained whatever garbage was in the allocation.
    cudaMemset(Doutput, 0, sizeof(float)*N*OW*OW);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);
    // One block per (filter, input row); one thread per column.
    conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput);
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("%f\n",milliseconds);
    cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost);
    //Use print_matrix_to_file function only
    print_matrix_to_file(output);
    // Release host and device buffers (previously leaked until exit).
    free(matrix);
    free(kernel);
    free(output);
    cudaFree(Dmatrix);
    cudaFree(Dkernel);
    cudaFree(Doutput);
    return 0;
}
|
22,657 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <fstream>
#include <chrono>
#include <iostream>
// One thread per output cell: C[r][c] = dot(row r of A, column c of B) for
// square size x size row-major matrices. Guarded against grids that
// overshoot the matrix edge.
__global__ void matrixMultiplication2D(const double *A, const double *B, double *C, int size) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= size || col >= size)
        return;                       // ragged-edge guard
    double acc = 0;
    for (int k = 0; k < size; k++)
        acc += A[row * size + k] * B[k * size + col];
    C[row * size + col] = acc;
}
// Grid-stride variant of the square matrix multiply: each thread walks every
// (row, col) cell congruent to its starting coordinates modulo the total
// grid extent, so any launch geometry covers the whole matrix.
// (Function name kept verbatim — including the "strirde" typo — because it
// is the public symbol callers launch.)
__global__ void strirde_MatrixMultiplication(const double* A, const double* B, double* result, int size){
    const int col0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int row0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int colStep = gridDim.x * blockDim.x;
    const int rowStep = gridDim.y * blockDim.y;
    for (int col = col0; col < size; col += colStep){
        for (int row = row0; row < size; row += rowStep){
            double acc = 0;
            for (int k = 0; k < size; k++)
                acc += A[row * size + k] * B[k * size + col];
            result[row * size + col] = acc;
        }
    }
}
// 3D-decomposed multiply: one thread per (row, col, k) product term.
// BUG FIX: every thread sharing a (row, col) pair updates the same output
// cell, so the original plain "+=" was an unsynchronized read-modify-write
// data race that silently dropped contributions. Accumulate atomically
// instead. Requirements: C must be zero-initialized by the caller before
// launch, and atomicAdd on double requires compute capability 6.0+.
__global__ void matrixMultiplication3D(const double *A, const double *B, double *C, int size) {
    int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
    int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int deepIdx = blockIdx.z * blockDim.z + threadIdx.z;
    if (rowIdx < size && colIdx < size && deepIdx < size) {
        atomicAdd(&C[rowIdx * size + colIdx],
                  A[rowIdx * size + deepIdx] * B[deepIdx * size + colIdx]);
    }
}
// CPU reference check: recompute a*b on the host and report every cell of c
// that deviates from the reference by more than 0.01 (absolute).
// NOTE(review): `abs` here relies on the C++ double overload being in scope;
// under plain C <math.h> semantics abs(int) would truncate the difference —
// fabs would be the unambiguous choice.
void checkMatrixMul( double* a, double* b, double* c, int N)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            double ref = 0;
            for (int k = 0; k < N; k++)
                ref += a[row * N + k] * b[k * N + col];
            if (abs(c[row * N + col] - ref) > 0.01)
                std::cout << "Error: Result " << row << " " << col << " "
                          << c[row * N + col] << " " << ref << std::endl;
        }
    }
}
// Validate a CUDA runtime status: on failure print the error string to
// stderr and abort via assert; pass the status through either way so the
// wrapper can stay inline around any runtime call.
inline cudaError_t checkCUDA(cudaError_t result){
    if (result == cudaSuccess)
        return result;
    fprintf(stderr, "CUDA Runtime error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
// ---------------------------------------------------------------------------
// Benchmark driver, refactored: the four original copy-pasted benchmark loops
// (classic/managed memory x blocked/grid-stride kernel) are now a single
// parameterized series runner. Timing semantics are unchanged — each sample
// times allocation + initialization + (copies) + kernel + synchronize.
// ---------------------------------------------------------------------------

// Uniform pseudo-random value in [0, 1] (same expression as the original).
static double randomUnit() {
    return static_cast<double>(rand()) / (RAND_MAX / 1.);
}

// One timed run using explicit host/device buffers and cudaMemcpy.
// Returns elapsed seconds; when 'verify', checks the result on the CPU.
static double timeCopyRun(int dim, bool stride, bool verify) {
    const int n = dim * dim;
    const size_t size = n * sizeof(double);
    auto start = std::chrono::high_resolution_clock::now();
    double* h_A = static_cast<double*>(malloc(size));
    double* h_B = static_cast<double*>(malloc(size));
    double* h_result = static_cast<double*>(malloc(size));
    for (int j = 0; j < n; j++) {
        h_A[j] = randomUnit();
        h_B[j] = randomUnit();
        h_result[j] = 0;
    }
    double *d_A, *d_B, *d_result;
    checkCUDA(cudaMalloc((void**)&d_A, size));
    checkCUDA(cudaMalloc((void**)&d_B, size));
    checkCUDA(cudaMalloc((void**)&d_result, size));
    checkCUDA(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    checkCUDA(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
    dim3 threads_per_block(16, 16, 1);           // 16 x 16 threads per block
    if (stride) {
        dim3 number_of_blocks(32, 32, 1);        // fixed grid; kernel strides
        strirde_MatrixMultiplication<<<number_of_blocks, threads_per_block>>>(d_A, d_B, d_result, dim);
    } else {
        dim3 number_of_blocks((dim / threads_per_block.x) + 1, (dim / threads_per_block.y) + 1, 1);
        matrixMultiplication2D<<<number_of_blocks, threads_per_block>>>(d_A, d_B, d_result, dim);
    }
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = stop - start;
    if (verify) {
        checkCUDA(cudaMemcpy(h_result, d_result, size, cudaMemcpyDeviceToHost));
        checkMatrixMul(h_A, h_B, h_result, dim);
    }
    checkCUDA(cudaFree(d_A));
    checkCUDA(cudaFree(d_B));
    checkCUDA(cudaFree(d_result));
    free(h_A);
    free(h_B);
    free(h_result);
    return elapsed.count();
}

// One timed run using unified (managed) memory; no explicit copies.
static double timeManagedRun(int dim, bool stride, bool verify) {
    const int n = dim * dim;
    const size_t size = n * sizeof(double);
    auto start = std::chrono::high_resolution_clock::now();
    double *A, *B, *result;
    checkCUDA(cudaMallocManaged(&A, size));
    checkCUDA(cudaMallocManaged(&B, size));
    checkCUDA(cudaMallocManaged(&result, size));
    for (int j = 0; j < n; j++) {
        A[j] = randomUnit();
        B[j] = randomUnit();
        result[j] = 0;
    }
    dim3 threads_per_block(16, 16, 1);
    if (stride) {
        dim3 number_of_blocks(32, 32, 1);
        strirde_MatrixMultiplication<<<number_of_blocks, threads_per_block>>>(A, B, result, dim);
    } else {
        dim3 number_of_blocks((dim / threads_per_block.x) + 1, (dim / threads_per_block.y) + 1, 1);
        matrixMultiplication2D<<<number_of_blocks, threads_per_block>>>(A, B, result, dim);
    }
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = stop - start;
    if (verify)
        checkMatrixMul(A, B, result, dim);
    checkCUDA(cudaFree(A));
    checkCUDA(cudaFree(B));
    checkCUDA(cudaFree(result));
    return elapsed.count();
}

// Run one benchmark series: numResults matrix sizes starting at startDim,
// stepping by jump, numIters samples each; writes
// "<elements>\t<dim>\t<avg seconds>" lines to fname and echoes them.
static void runSeries(const char* label, const char* fname, bool managed,
                      bool stride, int startDim, int jump, int numResults,
                      int numIters, bool verify) {
    std::cout << label << std::endl;
    std::ofstream save;
    save.open(fname);
    int dim = startDim;
    for (int i = 0; i < numResults; i++) {
        double average = 0;
        for (int k = 0; k < numIters; k++) {
            // Only the first sample of each size is verified (as before).
            bool doVerify = verify && (k == 0);
            double t = managed ? timeManagedRun(dim, stride, doVerify)
                               : timeCopyRun(dim, stride, doVerify);
            average = (average * k + t) / (k + 1);   // running mean
        }
        int elements = dim * dim;
        save << elements << "\t" << dim << "\t" << average << std::endl;
        std::cout << elements << "\t" << dim << "\t" << average << std::endl;
        dim += jump;
    }
    save.close();
}

int main() {
    int startnum = 100;          // first matrix dimension
    int jump = 100;              // dimension step between measurements
    int numberOfResult = 70;     // sizes per series
    int numberOfIteration = 1;   // samples averaged per size
    bool check = false;          // CPU verification of GPU results
    std::cout << "start" << std::endl;
    runSeries("classic_2D", "classic_2D.txt", false, false,
              startnum, jump, numberOfResult, numberOfIteration, check);
    runSeries("managed_2D", "managed_2D.txt", true, false,
              startnum, jump, numberOfResult, numberOfIteration, check);
    // Console label typo ("stridce") fixed; the output file name is unchanged.
    runSeries("stride_classic_2D", "stride_classic_2D.txt", false, true,
              startnum, jump, numberOfResult, numberOfIteration, check);
    runSeries("stride_managed_2D", "stride_managed_2D.txt", true, true,
              startnum, jump, numberOfResult, numberOfIteration, check);
    return 0;
}
|
22,658 | #include "includes.h"
// Dense output layer: each thread computes one output node's pre-activation
// as the dot product of the hidden-layer outputs with that node's weight row
// (weight is row-major, [outputNode][hiddenNode]).
// NOTE(review): there is no bounds guard on the thread index — the launch
// must supply exactly one thread per output node; confirm at call sites.
__global__ void OutputLayer(float* hiddenVotes, float* weight, int d_numHiddenNodes, float* d_votes){
    int node = threadIdx.x + blockDim.x * blockIdx.x;
    float* row = weight + node * d_numHiddenNodes;
    float sum = 0.0f;
    for (int h = 0; h < d_numHiddenNodes; ++h) {
        sum += hiddenVotes[h] * row[h];
    }
    d_votes[node] = sum;
}
22,659 | #include "includes.h"
// Repeated block-level sum reduction into *shared_var: in each of 'iters'
// rounds, the block stages its slice of input_values in shared memory, then
// thread 0 serially sums the slice and atomicAdd's it into the global
// accumulator.
//
// NOTE(review): both __syncthreads() barriers below are commented out (each
// labeled "Required for correctness"), so thread 0 may read local_mem slots
// before other threads have written them — a data race. This appears
// deliberate (a broken-reduction teaching/profiling exercise); uncomment
// both barriers to make the result correct.
__global__ void simple_reduction(int *shared_var, int *input_values, int N, int iters)
{
__shared__ int local_mem[256]; // per-block staging; assumes blockDim.x <= 256
int iter, i;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int local_tid = threadIdx.x;
int local_dim = blockDim.x;
int minThreadInThisBlock = blockIdx.x * blockDim.x;
int maxThreadInThisBlock = minThreadInThisBlock + (blockDim.x - 1);
// The last block may cover fewer than blockDim.x valid inputs.
if (maxThreadInThisBlock >= N)
{
local_dim = N - minThreadInThisBlock;
}
for (iter = 0; iter < iters; iter++)
{
if (tid < N)
{
local_mem[local_tid] = input_values[tid];
}
// Required for correctness
// __syncthreads();
/*
* Perform the local reduction across values written to shared memory
* by threads in this thread block.
*/
if (local_tid == 0)
{
int sum = 0;
for (i = 0; i < local_dim; i++)
{
sum = sum + local_mem[i];
}
atomicAdd(shared_var, sum);
}
// Required for correctness
// __syncthreads();
}
}
22,660 | // aslp-aslp-cudamatrix/cu-nnet-mpi-sync.cu
// Copyright 2016 ASLP (author: zhangbinbin)
// Created on 2016-02-24
#include "curand.h"
#ifdef CURAND_CHECK
#undef CURAND_CHECK
#endif
#define CURAND_CHECK(status) { curandAssert(status, __FILE__, __LINE__); }
#include "stdio.h"
// Report a cuRAND failure (numeric status code plus file/line) and, unless
// 'abort' is false, terminate the process using the status as exit code.
inline void curandAssert(curandStatus_t status, const char *file, int line, bool abort=true) {
    if (status == CURAND_STATUS_SUCCESS)
        return;
    printf("curandAssert: error code %d in file: %s line: %d\n", status, file, line);
    if (abort)
        exit(status);
}
const int BLOCK1D = 512;
//const int BLOCK2D = 32;
// Ceiling division for non-negative x and positive y: the number of
// y-sized chunks needed to cover x elements.
inline int divup(int x, int y) {
    int biased = x + y - 1;   // bias x up to the next multiple boundary
    return biased / y;
}
/// Element-wise running average: dst[i] = (dst[i] + src[i]) / 2.
/// Strides over the full (possibly 2D) grid/block extent so any launch
/// shape covers all 'num' elements.
template<typename Real>
__global__
static void cuda_average_kernel(Real *dst, const Real *src, int num) {
    int lane = threadIdx.x + threadIdx.y * blockDim.x;       // thread within its block
    int blk = blockIdx.x + blockIdx.y * gridDim.x;           // block within the grid
    int perBlock = blockDim.x * blockDim.y;
    int total = perBlock * gridDim.x * gridDim.y;            // total threads launched
    for (int i = lane + blk * perBlock; i < num; i += total) {
        dst[i] = (dst[i] + src[i]) / 2;
    }
}
// Launch helper: BLOCK1D threads per block, enough blocks to give one
// thread per element, issued on the caller's stream.
template<typename Real>
void cuda_average_impl(Real *dst, const Real *src, int num, cudaStream_t &stream) {
    dim3 threads(BLOCK1D);
    dim3 blocks(divup(num, BLOCK1D));
    cuda_average_kernel<<<blocks, threads, 0, stream>>>(dst, src, num);
}
// Public entry points: element-wise average of 'src' into 'dst' on 'stream'
// (float and double instantiations of the templated implementation above).
void cuda_average(float *dst, const float *src, int num, cudaStream_t &stream) {
cuda_average_impl(dst, src, num, stream);
}
void cuda_average(double *dst, const double *src, int num, cudaStream_t &stream) {
cuda_average_impl(dst, src, num, stream);
}
|
22,661 | #include<cstdio>
#include<stdio.h>
#include<time.h>
#include<string.h>
#include<unistd.h>
#include<stdlib.h>
unsigned char *gpu_input_data_s,*gpu_output_data_s;
unsigned int *gpu_offset;
#define FINGERPRINT_LEN 20
#define MAX_CHUNK_LEN (16384)
#define MJW
#ifdef MJW
// Streaming SHA-1 state: a 64-bit byte counter split in two 32-bit halves,
// the five 32-bit chaining words, and a partially filled 64-byte input
// block. (Layout appears to follow the PolarSSL/mbed TLS sha1_context —
// TODO confirm provenance; ipad/opad are only used by the HMAC variant,
// which is not present in this file.)
typedef struct
{
unsigned long total[2]; /*!< number of bytes processed */
unsigned long state[5]; /*!< intermediate digest state */
unsigned char buffer[64]; /*!< data block being processed */
unsigned char ipad[64]; /*!< HMAC: inner padding */
unsigned char opad[64]; /*!< HMAC: outer padding */
}
sha1_context;
#ifndef GET_ULONG_BE
#define GET_ULONG_BE(n,b,i) \
{ \
(n) = ( (unsigned long) (b)[(i) ] << 24 ) \
| ( (unsigned long) (b)[(i) + 1] << 16 ) \
| ( (unsigned long) (b)[(i) + 2] << 8 ) \
| ( (unsigned long) (b)[(i) + 3] ); \
}
#endif
#ifndef PUT_ULONG_BE
#define PUT_ULONG_BE(n,b,i) \
{ \
(b)[(i) ] = (unsigned char) ( (n) >> 24 ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 3] = (unsigned char) ( (n) ); \
}
#endif
/*
 * SHA-1 context setup: zero the processed-byte counters and load the five
 * standard SHA-1 initialization constants into the chaining state.
 */
__device__ void sha1_starts( sha1_context *ctx )
{
ctx->total[0] = 0;
ctx->total[1] = 0;
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
}
/*
 * SHA-1 compression function: absorb one 64-byte block into ctx->state.
 * The message schedule W[] is kept as a rolling 16-word window — R(t)
 * recomputes the expanded word in place. F and K are redefined for each
 * of the four 20-round stages; P() is one unrolled round.
 */
__device__ static void sha1_process( sha1_context *ctx, unsigned char data[64] )
{
unsigned long temp, W[16], A, B, C, D, E;
/* Load the block as sixteen big-endian 32-bit words. */
GET_ULONG_BE( W[ 0], data, 0 );
GET_ULONG_BE( W[ 1], data, 4 );
GET_ULONG_BE( W[ 2], data, 8 );
GET_ULONG_BE( W[ 3], data, 12 );
GET_ULONG_BE( W[ 4], data, 16 );
GET_ULONG_BE( W[ 5], data, 20 );
GET_ULONG_BE( W[ 6], data, 24 );
GET_ULONG_BE( W[ 7], data, 28 );
GET_ULONG_BE( W[ 8], data, 32 );
GET_ULONG_BE( W[ 9], data, 36 );
GET_ULONG_BE( W[10], data, 40 );
GET_ULONG_BE( W[11], data, 44 );
GET_ULONG_BE( W[12], data, 48 );
GET_ULONG_BE( W[13], data, 52 );
GET_ULONG_BE( W[14], data, 56 );
GET_ULONG_BE( W[15], data, 60 );
/* S: 32-bit left-rotate. R: next schedule word. P: one SHA-1 round. */
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#define R(t) \
( \
temp = W[(t - 3) & 0x0F] ^ W[(t - 8) & 0x0F] ^ \
W[(t - 14) & 0x0F] ^ W[ t & 0x0F], \
( W[t & 0x0F] = S(temp,1) ) \
)
#define P(a,b,c,d,e,x) \
{ \
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30); \
}
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
/* Rounds 0-19: F = choice(x,y,z). */
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, R(16) );
P( D, E, A, B, C, R(17) );
P( C, D, E, A, B, R(18) );
P( B, C, D, E, A, R(19) );
#undef K
#undef F
/* Rounds 20-39: F = parity(x,y,z). */
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, R(20) );
P( E, A, B, C, D, R(21) );
P( D, E, A, B, C, R(22) );
P( C, D, E, A, B, R(23) );
P( B, C, D, E, A, R(24) );
P( A, B, C, D, E, R(25) );
P( E, A, B, C, D, R(26) );
P( D, E, A, B, C, R(27) );
P( C, D, E, A, B, R(28) );
P( B, C, D, E, A, R(29) );
P( A, B, C, D, E, R(30) );
P( E, A, B, C, D, R(31) );
P( D, E, A, B, C, R(32) );
P( C, D, E, A, B, R(33) );
P( B, C, D, E, A, R(34) );
P( A, B, C, D, E, R(35) );
P( E, A, B, C, D, R(36) );
P( D, E, A, B, C, R(37) );
P( C, D, E, A, B, R(38) );
P( B, C, D, E, A, R(39) );
#undef K
#undef F
/* Rounds 40-59: F = majority(x,y,z). */
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, R(40) );
P( E, A, B, C, D, R(41) );
P( D, E, A, B, C, R(42) );
P( C, D, E, A, B, R(43) );
P( B, C, D, E, A, R(44) );
P( A, B, C, D, E, R(45) );
P( E, A, B, C, D, R(46) );
P( D, E, A, B, C, R(47) );
P( C, D, E, A, B, R(48) );
P( B, C, D, E, A, R(49) );
P( A, B, C, D, E, R(50) );
P( E, A, B, C, D, R(51) );
P( D, E, A, B, C, R(52) );
P( C, D, E, A, B, R(53) );
P( B, C, D, E, A, R(54) );
P( A, B, C, D, E, R(55) );
P( E, A, B, C, D, R(56) );
P( D, E, A, B, C, R(57) );
P( C, D, E, A, B, R(58) );
P( B, C, D, E, A, R(59) );
#undef K
#undef F
/* Rounds 60-79: F = parity(x,y,z). */
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, R(60) );
P( E, A, B, C, D, R(61) );
P( D, E, A, B, C, R(62) );
P( C, D, E, A, B, R(63) );
P( B, C, D, E, A, R(64) );
P( A, B, C, D, E, R(65) );
P( E, A, B, C, D, R(66) );
P( D, E, A, B, C, R(67) );
P( C, D, E, A, B, R(68) );
P( B, C, D, E, A, R(69) );
P( A, B, C, D, E, R(70) );
P( E, A, B, C, D, R(71) );
P( D, E, A, B, C, R(72) );
P( C, D, E, A, B, R(73) );
P( B, C, D, E, A, R(74) );
P( A, B, C, D, E, R(75) );
P( E, A, B, C, D, R(76) );
P( D, E, A, B, C, R(77) );
P( C, D, E, A, B, R(78) );
P( B, C, D, E, A, R(79) );
#undef K
#undef F
/* Feed-forward: add the working variables back into the chaining state. */
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
/*
 * SHA-1 process buffer: absorb ilen bytes. Completes any partially filled
 * 64-byte block first, compresses full blocks directly from 'input', and
 * buffers the remaining tail for a later call. total[] is a 64-bit byte
 * counter kept in two 32-bit halves with a manual carry.
 */
__device__ void sha1_update( sha1_context *ctx, unsigned char *input, int ilen )
{
int fill;
unsigned long left;
if( ilen <= 0 )
return;
left = ctx->total[0] & 0x3F; /* bytes already buffered (mod 64) */
fill = 64 - left;
ctx->total[0] += ilen;
ctx->total[0] &= 0xFFFFFFFF; /* keep 32 bits even where long is 64-bit */
if( ctx->total[0] < (unsigned long) ilen )
ctx->total[1]++; /* carry into the high counter word */
/* Top up and flush a previously buffered partial block. */
if( left && ilen >= fill )
{
memcpy( (void *) (ctx->buffer + left),
(void *) input, fill );
sha1_process( ctx, ctx->buffer );
input += fill;
ilen -= fill;
left = 0;
}
/* Compress whole blocks straight from the caller's buffer. */
while( ilen >= 64 )
{
sha1_process( ctx, input );
input += 64;
ilen -= 64;
}
/* Stash the tail (< 64 bytes) for the next update/finish. */
if( ilen > 0 )
{
memcpy( (void *) (ctx->buffer + left),
(void *) input, ilen );
}
}
/* Standard Merkle-Damgard padding block: a single 0x80 byte followed by
 * zeros; sha1_finish() appends the needed prefix of this array. */
__device__ static const unsigned char sha1_padding[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*
 * SHA-1 final digest: pad the message (0x80, zeros up to 56 mod 64, then
 * the 64-bit big-endian bit length) and write the 20-byte big-endian
 * digest to 'output'.
 */
__device__ void sha1_finish( sha1_context *ctx, unsigned char output[20])
{
unsigned long last, padn;
unsigned long high, low;
unsigned char msglen[8];
/* Total message length in BITS, split into two big-endian 32-bit words. */
high = ( ctx->total[0] >> 29 )
| ( ctx->total[1] << 3 );
low = ( ctx->total[0] << 3 );
PUT_ULONG_BE( high, msglen, 0 );
PUT_ULONG_BE( low, msglen, 4 );
last = ctx->total[0] & 0x3F;
/* Pad so the buffered length reaches 56 mod 64 (room for the length). */
padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last );
sha1_update( ctx, (unsigned char *) sha1_padding, padn );
sha1_update( ctx, msglen, 8 );
/* Emit the chaining state as the big-endian digest. */
PUT_ULONG_BE( ctx->state[0], output, 0 );
PUT_ULONG_BE( ctx->state[1], output, 4 );
PUT_ULONG_BE( ctx->state[2], output, 8 );
PUT_ULONG_BE( ctx->state[3], output, 12 );
PUT_ULONG_BE( ctx->state[4], output, 16 );
}
/*
 * One-shot convenience wrapper: output = SHA-1(input[0..ilen)), a 20-byte
 * digest, computed with a context local to this call.
 */
__device__ void sha1( unsigned char *input, int ilen, unsigned char *output )
{
    sha1_context ctx;
    sha1_starts( &ctx );
    sha1_update( &ctx, input, ilen );
    sha1_finish( &ctx, output );
}
/*
 * One chunk per thread: hash input[offset[i]..offset[i+1]) into the i-th
 * 20-byte slot of output. offset[] holds num+1 boundaries; threads with
 * index >= num do nothing.
 */
__global__ void sha1_kernel(unsigned char * input, unsigned char* output, unsigned int * offset, unsigned int num)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num)
        return;
    unsigned char *chunk = input + offset[i];
    int len = offset[i + 1] - offset[i];
    sha1(chunk, len, output + i * FINGERPRINT_LEN);
}
#else
#define FROM_BIG_ENDIAN(v) \
((v & 0xff) << 24) | ((v & 0xff00) << 8) | ((v & 0xff0000) >> 8) | \
((v & 0xff000000) >> 24) \
#define LEFTROL(v, n) (v << n) | (v >> (32 - n))
/*
 * Monolithic one-shot SHA-1 (this path only compiles when MJW is NOT
 * defined; MJW is defined above, so this is currently dead code). Copies
 * the chunk into a large per-thread buffer (MAX_CHUNK_LEN + 128 bytes of
 * local memory), appends padding, then runs the 80-round transform per
 * 64-byte block; md receives the five state words, each byte-swapped.
 *
 * NOTE(review): if length_tmp % 64 == 56 neither padding branch is taken
 * and 'tmp' is used uninitialized (undefined behavior) — the rem == 56
 * case is missing.
 * NOTE(review): the appended 8-byte trailer stores the PADDED BYTE length
 * (long_tmp = tmp), not the message bit length required by the SHA-1
 * spec, and the trailing code after the per-block loop (from "a = H0;"
 * on) processes one extra fabricated block; this routine does not match
 * standard SHA-1 output. Treat as suspect before ever re-enabling it.
 */
__device__ void GPU_sha1_kernel(unsigned char* data_tmp, unsigned int length_tmp, unsigned int* md)
{
unsigned int words[80];
unsigned int H0 = 0x67452301, H1 = 0xEFCDAB89, H2 = 0x98BADCFE, H3 = 0x10325476, H4 = 0xC3D2E1F0;
unsigned int a, b, c, d, e, f, k, temp, temp2;
unsigned int i, j;
unsigned char add_data[MAX_CHUNK_LEN + 128];
unsigned int kk;
unsigned int tmp;
unsigned long long long_tmp;
memcpy(add_data, data_tmp, length_tmp);
kk = length_tmp;
/* Padding: 0x80 then zeros up to 56 mod 64 (rem == 56 case missing). */
if(length_tmp%64<56)
{
add_data[kk++]=0x80;
int t=length_tmp%64+1;
for(;t<56;t++)
{
add_data[kk++]=0x00;
}
tmp=length_tmp-(length_tmp%64)+64;
}else if(length_tmp%64>56)
{
add_data[kk++]=0x80;
int t=length_tmp%64+1;
for(;t<64;t++)
{
add_data[kk++]=0x00;
}
for(t=0;t<56;t++)
{
add_data[kk++]=0x00;
}
tmp=length_tmp-(length_tmp%64)+128;
}
/* Append the padded byte count (see NOTE above) big-endian in the tail. */
long_tmp = tmp;
add_data[tmp-8]=(long_tmp & 0xFF00000000000000) >> 56;
add_data[tmp-7]=(long_tmp & 0x00FF000000000000) >> 48;
add_data[tmp-6]=(long_tmp & 0x0000FF0000000000) >> 40;
add_data[tmp-5]=(long_tmp & 0x000000FF00000000) >> 32;
add_data[tmp-4]=(long_tmp & 0x00000000FF000000) >> 24;
add_data[tmp-3]=(long_tmp & 0x0000000000FF0000) >> 16;
add_data[tmp-2]=(long_tmp & 0x000000000000FF00) >> 8;
add_data[tmp-1]=(long_tmp & 0x00000000000000FF);
unsigned int *data=(unsigned int*)add_data;
unsigned int dataLen=tmp;
/* Main loop: one 64-byte block per iteration, 80 rounds each. */
for(j = 0; j < dataLen; j += 64)
{
a = H0;
b = H1;
c = H2;
d = H3;
e = H4;
/* Rounds 0-15: schedule loaded (byte-swapped) straight from the block. */
for (i=0; i<16; i++)
{
temp = *(( unsigned int*)(data + j/4+i));
words[i] = FROM_BIG_ENDIAN(temp);
f = (b & c) | ((~b) & d);
k = 0x5A827999;
temp = LEFTROL(a, 5);
temp2 = f + e + k + words[i];
temp = temp +temp2;
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
/* Rounds 16-19: same F; k retains 0x5A827999 from the loop above. */
for (i=16; i<20; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = (b & c) | ((~b) & d);
temp = LEFTROL(a, 5);
temp2 = f + e + k + words[i];
temp = temp + temp2;
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
/* Rounds 20-39: parity F. */
for (i=20; i<40; i++)
{
temp = words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16];
words[i] = LEFTROL(temp, 1);
f=b ^ c ^ d;
k= 0x6ED9EBA1;
temp = LEFTROL(a, 5);
temp2 = f + e + k + words[i];
temp = temp + temp2;
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
/* Rounds 40-59: majority F. */
for (i=40; i<60; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = (b & c) | (b & d) | (c & d);
k = 0x8F1BBCDC;
temp = LEFTROL(a, 5);
temp2 = f + e + k+ words[i];
temp = temp + temp2;
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
/* Rounds 60-79: parity F again. */
for (i=60; i<80; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = b ^ c ^ d;
k = 0xCA62C1D6;
temp = LEFTROL(a, 5);
temp2 = f + e + k + words[i];
temp = temp + temp2;
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
H0 += a;
H1 += b;
H2 += c;
H3 += d;
H4 += e;
}
/* Extra fabricated block (non-standard — see header NOTE). */
a = H0;
b = H1;
c = H2;
d = H3;
e = H4;
words[0] = FROM_BIG_ENDIAN(128);
f = (b & c) | ((~b) & d);
k = 0x5A827999;
temp = LEFTROL(a, 5);
temp += f + e + k + words[0];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
for (i=1; i<15; i++)
{
words[i] = 0;
f = (b & c) | ((~b) & d);
temp = LEFTROL(a, 5);
temp += f + e + k + words[i];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
words[15] = dataLen*8;
f = (b & c) | ((~b) & d);
temp = LEFTROL(a, 5);
temp += f + e + k + words[15];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
for (i=16; i<20; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = (b & c) | ((~b) & d);
temp = LEFTROL(a, 5);
temp += f + e + k + words[i];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
for (i=20; i<40; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f=b ^ c ^ d;
k = 0x6ED9EBA1;
temp = LEFTROL(a, 5);
temp += f + e + k + words[i];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
for (i=40; i<60; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = (b & c) | (b & d) | (c & d);
k = 0x8F1BBCDC;
temp = LEFTROL(a, 5);
temp += f + e + k + words[i];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
for (i=60; i<80; i++)
{
temp = (words[i - 3] ^ words[i - 8] ^ words[i - 14] ^ words[i - 16]);
words[i] = LEFTROL(temp, 1);
f = b ^ c ^ d;
k = 0xCA62C1D6;
temp = LEFTROL(a, 5);
temp += f + e + k + words[i];
e = d;
d = c;
c = LEFTROL(b, 30);
b = a;
a = temp;
}
H0 += a;
H1 += b;
H2 += c;
H3 += d;
H4 += e;
/* Emit the five state words, byte-swapped, into md[0..4]. */
int ct=0;
md[ct++] =FROM_BIG_ENDIAN( H0);
md[ct++] =FROM_BIG_ENDIAN( H1);
md[ct++] =FROM_BIG_ENDIAN( H2);
md[ct++] =FROM_BIG_ENDIAN( H3);
md[ct++] =FROM_BIG_ENDIAN( H4);
}
/*
 * One chunk per thread (non-MJW path): hash input[offset[i]..offset[i+1])
 * into the i-th 20-byte output slot; threads with index >= num do nothing.
 */
__global__ void sha1_kernel(unsigned int *offset, unsigned char *input, unsigned char *output, unsigned int num)
{
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num)
        return;
    GPU_sha1_kernel(input + offset[i],
                    offset[i + 1] - offset[i],
                    (unsigned int *)(output + i * FINGERPRINT_LEN));
}
#endif
extern "C"
// Hash `num` variable-length chunks packed back-to-back in `input`.
// offset[i] is the byte offset of chunk i; offset[num] marks the end of the
// last chunk (num+1 entries total). Each FINGERPRINT_LEN-byte digest is
// written to output + i*FINGERPRINT_LEN. `len` is the total input byte count
// copied to the device. Requires GPU_sha1_init() to have been called first.
void GPU_sha1(unsigned char *input,unsigned char *output,unsigned int *offset,unsigned int num,unsigned int len)
{
	cudaMemcpy(gpu_input_data_s,input,len*sizeof(unsigned char),cudaMemcpyHostToDevice);
	cudaMemcpy(gpu_offset,offset,(num+1)*sizeof(unsigned int),cudaMemcpyHostToDevice);
	// One thread per chunk, rounded up to whole 32-thread blocks.
	unsigned int threadNum=32;
	unsigned int blockNum=(unsigned int)(num+threadNum-1)/threadNum;
	dim3 grid(blockNum,1,1);
	dim3 threads(threadNum,1,1);
#ifdef MJW
	// NOTE(review): this branch passes (input, output, offset, num), which does
	// not match the sha1_kernel signature visible in the non-MJW path — confirm
	// the MJW build provides a kernel with that argument order.
	sha1_kernel<<<grid,threads>>>(gpu_input_data_s, gpu_output_data_s, gpu_offset, num);
#else
	sha1_kernel<<<grid,threads>>>(gpu_offset,gpu_input_data_s,gpu_output_data_s,num);
#endif
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// drop-in replacement with identical semantics.
	cudaDeviceSynchronize();
	cudaMemcpy(output,gpu_output_data_s,num*FINGERPRINT_LEN,cudaMemcpyDeviceToHost);
}
extern "C"
// Allocate the device-side buffers used by GPU_sha1(): the input staging
// area, the digest output area, and the (num+1)-entry chunk-offset table.
void GPU_sha1_init(unsigned int max_chunk_len,unsigned int num)
{
	cudaSetDevice(0);
	cudaMalloc((void**)&gpu_offset, (num+1)*sizeof(unsigned int));
	cudaMalloc((void**)&gpu_input_data_s, max_chunk_len*num);
	cudaMalloc((void**)&gpu_output_data_s, num*FINGERPRINT_LEN);
}
extern "C"
// Release every device buffer allocated by GPU_sha1_init().
void GPU_sha1_destroy(void)
{
	cudaFree(gpu_offset);
	cudaFree(gpu_output_data_s);
	cudaFree(gpu_input_data_s);
}
|
22,662 | #include <stdio.h>
__global__ void kernelA(){
	// Print the launch geometry once. The giant conditional selects a single
	// thread; this would not be done in practice. Use logical && rather than
	// the original bitwise & — both work on the 0/1 comparison results, but
	// && states the intent and short-circuits.
	if (blockIdx.x == 0 && blockIdx.y == 1 && blockIdx.z == 0 &&
	    threadIdx.x == 1 && threadIdx.y == 0 && threadIdx.z == 1) {
		printf("gridDim (%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z);
		printf("blockDim (%d, %d, %d)\n", blockDim.x, blockDim.y, blockDim.z);
		printf("blockIdx (%d, %d, %d)\n", blockIdx.x, blockIdx.y, blockIdx.z);
		printf("threadIdx (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z);
		// minimum unit being executed by compute engine at the same time;
		// called wave front in AMD. It is not set by the programmer.
		printf("warpSize (%d)\n", warpSize);
	}
}
int main()
{
	cudaSetDevice(0);
	// dim3 is a 3-component integer vector type used for launch geometry.
	const dim3 gridShape(50, 100, 50);
	const dim3 blockShape(8, 8, 16);
	kernelA<<<gridShape, blockShape>>>();
	// Wait for the kernel's printf output before tearing the context down.
	cudaDeviceSynchronize();
	cudaDeviceReset();
	return 0;
}
|
22,663 | __global__ void wave1Drusanov2(double * f_tmp,double * f_nm,
double * f_in, double nu, int N){
// One explicit update step of a 1-D scheme:
//   f_tmp[i] = f_in[i] - (2*nu/3) * (f_nm[i] - f_nm[i-1])
// over N cells, one thread per cell, with a periodic wrap at the left
// boundary (the neighbour of cell 0 is cell N-1).
int tid = threadIdx.x+blockIdx.x*blockDim.x;
if(tid<N){
int x_m = tid-1;
// periodic boundary: wrap the left neighbour of cell 0 to cell N-1
if(x_m<0) x_m = (N-1);
f_tmp[tid]=f_in[tid]-(2.*nu/3.)*(f_nm[tid]-f_nm[x_m]);
}
}
|
22,664 | // input: in_data (b,n,c), in_grid (b,n)
// output: out_data (b,g,c), out_pooling_mask (b,g,c)
__global__ void grid_pooling_gpu(int b,int n,int c,int g,const float * in_data,const int * in_grid,float * out_data,int * out_pooling_mask){
	// Max-pool n points into g grid cells per (batch, channel) pair.
	// out_data must be zero-filled and out_pooling_mask filled with -1 before
	// launch (see gridpoolingLauncher). Grid-stride over the b*c pairs: each
	// thread owns every cell of its pair, so no two threads write the same slot.
	const int step = blockDim.x * gridDim.x;
	for (int flat = blockIdx.x * blockDim.x + threadIdx.x; flat < b * c; flat += step)
	{
		const int batch = flat / c;
		const int channel = flat % c;
		for (int point = 0; point < n; point++)
		{
			const int cell = in_grid[batch * n + point];          // in_grid[b,n]
			const int src = batch * n * c + point * c + channel;  // in_data[b,n,c]
			const int dst = batch * g * c + cell * c + channel;   // out_data[b,g,c]
			// Keep the largest point value per cell and remember which
			// input element produced it.
			if (in_data[src] > out_data[dst])
			{
				out_data[dst] = in_data[src];
				out_pooling_mask[dst] = src;
			}
		}
	}
}
__global__ void grid_pooling_grad_gpu(int b,int n,int c,int g,const int * pooling_mask,const float * grad_out,float * out){
	// Backward pass of grid max-pooling: route grad_out[b,g,c] to the input
	// element recorded in pooling_mask (-1 means the cell received no point).
	// `out` must be zero-filled before launch. Grid-stride over (batch,
	// channel) pairs; each mask entry is owned by exactly one thread.
	const int step = blockDim.x * gridDim.x;
	for (int flat = blockIdx.x * blockDim.x + threadIdx.x; flat < b * c; flat += step)
	{
		const int batch = flat / c;
		const int channel = flat % c;
		for (int cell = 0; cell < g; cell++)
		{
			const int src = batch * g * c + cell * c + channel;
			const int winner = pooling_mask[src];
			if (winner != -1)
			{
				out[winner] += grad_out[src];
			}
		}
	}
}
// Host launcher for grid max-pooling over (b,n,c) points into (b,g,c) cells.
void gridpoolingLauncher(int b,int n,int c,int g,const float * in_data,const int * in_grid,float * out_data,int * out_pooling_mask){
	const int threads_per_block = 128;
	// The kernel uses a grid-stride loop, so any block count is correct.
	const int number_of_blocks = 8;
	// Zero the pooled output and set every mask entry to -1 ("no winner").
	// cudaMemset is byte-wise: 0x00 bytes are 0.0f for float, 0xFF bytes are
	// -1 for int, so both fills are well defined. Use sizeof() instead of the
	// hard-coded 4 so the sizes track the element types.
	cudaMemset(out_data, 0, (size_t)b * g * c * sizeof(float));
	cudaMemset(out_pooling_mask, -1, (size_t)b * g * c * sizeof(int));
	grid_pooling_gpu<<<number_of_blocks, threads_per_block>>>(b,n,c,g,in_data,in_grid,out_data,out_pooling_mask);
}
// Host launcher for the grid max-pooling backward pass.
void gridpoolinggradLauncher(int b,int n,int c,int g,const int * pooling_mask,const float * grad_out,float * out){
	const int threads_per_block = 128;
	// Grid-stride kernel: any block count covers all b*c pairs.
	const int number_of_blocks = 8;
	// Gradients are accumulated with +=, so the buffer must start at zero.
	// sizeof(float) replaces the original hard-coded 4.
	cudaMemset(out, 0, (size_t)b * n * c * sizeof(float));
	grid_pooling_grad_gpu<<<number_of_blocks, threads_per_block>>>(b,n,c,g,pooling_mask,grad_out,out);
}
|
22,665 | #include "includes.h"
__global__ void matrixTrans(double * M,double * MT, int rows, int cols)
{
	// Out-of-place transpose of a rows x cols row-major matrix:
	// MT[c][r] = M[r][c], one element per thread. The x launch dimension
	// maps to the source row, the y dimension to the source column.
	const int r = blockIdx.x * blockDim.x + threadIdx.x;
	const int c = blockIdx.y * blockDim.y + threadIdx.y;
	if (r >= rows || c >= cols)
		return;
	MT[c * rows + r] = M[r * cols + c];
}
22,666 | // Copyright (c) OpenMMLab. All rights reserved.
#include <cstdint>
namespace mmdeploy {
namespace operation {
namespace cuda {
template <typename T>
__global__ void transpose(const T* src, int height, int width, int channels, int src_width_stride,
                          T* dst, int dst_channel_stride) {
  // Convert interleaved HWC `src` into planar CHW `dst`. One thread per
  // (col, row) pixel copies all of that pixel's channels.
  const auto col = blockIdx.x * blockDim.x + threadIdx.x;
  const auto row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col < width && row < height) {
    for (int c = 0; c < channels; ++c) {
      dst[c * dst_channel_stride + row * width + col] = src[row * src_width_stride + col * channels + c];
    }
  }
}
template <typename T>
void Transpose(const T* src, int height, int width, int channels, T* dst, cudaStream_t stream) {
  // One 32x8 thread block per tile; ceil-divide so partial tiles at the
  // right/bottom edges are still covered (the kernel bounds-checks).
  const dim3 block(32, 8);
  const dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
  const auto src_width_stride = width * channels;  // elements per source row (HWC)
  const auto dst_channel_stride = width * height;  // elements per destination plane (CHW)
  transpose<T><<<grid, block, 0, stream>>>(src, height, width, channels, src_width_stride, dst,
                                           dst_channel_stride);
}
template void Transpose<uint8_t>(const uint8_t* src, int height, int width, int channels,
uint8_t* dst, cudaStream_t stream);
template void Transpose<float>(const float* src, int height, int width, int channels, float* dst,
cudaStream_t stream);
} // namespace cuda
} // namespace operation
} // namespace mmdeploy
|
22,667 | #include "includes.h"
__global__ void compute_array_square(float* array, float* outArray, int size)
{
	// Element-wise square, outArray[i] = array[i]^2, via a grid-stride loop
	// so the kernel is correct for any launch configuration.
	const int stride = blockDim.x * gridDim.x;
	for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < size; idx += stride)
	{
		const float v = array[idx];
		outArray[idx] = v * v;
	}
}
22,668 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void collateSegments_gpu(int * src, int * scanResult, int * output, int numEdges) {
	// For each source vertex, record the scan value at the last edge of its
	// segment: a segment ends where src changes between consecutive edges,
	// or at the final edge. Grid-stride loop over all edges.
	int stride = blockDim.x * gridDim.x;
	for (int e = blockIdx.x * blockDim.x + threadIdx.x; e < numEdges; e += stride) {
		// Short-circuit keeps src[e + 1] in bounds for the last edge.
		bool segmentEnd = (e == numEdges - 1) || (src[e] != src[e + 1]);
		if (segmentEnd) {
			output[src[e]] = scanResult[e];
		}
	}
}
|
22,669 | #include<stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include<stdlib.h>
// Toy two-layer network: forward pass (sigmoid hidden layer h, sigmoid
// output Z) plus an L2-regularized squared-error loss, repeated T times.
// Thread layout: blockIdx.x = sample (Nx, of gridDim.x samples),
// threadIdx.x = hidden unit (mx, of m = blockDim.x),
// threadIdx.y = input/output unit (nx, of n = blockDim.y).
// atomicAdd on double requires compute capability 6.0+.
// NOTE(review): there is no grid-wide barrier, so loss[t] is zeroed by block 0
// while other blocks may already be accumulating into it; likewise h/Z are
// initialized and then atomically accumulated with no __syncthreads() in
// between. Results are only reliable for single-block launches — confirm.
__global__ void fxn(double *W1, double *W2, double *X, double *Y, double *b1, double *b2, double *h, double *Z, double *loss){
int m = blockDim.x, n = blockDim.y, T = 10;
int mx = threadIdx.x, Nx = blockIdx.x, nx = threadIdx.y;
// L2 regularization strength.
double lambda = 0.01;
// Initialization: block 0 fills the weights and biases with the constant 0.1
// (the commented-out curand code was the intended random initialization).
if(Nx == 0){
/* curandState state;
curand_init(clock64(), nx, 0, &state);
W1[m*nx + mx] = curand_uniform(&state);
curand_init(clock64(), nx, 0, &state);
W2[n*mx + nx] = curand_uniform(&state);
printf("%lf %lf\n", W1[m*nx + mx], W2[n*mx + nx]);
*/
W1[m*nx + mx] = 0.1;
W2[n*mx + nx] = 0.1;
if(nx == 0){
//b1[mx] = curand_uniform(&state);
b1[mx] = 0.1;
}
if(mx == 0){
//b2[nx] = curand_uniform(&state);
b2[nx] = 0.1;
}
}
__syncthreads();
// PUT CUDA BARRIER
for(int t = 0; t < T; t++){
// Initialize loss for this iteration (single thread of block 0).
if(Nx == 0 && mx == 0 && nx == 0){
printf("***********\n\nTHIS IS ITERATION NUMBER %d\n\n****************\n\n", t);
loss[t] = 0;
}
// Hidden pre-activation: h[Nx, mx] = b1[mx] + sum_nx X[Nx, nx] * W1[nx, mx].
if(nx == 0){
h[Nx*m + mx] = b1[mx];
}
atomicAdd(&h[Nx*m + mx], X[Nx*n + nx] * W1[m*nx + mx]);
__syncthreads();
if(nx == 0)
printf("H values: %lf\n", h[Nx*m + mx]);
double e;
// Sigmoid: h = e^h / (1 + e^h).
if(nx == 0){
e = exp(h[Nx*m + mx]);
h[Nx*m + mx] = e/(1 + e);
}
__syncthreads();
// Output pre-activation: Z[Nx, nx] = b2[nx] + sum_mx h[Nx, mx] * W2[mx, nx].
if(mx == 0){
Z[Nx*n + nx] = b2[nx];
}
atomicAdd(&Z[Nx*n + nx], h[Nx*m + mx] * W2[n*mx + nx]); // CHECK SWAP
__syncthreads();
// Sigmoid on the output layer.
if(mx == 0){
e = exp(Z[Nx*n + nx]);
Z[Nx*n + nx] = e/(1 + e);
printf("Z values: %lf\n", Z[Nx*n + nx]);
}
__syncthreads();
printf("%d %d %d\n", Nx, mx, nx);
// Per-thread loss contribution d: squared error (mx == 0 threads) plus the
// lambda * ||W||^2 regularizer (block 0 threads).
double d = 0;
if(mx == 0){
//printf("%d %lf %lf\n", bd*n + td, Z[bd*n + td], Y[bd*n + td]);
d = Z[Nx*n + nx] - Y[Nx*n + nx];
d = d * d;
}
if(Nx == 0){
d += lambda * (W1[m*nx + mx] * W1[m*nx + mx] + W2[n*mx + nx] * W2[n*mx + nx]);
//atomicAdd(&d, dx);
printf("aya");
}
if(mx == 0 || Nx == 0)
printf("d value here: %d %d %lf\n", mx, Nx, d);
// ATOMIC OPERATION REQUIRED HERE
if(d != 0){
atomicAdd(&loss[t], d);
}
printf("down: %d %d %d\n", Nx, mx, nx);
if(Nx + mx + nx == 0)
printf("loss: %lf\n", loss[t]);
// eta needs to be declared and grad needs to be found here
/* if(bd == 0){
for(int i = 0;i < n; i++){
W[m*i + td] -= eta * grad
}
b1[td] -= eta * grad;
if(td < n)
b2[td] -= eta * grad;
}*/
// ITERATION COMPLETE
}
}
int main(){
	// N samples, m hidden units, n input/output units, T iterations
	// (T must match the kernel's internal loop count).
	int N, m, n, T;
	N = 3, m = 4, n = m/2, T = 10;
	// Host buffers for inputs, targets and kernel outputs.
	double *X, *Y, *h, *Z, *loss;
	X = (double*)malloc(N*n*sizeof(double));
	Y = (double*)malloc(N*n*sizeof(double));
	h = (double*)malloc(N*m*sizeof(double));
	Z = (double*)malloc(N*n*sizeof(double));
	loss = (double*)malloc(T * sizeof(double));
	// Device buffers; weights/biases live only on the device — the kernel
	// initializes them itself.
	double *d_X, *d_Y, *d_W1, *d_W2, *d_b1, *d_b2, *d_h, *d_loss, *d_Z;
	cudaMalloc((void**)&d_W1, sizeof(double) * n * m);
	cudaMalloc((void**)&d_W2, sizeof(double) * m * n);
	cudaMalloc((void**)&d_X, sizeof(double) * N * n);
	cudaMalloc((void**)&d_Y, sizeof(double) * N * n);
	cudaMalloc((void**)&d_Z, sizeof(double) * N * n);
	cudaMalloc((void**)&d_b1, sizeof(double) * m);
	cudaMalloc((void**)&d_b2, sizeof(double) * n);
	cudaMalloc((void**)&d_h, sizeof(double) * N * m);
	cudaMalloc((void**)&d_loss, sizeof(double) * T);
	// Read and echo the inputs and targets from stdin.
	for(int i = 0; i < N*n; i++){
		scanf("%lf", &X[i]);
		printf("%lf\n", X[i]);
	}
	for(int i = 0; i < N*n; i++){
		scanf("%lf", &Y[i]);
		printf("%lf\n", Y[i]);
	}
	cudaMemcpy(d_X, X, sizeof(double) * N * n, cudaMemcpyHostToDevice);
	cudaMemcpy(d_Y, Y, sizeof(double) * N * n, cudaMemcpyHostToDevice);
	// One block per sample; one thread per (hidden, output) unit pair.
	dim3 threads(m, n);
	fxn<<<N, threads>>>(d_W1, d_W2, d_X, d_Y, d_b1, d_b2, d_h, d_Z, d_loss);
	cudaMemcpy(h, d_h, sizeof(double) * N * m, cudaMemcpyDeviceToHost);
	cudaMemcpy(Z, d_Z, sizeof(double) * N * n, cudaMemcpyDeviceToHost);
	cudaMemcpy(loss, d_loss, sizeof(double) * T, cudaMemcpyDeviceToHost);
	printf("h\n");
	for(int i = 0;i < N*m; i++)
		printf("%lf ", h[i]);
	printf("\nZ");
	for(int i = 0;i < N*n; i++)
		printf("%lf ", Z[i]);
	printf("\n");
	printf("LOSS\n");
	for(int i = 0;i < T; i++)
		printf("%lf ", loss[i]);
	printf("\n");
	// Release device and host memory (the original leaked every buffer).
	cudaFree(d_W1); cudaFree(d_W2); cudaFree(d_X); cudaFree(d_Y); cudaFree(d_Z);
	cudaFree(d_b1); cudaFree(d_b2); cudaFree(d_h); cudaFree(d_loss);
	free(X); free(Y); free(h); free(Z); free(loss);
	return 0;
}
|
22,670 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define N 4
#define TPB 2
// C = A * B for square n x n matrices in row-major order; one thread per
// output element. The original ignored the n parameter (it used the
// compile-time N macro) and stored without a bounds check; with n == N and a
// grid that exactly tiles the matrix the behavior is unchanged, but the
// kernel is now safe for grids that overshoot and for other sizes.
__global__ void matrixMul(int *a, int *b,int *c ,int n)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < n && col < n)
	{
		int sum = 0;
		for (int i = 0; i < n; i++)
		{
			sum += a[row * n + i] * b[i * n + col];
		}
		c[row * n + col] = sum;
	}
}
int main()
{
	// Multiply two random N x N integer matrices on the GPU and print all three.
	const int bytes = sizeof(int) * N * N;
	int *hostA = (int*)malloc(bytes);
	int *hostB = (int*)malloc(bytes);
	int *hostC = (int*)malloc(bytes);
	int *devA, *devB, *devC;
	cudaMalloc(&devA, bytes);
	cudaMalloc(&devB, bytes);
	cudaMalloc(&devC, bytes);
	// Fill both inputs with small random values in [0, N).
	for (int k = 0; k < N * N; k++)
	{
		hostA[k] = random() % N;
		hostB[k] = random() % N;
	}
	printf("\nMatrx A =>\n");
	for (int r = 0; r < N; r++)
	{
		for (int col = 0; col < N; col++)
			printf(" %d", hostA[r * N + col]);
		printf("\n");
	}
	printf("\nMatrx B =>\n");
	for (int r = 0; r < N; r++)
	{
		for (int col = 0; col < N; col++)
			printf(" %d", hostB[r * N + col]);
		printf("\n");
	}
	cudaMemcpy(devA, hostA, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(devB, hostB, bytes, cudaMemcpyHostToDevice);
	// N is a multiple of TPB, so the 2-D grid tiles the matrix exactly.
	dim3 gridShape(N / TPB, N / TPB);
	dim3 blockShape(TPB, TPB);
	matrixMul<<<gridShape, blockShape>>>(devA, devB, devC, N);
	// This blocking copy also synchronizes with the kernel.
	cudaMemcpy(hostC, devC, bytes, cudaMemcpyDeviceToHost);
	printf("\nMatrx C =>\n");
	for (int r = 0; r < N; r++)
	{
		for (int col = 0; col < N; col++)
			printf(" %d", hostC[r * N + col]);
		printf("\n");
	}
	cudaFree(devA);
	cudaFree(devB);
	cudaFree(devC);
	free(hostA);
	free(hostB);
	free(hostC);
	return 0;
}
|
__global__ void elementwise_add(const int * array1,
		const int * array2, int * result, int size) {
	// result[i] = array1[i] + array2[i] for i in [0, size), using a
	// grid-stride loop so any launch configuration covers every element.
	const unsigned int step = gridDim.x * blockDim.x;
	for (unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += step) {
		result[i] = array1[i] + array2[i];
	}
}
|
22,672 | #include <stdio.h>
// Decode per-point box regression logits against ROI anchor boxes.
// Each point carries 7 attributes; from the decode math they appear to be
// three sizes, three center coordinates and a heading angle — confirm the
// exact layout with the caller. `channels` is the stride of input_logits.
__global__ void bbox_logits_to_attrs_gpu_kernel(int input_npoint, int channels,
                                                const float* input_roi_attrs,
                                                const float* input_logits,
                                                float* output_attrs) {
  int input_id = threadIdx.x + blockIdx.x * blockDim.x;
  if (input_id < input_npoint) {
    const float* roi = input_roi_attrs + input_id * 7;
    const float* logit = input_logits + input_id * channels;
    float* out = output_attrs + input_id * 7;
    // Anchor diagonal of the first two sizes, used to scale the offsets.
    // x*x + y*y replaces powf(x, 2) + powf(y, 2) — cheaper, same intent.
    float roi_diag = sqrtf(roi[0] * roi[0] + roi[1] * roi[1]);
    // Sizes: exponential scaling of the anchor sizes.
    out[0] = expf(logit[0]) * roi[0];
    out[1] = expf(logit[1]) * roi[1];
    out[2] = expf(logit[2]) * roi[2];
    // Offsets scaled by the diagonal (first two) or the third size.
    out[3] = logit[3] * roi_diag + roi[3];
    out[4] = logit[4] * roi_diag + roi[4];
    out[5] = logit[5] * roi[2] + roi[5];
    // Angle: logit scaled by pi. The float literal (3.1415927f) keeps the
    // multiply in single precision; the original double literal forced a
    // promotion to double arithmetic.
    out[6] = logit[6] * 3.1415927f + roi[6];
  }
}
// Host launcher: picks an occupancy-friendly block size and decodes all
// input_npoint boxes on the default stream.
void bbox_logits_to_attrs_gpu_launcher(int input_npoint, int channels,
                                       const float* input_roi_attrs,
                                       const float* input_logits,
                                       float* output_attrs) {
  if (input_npoint == 0)
    return;  // nothing to decode; avoid a zero-sized launch
  int minGridSize = 0;  // minimum grid for full-device occupancy (unused)
  int blockSize = 0;    // block size chosen by the occupancy calculator
  cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
                                     bbox_logits_to_attrs_gpu_kernel, 0, input_npoint);
  // Round the grid up so every point gets a thread.
  const int gridSize = (input_npoint + blockSize - 1) / blockSize;
  bbox_logits_to_attrs_gpu_kernel<<<gridSize, blockSize>>>(
      input_npoint, channels, input_roi_attrs, input_logits, output_attrs);
}
|
22,673 |
#define N 100
__constant__ double buffer[N];
|
22,674 | #include "includes.h"
__global__ void VectorAdd(int *a, int *r, int n, double gamma)
{
	// Per-element gamma correction on 8-bit intensities:
	// r[i] = 255 * (a[i]/255)^(1/gamma), truncated to int.
	// Single-block launch assumed: only threadIdx.x is used.
	int idx = threadIdx.x;
	if (idx < n)
	{
		double normalized = (double)a[idx] / 255.0;
		r[idx] = (int)(255.0 * pow(normalized, 1.0 / gamma));
	}
}
22,675 | #include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
// Copy each thread's src element one whole grid-plane forward (offset
// gridDim.y * gridDim.x * blockDim.x elements) and zero the original slot.
// NOTE(review): the dst parameter is never used — confirm whether callers
// expect the copy to land in dst rather than in the upper half of src.
__global__ void spread_z(Real* src, Real* dst)
{
// Flat global thread id over a (gridDim.x, gridDim.y) grid of 1-D blocks.
unsigned int tid = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
//unsigned int tid1 = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
Real res = src[tid];
src[tid + (gridDim.y * gridDim.x) * blockDim.x] = res;
// Literal matches the configured Real type (double vs float build).
#ifdef DOUBLE
src[tid] = 0.0;
#else
src[tid] = 0.f;
#endif
}
22,676 | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <inttypes.h>
#include <iostream>
#include <ctime>
using namespace std;
const long N = 12800;
const int THREADS_PER_BLOCK = 32;
// CPU copies of a, b, c
float *a_cpu, *b_cpu, *c_cpu;
// C = A * B for square N x N row-major float matrices; one thread per
// output element.
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, long N) {
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    if (ROW < N && COL < N) {
        float tmpSum = 0;
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        // Store inside the bounds guard: the original wrote C[ROW*N+COL]
        // unconditionally, an out-of-bounds write for threads in the grid's
        // overhang beyond the matrix edge.
        C[ROW * N + COL] = tmpSum;
    }
}
// Launch the N x N matrix multiply with a 2-D grid of
// THREADS_PER_BLOCK x THREADS_PER_BLOCK blocks.
void matrixMultiplication(float *A, float *B, float *C, int N){
    // Ceil-divide so the grid covers the whole matrix; the kernel's bounds
    // guard handles the overhang. The original used (N*N)/THREADS_PER_BLOCK
    // blocks per grid *dimension*, which for N = 12800 is 5,120,000 — far
    // beyond the needed coverage and beyond CUDA's grid-dimension limits.
    dim3 threadsPerBlock(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    dim3 blocksPerGrid((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
                       (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N);
}
void random_elements (long N) {
    // Fill the global input matrices a_cpu and b_cpu with the constant 1.0f.
    // (Name kept for interface compatibility; no randomness is involved.)
    for (long row = 0; row < N; ++row) {
        for (long col = 0; col < N; ++col) {
            a_cpu[row * N + col] = 1.0f;
            b_cpu[row * N + col] = 1.0f;
        }
    }
}
void print_matrix(float *m, long N) {
    // Dump an N x N row-major matrix, elements separated by '-'.
    cout << "Matrix:" << endl;
    for (long row = 0; row < N; ++row) {
        for (long col = 0; col < N; ++col)
            cout << m[row * N + col] << "-";
        cout << endl;
    }
}
int main (void) {
    // Time an N x N GPU matrix multiply end to end (allocation to cleanup).
    clock_t begin = clock();
    // GPU copies of a, b, c
    float *a_gpu, *b_gpu, *c_gpu;
    long size = N * N * sizeof(float);
    cudaMalloc((void **)&a_gpu, size);
    cudaMalloc((void **)&b_gpu, size);
    cudaMalloc((void **)&c_gpu, size);
    // Allocate CPU copies of a, b, c
    a_cpu = (float *)malloc(size);
    b_cpu = (float *)malloc(size);
    c_cpu = (float *)malloc(size);
    // Set up input matrices (constant 1.0f fill).
    random_elements(N);
    //print_matrix(c_cpu, N);
    // Copy inputs to device
    cudaMemcpy(a_gpu, a_cpu, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b_cpu, size, cudaMemcpyHostToDevice);
    matrixMultiplication(a_gpu, b_gpu, c_gpu, N);
    cudaDeviceSynchronize();
    // Copy results to CPU copy of c
    cudaMemcpy(c_cpu, c_gpu, size, cudaMemcpyDeviceToHost);
    //print_matrix(c_cpu, N);
    cout << c_cpu[0] << endl;
    // Cleanup. The host buffers came from malloc(), so they must be released
    // with free(): the original `delete a_cpu, b_cpu, c_cpu;` used the wrong
    // deallocator and — because of the comma operator — only deleted a_cpu.
    cudaFree(a_gpu); cudaFree(b_gpu); cudaFree(c_gpu);
    free(a_cpu); free(b_cpu); free(c_cpu);
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    cout << elapsed_secs;
    return 0;
}
|
22,677 | #include "includes.h"
// Red-eye removal: one thread per selected coordinate. Each thread replaces
// the red channel of a (2*template_half_height+1) x (2*template_half_width+1)
// window around its pixel with the average of the green and blue channels,
// clamping the window to the image borders.
// NOTE(review): coordinates are read from the END of d_coordinates
// (d_coordinates[imgSize - i - 1]) — this assumes the array holds imgSize
// entries ordered so the strongest candidates sit last; confirm with caller.
// NOTE(review): d_r (the red input channel) is never read here.
__global__ void remove_redness_from_coordinates( const unsigned int* d_coordinates, unsigned char* d_r, unsigned char* d_b, unsigned char* d_g, unsigned char* d_r_output, int num_coordinates, int num_pixels_y, int num_pixels_x, int template_half_height, int template_half_width )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if ( global_index_1d < num_coordinates )
{
// Flat pixel index, decoded to (x, y) below.
unsigned int image_index_1d = d_coordinates[ imgSize - global_index_1d - 1 ];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x, image_index_1d / num_pixels_x);
for ( int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++ )
{
for ( int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++ )
{
int2 image_offset_index_2d = make_int2( x, y );
// Clamp the window coordinate to the image bounds.
int2 image_offset_index_2d_clamped = make_int2( min( nx - 1, max( 0, image_offset_index_2d.x ) ), min( ny - 1, max( 0, image_offset_index_2d.y ) ) );
int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[ image_offset_index_1d_clamped ];
unsigned char b_value = d_b[ image_offset_index_1d_clamped ];
// Replacement red value: mean of green and blue.
unsigned int gb_average = ( g_value + b_value ) / 2;
d_r_output[ image_offset_index_1d_clamped ] = (unsigned char)gb_average;
}
}
}
}
22,678 | #include "includes.h"
// Per-block exclusive prefix sum over BLOCK_WIDTH-element tiles of d_in.
// The scanned tile goes to d_out and the tile's total to d_total[blockIdx.x]
// so a later pass can combine tiles. Launch with BLOCK_WIDTH threads/block.
__global__ void partialScan(unsigned int *d_in, unsigned int *d_out, unsigned int *d_total, size_t n)
{
	__shared__ unsigned int temp[BLOCK_WIDTH];
	int tx = threadIdx.x;
	int bx = blockIdx.x;
	int index = BLOCK_WIDTH * bx + tx;
	// Load the tile, padding past-the-end slots with 0 (shared memory is
	// uninitialized, so every slot must be written before the scan).
	temp[tx] = (index < n) ? d_in[index] : 0;
	__syncthreads();
	// Hillis-Steele inclusive scan. Reads and writes are separated by
	// barriers; the original's `temp[tx + offset] += temp[tx]` raced — one
	// thread updated a slot while another was still reading it.
	for (int offset = 1; offset < BLOCK_WIDTH; offset <<= 1) {
		unsigned int addend = 0;
		if (tx >= offset) {
			addend = temp[tx - offset];
		}
		__syncthreads();
		if (tx >= offset) {
			temp[tx] += addend;
		}
		__syncthreads();
	}
	// Emit as an exclusive scan by shifting within the tile. Writing
	// temp[tx - 1] (with 0 at tx == 0) also fixes the original's gap: its
	// d_out[index + 1] shift never wrote the first element of any block
	// after block 0.
	if (index < n) {
		d_out[index] = (tx == 0) ? 0 : temp[tx - 1];
	}
	// Block total = last element of the inclusive scan.
	if (tx == 0) {
		d_total[bx] = temp[BLOCK_WIDTH - 1];
	}
}
22,679 | /* jegood Joshua Good */
/**
* @file p3.cu
* Calculates the minimum distance for a set of file-specified points using GPU
* multi-threading. This program requires access to a CUDA-enabled GPU (i.e. NVIDIA
* graphics card).
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <time.h>
/** Maximum number of threads per block */
#define MAX_THRDS 1024
// Point struct
struct point{
int x; // x coordinate read from the input file
int y; // y coordinate read from the input file
int index; // position of this point in the points array
double minDistance; // smallest distance to any other point (initialized to INFINITY)
}typedef Point;
/**
* Calculates the minimum distance for this point from each point
* in the points array.
* @param points the point array
* @param numPoints number of points in the point array
*/
__global__ void calcMinDist(Point *points, int numPoints)
{
	// One (single-thread) block per point: scan every other point and keep
	// the smallest Euclidean distance in points[blockIdx.x].minDistance.
	Point *self = &points[blockIdx.x];
	for (int other = 0; other < numPoints; other++) {
		// Skip the point itself (index field set by the host loader).
		if (other == self->index)
			continue;
		double dx = (double)(points[other].x - self->x);
		double dy = (double)(points[other].y - self->y);
		double distance = sqrt(pow(dx, 2) + pow(dy, 2));
		if (distance < self->minDistance)
			self->minDistance = distance;
	}
}
/**
* Calculates the minimum distance for a set of file-specified points using a CUDA
* kernel function. Reports this information and its associated minimum distance points
* alongside the time taken to complete this process.
* @param argc number of command line arguments
* @param argv list of command of line arguments
*/
int main(int argc, char *argv[])
{
	FILE *fp;
	// Validate the command line before touching argv[1]: the original passed
	// argv[1] (NULL when no argument was given) straight to fopen.
	if(argc < 2 || !(fp = fopen(argv[1], "r"))){
		printf("Usage: ./p3 <input file>\n");
		exit(EXIT_FAILURE);
	}
	/** Start time for a process */
	clock_t start;
	/** End time for a process */
	clock_t finish;
	// Start process clock
	start = clock();
	// First pass: count the points in the file (read, but don't store).
	Point p;
	/** Number of points in the file */
	int numPoints = 0;
	while(fscanf(fp, "%d%d", &p.x, &p.y) == 2){
		numPoints++;
	}
	// Second pass: load the points, tagging each with its array index and an
	// initial minimum distance of infinity.
	rewind(fp);
	/** Index of point in points array */
	int index = 0;
	Point points[numPoints];
	for(int i = 0; i < numPoints; i++){
		fscanf(fp, "%d %d", &p.x, &p.y);
		p.index = index;
		p.minDistance = INFINITY;
		points[i] = p;
		index++;
	}
	// Done with the input file (the original leaked this handle).
	fclose(fp);
	// Copy the points to the device.
	double minDist = INFINITY;
	Point *arr_p;
	int size = numPoints * sizeof(Point);
	cudaMalloc((void**)&arr_p, size);
	cudaMemcpy(arr_p, points, size, cudaMemcpyHostToDevice);
	// Launch the kernel: numPoints blocks of one thread each; each block
	// computes its point's minimum distance.
	calcMinDist<<<numPoints, 1>>>(arr_p, numPoints);
	// Blocking copy back also synchronizes with the kernel.
	cudaMemcpy(points, arr_p, size, cudaMemcpyDeviceToHost);
	// Global minimum over all per-point minima.
	for(int i = 0; i < numPoints; i++){
		if(points[i].minDistance < minDist){
			minDist = points[i].minDistance;
		}
	}
	// Print every point that attains the minimum distance.
	for(int i = 0; i < numPoints; i++){
		if(points[i].minDistance == minDist){
			printf("(%d,%d)", points[i].x, points[i].y);
		}
	}
	// Print the minimum distance for the set of points
	printf("%lf\n", minDist);
	// End process time
	finish = clock();
	// Print the process time
	printf("Time : %lf seconds\n", (double) (finish - start) / CLOCKS_PER_SEC);
	// Free device memory
	cudaFree(arr_p);
	return 0;
}
|
22,680 | #include <iostream>
#include <sys/time.h>
#define TILE_DIM 32
using namespace std;
/* Compile with "-Xptxas -dlcm=cg" flags to disable Fermi L1 cache.
* Code would slow down when L1 cache is disabled.
* Disabling L1 cache would not have any effect on the shared memory
* version of matmul program (see exercises).
*/
__global__ void matmul(double *a, double* b, double *c, int aw, int bw) {
	// Naive dense multiply c = a * b in row-major layout; one thread per
	// output element. Assumes the launch grid exactly tiles the output
	// (no bounds guard — matrix dims must be multiples of the block dims).
	const int r = blockIdx.y * blockDim.y + threadIdx.y;
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	double acc = 0.0;
	for (int k = 0; k < aw; k++)
		acc += a[r * aw + k] * b[k * bw + col];
	c[r * bw + col] = acc;
}
int main() {
	// Benchmark a 2560 x 2560 double matmul and report GFlops.
	timeval tt1, tt2;
	int ms;
	double fms;
	int ah=2560;
	int aw=2560;
	int bh=2560;
	int bw=2560;
	double *a = (double*)malloc(ah*aw*sizeof(double));
	double *b = (double*)malloc(bh*bw*sizeof(double));
	double *c = (double*)malloc(ah*bw*sizeof(double));
	// Row-major fill: the row stride is the row WIDTH. The original indexed
	// a[i*ah+j] / b[i*bh+j] (height as stride), which only worked because
	// the matrices are square.
	for (int i=0;i<ah;i++)
		for (int j=0;j<aw;j++)
			a[i*aw+j] = (double)(i+j);
	for (int i=0;i<bh;i++)
		for (int j=0;j<bw;j++)
			b[i*bw+j] = (double)(i-j);
	double *a_dev;
	cudaMalloc((void**) &a_dev, ah*aw * sizeof(double));
	double *b_dev;
	cudaMalloc((void**) &b_dev, bh*bw * sizeof(double));
	double *c_dev;
	cudaMalloc((void**) &c_dev, ah*bw * sizeof(double));
	cudaMemcpy(a_dev, a, ah*aw * sizeof(double) , cudaMemcpyHostToDevice);
	cudaMemcpy(b_dev, b, bh*bw * sizeof(double) , cudaMemcpyHostToDevice);
	dim3 nBlocks(bw/TILE_DIM, ah/TILE_DIM, 1);
	dim3 nThreads(TILE_DIM, TILE_DIM, 1);
	// (*) Set shared mem size 16KB, L1 cache size 48 KB
	cudaFuncSetCacheConfig(matmul, cudaFuncCachePreferL1);
	gettimeofday( &tt1, NULL );
	matmul <<< nBlocks, nThreads >>> (a_dev, b_dev, c_dev, aw, bw);
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// modern equivalent (needed so the timing covers the kernel).
	cudaDeviceSynchronize();
	gettimeofday( &tt2, NULL );
	cudaMemcpy(c, c_dev, ah*bw * sizeof(double) , cudaMemcpyDeviceToHost);
	ms = (tt2.tv_sec - tt1.tv_sec);
	ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
	fms = ((double)ms)/1000000.0;
	cout << "Comp time = " << fms << endl;
	// 2 flops (mul + add) per inner-product term.
	double dNumOps = 2.0 * (double)aw * (double)aw * (double)bw;
	double gflops = 1.0e-9 * dNumOps/fms;
	cout << "GFlops = " << gflops << endl;
	cout << "value check = " << c[145] << endl;
	// Release device and host buffers (the original leaked all six).
	cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev);
	free(a); free(b); free(c);
	return 0;
}
|
22,681 | #include<stdio.h>
#include<iostream>
__global__
void saxpy(int n, float a, float *x, float *y)
{
	// Single-precision a*x + y: y[i] = a*x[i] + y[i], one thread per element.
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n)
		return;
	y[idx] = a * x[idx] + y[idx];
}
int main(void)
{
	// Run `num` SAXPY launches over 2^20 elements and spot-check the result.
	using namespace std;
	int N=1<<20; //shift 20 bits to the left
	int num=100000;
	float a=2.0;
	float *x; //host array x
	float *y; //host array y
	float *x_d; //device array x
	float *y_d; //device array y
	x = new (nothrow) float [N];
	y = new (nothrow) float [N];
	// nothrow new returns nullptr on failure; the original wrote through the
	// pointers without checking.
	if (x == NULL || y == NULL)
	{
		cout << "Host allocation failed.\n";
		delete [] x;
		delete [] y;
		return 1;
	}
	cudaMalloc(&x_d, N*sizeof(float)); //allocate memory for x on device
	cudaMalloc(&y_d, N*sizeof(float)); //allocate memory for y on device
	for (int i=0; i<N; i++ ) //fill host arrays
	{
		x[i]=(float)i;
		y[i]=(float)2*i;
	}
	//transfer arrays to device
	cudaMemcpy(x_d, x, N*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(y_d, y, N*sizeof(float), cudaMemcpyHostToDevice);
	cout <<"\n";
	cout <<"Performing "<<num<<" CUDA-C SAXPY on 2^20 elements.\n";
	for( int j=0; j<num; j++)
	{
		saxpy<<<(N+255)/256, 256>>>(N, a, x_d, y_d); // Perform SAXPY on 1M elements
	}
	//transfer result back to host (blocking copy also syncs with the kernels)
	cudaMemcpy(y, y_d, N*sizeof(float), cudaMemcpyDeviceToHost);
	cout <<"Done.\n";
	cout <<"y[213]="<<y[213]<<"\n";
	cout <<"\n";
	// Release device and host memory (the original leaked all four buffers).
	cudaFree(x_d);
	cudaFree(y_d);
	delete [] x;
	delete [] y;
	return 0;
}
|
22,682 | #include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
__global__ void modular(int *a, int *b, int *c){
	// c[i] = a[i] mod b[i]. The input is tiny, so a single block with one
	// thread per element is enough — no grid indexing needed.
	// (Device-side printf does exist, but is for debugging only.)
	const int idx = threadIdx.x;
	c[idx] = a[idx] % b[idx];
}
__global__ void exponentiation(int *a, int *b, int *d){
	// d[i] = a[i] ** b[i], computed with integer multiplication (assumes
	// non-negative exponents). The original used the floating-point pow()
	// and truncated the result to int, which can land just below the exact
	// value (e.g. 15624 instead of 5^6 = 15625) — the same rounding issue
	// the host-side TODO asks about for the CPU reference.
	const int i = threadIdx.x;
	int result = 1;
	for (int e = 0; e < b[i]; e++)
		result *= a[i];
	d[i] = result;
}
int main(){
	int a[3] = {1, 2, 5}, b[3] = {2, 4, 6};
	int c[3], d[3], i;
	int c_check[3], d_exponentiation[3];
	int *A_gpu, *B_gpu, *C_gpu, *D_gpu; // device pointers
	// Three elements per array. The original used 5 * sizeof(int), which
	// read past the ends of a/b on the host-to-device copies and — worse —
	// wrote past the ends of c/d on the device-to-host copies (stack
	// corruption).
	int size = 3 * sizeof(int);
	// allocate device memory for A, B, C and D
	cudaMalloc((void **)&A_gpu, size);
	cudaMalloc((void **)&B_gpu, size);
	cudaMalloc((void **)&C_gpu, size);
	cudaMalloc((void **)&D_gpu, size);
	// copy the inputs to the device
	cudaMemcpy(A_gpu, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(B_gpu, b, size, cudaMemcpyHostToDevice);
	// one block, one thread per element
	modular<<<1, 3>>>(A_gpu, B_gpu, C_gpu);
	exponentiation<<<1, 3>>>(A_gpu, B_gpu, D_gpu);
	// copy the results back (blocking copies also sync with the kernels)
	cudaMemcpy(c, C_gpu, size, cudaMemcpyDeviceToHost);
	cudaMemcpy(d, D_gpu, size, cudaMemcpyDeviceToHost);
	cudaFree(A_gpu);
	cudaFree(B_gpu);
	cudaFree(C_gpu);
	cudaFree(D_gpu);
	for (i = 0; i < 3; i++){
		c_check[i] = a[i] % b[i];
		printf("The GPU Version %d, The CPU Version is %d\n", c[i], c_check[i]);
	}
	//TODO: why the CPU version in exponentiation is wrong ? WHY ?
	// (pow() returns a double whose rounding can fall just below the exact
	// integer power; truncating to int then loses 1.)
	for (i = 0; i < 3; i++){
		d_exponentiation[i] = pow(a[i], b[i]);
		printf("The GPU Version %d, The CPU Version is %d\n", d[i], d_exponentiation[i]);
	}
	return 0;
}
|
22,683 | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define f(i,j) f[(i) + (j)*(m)]
#define Z(i,j) Z[(i) + (j)*m]
// Non-local-means style normalization term: for each interior pixel (x, y) of
// the padded image Ag, accumulate exp(-||patch difference||^2_H / filtsigma)
// against every patch of the tile A, adding the result into Z (via the Z(i,j)
// macro, column-major with leading dimension m). H is a patch x patch
// Gaussian weight mask. Shared memory layout (dynamic): two
// dim_sh x dim_sh image tiles followed by the patch x patch mask, where
// dim_sh = blockDim.x + patch - 1. Assumes square blocks
// (blockDim.x == blockDim.y) and patch odd.
__global__ void Zev(float const * const Ag,float const * const A, float *Z,float const * const H, int m, int n,int patch,float filtsigma){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x_local=threadIdx.x;
int y_local = threadIdx.y;
int fix =(patch-1)/2;//half patch width (temporary value)
int dim_sh=blockDim.x+patch-1;//dimensions of shared arrays = threadsPerBlock + patch-1
extern __shared__ float block[];//dynamic allocation of shared memory
float *shared_A=&block[0];//shared array to keep values for calculations
float *shared_g=&block[dim_sh*dim_sh];//shared array to keep values for calculations
float *shared_h=&block[dim_sh*dim_sh*2];//shared array to keep gaussian
if(x<m-2*fix && y<n-2*fix){// check to avoid exceeding arrays limits
int th=blockDim.x;//block dimension=number of threads
//fill shared_h with H values (top-left patch x patch threads)
if(x_local<patch && y_local<patch){
shared_h[x_local +y_local*patch]=H[x_local + y_local*patch];
}
//save in shared array the 2nd part for calculations (from a block of image array)
__syncthreads();
//filling shared memory is described in the report: interior first, then the
//right, bottom and corner halos of width patch-1
shared_A[x_local +y_local*dim_sh]=A[x_local+y_local*dim_sh];
__syncthreads();
if(x_local<(patch-1)){
shared_A[(x_local+th) + y_local*dim_sh]=A[x_local+th +y_local*dim_sh];
}
__syncthreads();
if(y_local<(patch-1)){
shared_A[x_local + (y_local+th)*dim_sh]=A[x_local + (y_local+th)*dim_sh];
}
__syncthreads();
if(x_local>blockDim.x-patch && y_local>blockDim.y-patch ){
shared_A[x_local+patch-1 + (y_local+patch-1)*dim_sh]=A[x_local+patch-1 + (y_local+patch-1)*dim_sh];
}
__syncthreads();
//save in shared array the 1st part for calculations (from image array)
shared_g[x_local +y_local*dim_sh]=Ag[x+y*m];
__syncthreads();
if(x_local<(patch-1)){
shared_g[(x_local+th) + y_local*dim_sh]=Ag[x+th +y*m];
}
__syncthreads();
if(y_local<(patch-1)){
shared_g[x_local + (y_local+th)*dim_sh]=Ag[x + (y+th)*m];
}
__syncthreads();
if(x_local>blockDim.x-patch && y_local>blockDim.y-patch ){
shared_g[x_local+patch-1 + (y_local+patch-1)*dim_sh]=Ag[x+patch-1 + (y+patch-1)*m];
}
__syncthreads();
}
// from here on `patch` holds the HALF patch width
patch=(patch-1)/2;
if(x<m-2*patch && y<n-2*patch){
int counter=0;//counter for gaussian values
float temp=0,sum=0;
float z_l=Z(x+patch,y+patch);//load into local z value from global Z
for(int i=patch;i<dim_sh-patch;i++){//make calculations for the shared block without pads
for(int j=patch;j<dim_sh-patch;j++){
for(int p=-patch;p<=patch;p++){//calculate neighborhood
for(int l=-patch;l<=patch;l++){
// Gaussian-weighted difference between this thread's patch and the
// (i, j)-centered patch of the tile.
temp=(shared_g[(x_local+patch +l)+(y_local+patch + p)*dim_sh]-shared_A[(i+l) + (j+p)*dim_sh])*shared_h[counter];
sum=sum+temp*temp;
counter++;
}
}
z_l=z_l+expf(-(sum/filtsigma));
sum=0;
counter=0;
}
}
//__syncthreads();
Z[x+patch + (y+patch)*m]=z_l;//return calculated value (with padding) matlab is used to remove pads from f
}
}
// Second pass of a non-local-means style filter: accumulates the filtered
// image f using the per-pixel normalisation weights Z computed by the
// previous kernel.
//   Ag        - padded input image, column-major: element (i,j) at i + j*m
//   A         - neighbourhood data staged for shared memory (block layout)
//   Z         - per-pixel weight sums from the first-pass kernel
//   H         - patch x patch gaussian weights
//   f         - output image, read-modify-written (padding removed later)
//   m, n      - padded image dimensions
//   patch     - full patch width (odd); halved below to a radius
//   filtsigma - filter bandwidth
// Dynamic shared memory required: (2*dim_sh*dim_sh + patch*patch) floats,
// where dim_sh = blockDim.x + patch - 1.
// NOTE(review): Z(x+patch,y+patch) and f(x+patch,y+patch) apply call syntax
// to pointer parameters — presumably function-like indexing macros defined
// earlier in the file; confirm before editing.
// NOTE(review): the __syncthreads() calls below sit inside the
// "if(x<m-2*fix && ...)" branch; all threads of a block must take the same
// path or the barrier is undefined — presumably the launch guarantees this,
// verify against the host-side grid configuration.
__global__ void fev(float const * const Ag,float const * const A,float const * const Z,float const * const H,float *f, int m, int n,int patch,float filtsigma){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x_local=threadIdx.x;
int y_local = threadIdx.y;
int fix =(patch-1)/2;//temporary variable
int dim_sh=blockDim.x+patch-1;//dimensions of shared arrays = threadsPerBlock + patch-1
extern __shared__ float block[]; //dynamic allocation of shared memory
float *shared_A=&block[0];//shared array to keep values for calculations
float *shared_g=&block[dim_sh*dim_sh];//shared array to keep values for calculations
float *shared_h=&block[dim_sh*dim_sh*2];//shared array to keep gaussian
if(x<m-2*fix && y<n-2*fix){ // check to avoid exceeding arrays limits
int th=blockDim.x;//block dimension=number of threads
//fill shared_h with H values (only the first patch x patch threads load)
if(x_local<patch && y_local<patch){
shared_h[x_local +y_local*patch]=H[x_local + y_local*patch];
}
__syncthreads();
//save in shared array the 2nd part for calculations (from a block of image array)
shared_A[x_local +y_local*dim_sh]=A[x_local+y_local*dim_sh];
__syncthreads();
//right halo columns
if(x_local<(patch-1)){
shared_A[(x_local+th) + y_local*dim_sh]=A[x_local+th +y_local*dim_sh];
}
__syncthreads();
//bottom halo rows
if(y_local<(patch-1)){
shared_A[x_local + (y_local+th)*dim_sh]=A[x_local + (y_local+th)*dim_sh];
}
__syncthreads();
//bottom-right halo corner
if(x_local>blockDim.x-patch && y_local>blockDim.y-patch ){
shared_A[x_local+patch-1 + (y_local+patch-1)*dim_sh]=A[x_local+patch-1 + (y_local+patch-1)*dim_sh];
}
__syncthreads();
//save in shared array the 1st part for calculations (from image array)
shared_g[x_local +y_local*dim_sh]=Ag[x+y*m];
__syncthreads();
if(x_local<(patch-1)){
shared_g[(x_local+th) + y_local*dim_sh]=Ag[x+th +y*m];
}
__syncthreads();
if(y_local<(patch-1)){
shared_g[x_local + (y_local+th)*dim_sh]=Ag[x + (y+th)*m];
}
__syncthreads();
if(x_local>blockDim.x-patch && y_local>blockDim.y-patch ){
shared_g[x_local+patch-1 + (y_local+patch-1)*dim_sh]=Ag[x+patch-1 + (y+patch-1)*m];
}
__syncthreads();
}
// from here on `patch` is the patch RADIUS, not the full width
patch=(patch-1)/2; //patch for checking neighbourhood
if(x<m-2*patch && y<n-2*patch){
int counter=0;//counter for gaussian
float temp=0,sum=0,z_l=Z(x+patch,y+patch),f_l=f(x+patch,y+patch);//load into local z(or f) value from global Z(or f)
for(int i=patch;i<dim_sh-patch;i++){//make calculations for the shared block wihtout pads
for(int j=patch;j<dim_sh-patch;j++){
for(int p=-patch;p<=patch;p++){//calculate neighborhood
for(int l=-patch;l<=patch;l++){
// gaussian-weighted patch difference between this thread's
// neighbourhood (shared_g) and the candidate pixel (i,j)
temp=(shared_g[(x_local+patch +l)+(y_local+patch + p)*dim_sh]-shared_A[(i+l) + (j+p)*dim_sh])*shared_h[counter];
sum=sum+temp*temp;
counter++;
}
}
// accumulate the normalised similarity weight times the pixel value
f_l=f_l+(1/z_l)*(expf(-(sum/filtsigma)))*shared_A[i+j*dim_sh];
sum=0;
counter=0;
}
}
//__syncthreads();
f[x+patch + (y+patch)*m]=f_l;//return calculated value
}
}
|
22,684 | #include "includes.h"
// Separable 3D grey-level erosion along the column (Y) axis.
// Each thread loads ER_COLUMNS_RESULT_STEPS main elements plus
// ER_COLUMNS_HALO_STEPS halo elements above and below into shared memory,
// then writes ER_COLUMNS_RESULT_STEPS outputs, each the minimum over a
// window of 2*kernel_radius+1 column neighbours.
// Requires: kernel_radius <= ER_COLUMNS_HALO_STEPS * ER_COLUMNS_BLOCKDIM_Y,
// and a grid that exactly tiles w and d (there is no X/Z bounds guard —
// presumably guaranteed by the launcher; TODO confirm).
// NOTE(review): out-of-range halo samples are filled with 0; for a
// min-filter this drags border outputs toward 0 — if border-neutral
// behaviour is wanted the pad value should be 0xFFFF. Confirm intent.
__global__ void erosionColumns3DKernel( unsigned short *d_dst, unsigned short *d_src, int w,int h,int d, int kernel_radius )
{
// +1 on the innermost dimension pads the stride to reduce bank conflicts
__shared__ unsigned short smem[ER_COLUMNS_BLOCKDIM_Z][ER_COLUMNS_BLOCKDIM_X][(ER_COLUMNS_RESULT_STEPS + 2 * ER_COLUMNS_HALO_STEPS) * ER_COLUMNS_BLOCKDIM_Y + 1];
unsigned short *smem_thread = smem[threadIdx.z][threadIdx.x];
//Offset to the upper halo edge
const int baseX = blockIdx.x * ER_COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * ER_COLUMNS_RESULT_STEPS - ER_COLUMNS_HALO_STEPS) * ER_COLUMNS_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z * ER_COLUMNS_BLOCKDIM_Z + threadIdx.z;
// advance both pointers to this thread's column position
d_src += (baseZ * h + baseY) * w + baseX;
d_dst += (baseZ * h + baseY) * w + baseX;
//Main data
#pragma unroll
for (int i = ER_COLUMNS_HALO_STEPS; i < ER_COLUMNS_HALO_STEPS + ER_COLUMNS_RESULT_STEPS; i++) {
smem_thread[threadIdx.y + i * ER_COLUMNS_BLOCKDIM_Y] = d_src[i * ER_COLUMNS_BLOCKDIM_Y * w];
}
//Upper halo (rows above the image are padded with 0)
#pragma unroll
for (int i = 0; i < ER_COLUMNS_HALO_STEPS; i++) {
smem_thread[threadIdx.y + i * ER_COLUMNS_BLOCKDIM_Y] = (baseY + i * ER_COLUMNS_BLOCKDIM_Y >= 0) ? d_src[i * ER_COLUMNS_BLOCKDIM_Y * w] : 0;
}
//Lower halo (rows below the image are padded with 0)
#pragma unroll
for (int i = ER_COLUMNS_HALO_STEPS + ER_COLUMNS_RESULT_STEPS; i < ER_COLUMNS_HALO_STEPS + ER_COLUMNS_RESULT_STEPS + ER_COLUMNS_HALO_STEPS; i++) {
smem_thread[threadIdx.y + i * ER_COLUMNS_BLOCKDIM_Y]= (baseY + i * ER_COLUMNS_BLOCKDIM_Y < h) ? d_src[i * ER_COLUMNS_BLOCKDIM_Y * w] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ER_COLUMNS_HALO_STEPS; i < ER_COLUMNS_HALO_STEPS + ER_COLUMNS_RESULT_STEPS; i++) {
// window minimum over [y - kernel_radius, y + kernel_radius]
unsigned short *smem_kern = &smem_thread[threadIdx.y + i * ER_COLUMNS_BLOCKDIM_Y - kernel_radius];
unsigned short val = smem_kern[0];
//#pragma unroll
for (int j = 1; j <= 2 * kernel_radius; j++) {
val = min(val, smem_kern[j]);
}
d_dst[i * ER_COLUMNS_BLOCKDIM_Y * w] = val;
}
} |
22,685 | /*
Name: Daniyal Manair
Student Number: 20064993
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <vector>
#include <stdio.h>
#include <random>
#include <algorithm>
#include <chrono>
#include <map>
// Element-wise sum of two NxN matrices: one thread per element.
// Threads of the 2D grid that overhang the matrix do nothing.
__global__ void sumMatrixGPU(float* A, float* B, float* C, const int N) {
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= (unsigned int)N || y >= (unsigned int)N)
        return;
    const unsigned int flat = y * N + x;
    C[flat] = A[flat] + B[flat];
}
// Element-wise sum of two NxN matrices: one thread handles a whole row.
__global__ void sumMatrixGPUperRow(float* A, float* B, float* C, const int N) {
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= N)
        return;
    const int base = r * N;
    for (int c = 0; c < N; c++)
        C[base + c] = A[base + c] + B[base + c];
}
// Element-wise sum of two NxN matrices: one thread handles a whole column.
__global__ void sumMatrixGPUperCol(float* A, float* B, float* C, const int N) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c >= N)
        return;
    for (int r = 0; r < N; r++) {
        const int flat = r * N + c;
        C[flat] = A[flat] + B[flat];
    }
}
// Fill an NxN matrix with pseudo-random values in [0.0, 25.5]
// (low byte of rand() divided by 10).
void initialData(float* matrix, const int N){
    const int total = N * N;
    for (int idx = 0; idx < total; idx++)
        matrix[idx] = (float)(rand() & 0xFF) / 10.0f;
}
// CPU reference: element-wise sum over all N*N entries of two NxN matrices.
void sumMatrixCPU(float* A, float* B, float* C, const int N){
    const int total = N * N;
    for (int idx = 0; idx < total; idx++)
        C[idx] = A[idx] + B[idx];
}
// Compare a CPU reference against a GPU result element-by-element.
// Prints the first mismatching pair and returns, or "Test PASSED" when all
// N*N entries agree within epsilon.
// Fix: the original used abs(), which can bind to the integer overload
// depending on which headers are in scope, truncating fractional
// differences to 0 and passing bad results. Compare the signed difference
// against +/- epsilon instead (no math header needed).
void checkResult(float* CPU, float* GPU, const int N) {
    double epsilon = 1.0E-8;
    for (int i = 0; i < (N*N); i++){
        double diff = (double)CPU[i] - (double)GPU[i];
        if (diff > epsilon || diff < -epsilon){
            printf("CPU %f GPU %f ", CPU[i], GPU[i]);
            printf("Arrays do not match.\n\n");
            return;
        }
    }
    printf("Test PASSED\n\n");
}
// Print all N*N matrix entries as a comma-separated list in brackets.
// The backspace escape overwrites the trailing comma before the ']'.
void printArr(float* matrix, const int N) {
    printf("[");
    const int total = N * N;
    for (int idx = 0; idx < total; idx++)
        printf("%f,", matrix[idx]);
    printf("\b]\n");
}
// Run an NxN matrix addition on the CPU and on three GPU variants (one
// thread per element, per row, per column), time each, verify each GPU
// result against the CPU reference, and append timings to
// machineProblem2.csv.
// Fix: the original zeroed only N BYTES of the N*N-float result buffers
// (memset(C_C, 0, N)); the full `size` bytes are zeroed now. The dim3
// variables holding the grid dimensions were also misleadingly named
// `thread*`; they are renamed `grid*`. Events are destroyed before return.
void computeMatrix(const int N) {
    // Initial prints
    printf("------------------------------------------------------------------------\n\n");
    printf("%dx%d matrix addition.\n\n", N, N);
    // Initialize Host variables
    float* C_A, *C_B, *C_C, *C_C1;
    float timeDuration;
    size_t size = N * N * sizeof(float);
    cudaEvent_t gStart, gEnd;
    FILE *fp;
    // Initialize space
    C_A = (float*)malloc(size);
    C_B = (float*)malloc(size);
    C_C = (float*)malloc(size);
    C_C1 = (float*)malloc(size);
    fp=fopen("machineProblem2.csv","a");
    cudaEventCreate(&gStart);
    cudaEventCreate(&gEnd);
    // Set with random data
    initialData(C_A, N);
    initialData(C_B, N);
    // Zero the FULL result buffers (size bytes, i.e. N*N floats)
    memset(C_C, 0, size);
    memset(C_C1, 0, size);
    // Initialize GPU variables
    float* G_A, *G_B, *G_C, *G_C1, *G_C2;
    cudaMalloc((void**)&G_A, size);
    cudaMalloc((void**)&G_B, size);
    cudaMalloc((void**)&G_C, size);
    cudaMalloc((void**)&G_C1, size);
    cudaMalloc((void**)&G_C2, size);
    // Copy over the data
    cudaMemcpy(G_A, C_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(G_B, C_B, size, cudaMemcpyHostToDevice);
    // Serial Test CPU
    auto start = std::chrono::high_resolution_clock::now();
    sumMatrixCPU(C_A, C_B, C_C, N);
    auto end = std::chrono::high_resolution_clock::now();
    auto timeElapse = (std::chrono::duration_cast<std::chrono::microseconds>(end - start).count())/1000.0;
    printf("The CPU took %f to perform the computation.\n\n", timeElapse);
    fprintf(fp,"%d,CPU,ELEMENT,0,%f\n",N,timeElapse);
    // Test fully parallel computation: one thread per element
    dim3 block(16, 16);
    dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
    cudaEventRecord(gStart);
    sumMatrixGPU <<<grid, block >>> (G_A, G_B, G_C, N);
    cudaEventRecord(gEnd);
    cudaEventSynchronize(gEnd);
    cudaEventElapsedTime(&timeDuration, gStart, gEnd);
    printf("The GPU took %f to perform the computation with one thread per element.\n", timeDuration);
    fprintf(fp,"%d,GPU,ELEMENT,16,%f\n",N,timeDuration);
    // Copy over the result and compare
    cudaMemcpy(C_C1, G_C, size, cudaMemcpyDeviceToHost);
    checkResult(C_C, C_C1, N);
    // Test row based parallel Computation
    dim3 block1(16);
    dim3 grid1((N + block1.x - 1) / block1.x);
    cudaEventRecord(gStart);
    sumMatrixGPUperRow <<<grid1, block1 >>> (G_A, G_B, G_C1, N);
    cudaEventRecord(gEnd);
    cudaEventSynchronize(gEnd);
    cudaEventElapsedTime(&timeDuration, gStart, gEnd);
    printf("The GPU took %f to perform the computation with one thread per Row.\n", timeDuration);
    fprintf(fp,"%d,GPU,ROW,16,%f\n",N,timeDuration);
    // Copy over the result and compare
    cudaMemcpy(C_C1, G_C1, size, cudaMemcpyDeviceToHost);
    checkResult(C_C, C_C1, N);
    // Test column based parallel Computation
    dim3 block2(16);
    dim3 grid2((N + block2.x - 1) / block2.x);
    cudaEventRecord(gStart);
    sumMatrixGPUperCol <<<grid2, block2 >>> (G_A, G_B, G_C2, N);
    cudaEventRecord(gEnd);
    cudaEventSynchronize(gEnd);
    cudaEventElapsedTime(&timeDuration, gStart, gEnd);
    printf("The GPU took %f to perform the computation with one thread per Column.\n", timeDuration);
    fprintf(fp,"%d,GPU,COL,16,%f\n",N,timeDuration);
    // Copy over the result and compare
    cudaMemcpy(C_C1, G_C2, size, cudaMemcpyDeviceToHost);
    checkResult(C_C, C_C1, N);
    // Free all the memory and timing events
    cudaEventDestroy(gStart);
    cudaEventDestroy(gEnd);
    cudaFree(G_A);
    cudaFree(G_B);
    cudaFree(G_C);
    cudaFree(G_C1);
    cudaFree(G_C2);
    free(C_A);
    free(C_B);
    free(C_C);
    free(C_C1);
    fclose(fp);
    cudaDeviceReset();
}
// Write the CSV header, then benchmark matrix addition for a fixed set of
// sizes; computeMatrix() re-opens the CSV in append mode for each size.
int main(){
    FILE *fp = fopen("machineProblem2.csv", "w");
    fprintf(fp, "matrixSize,processor,type,blockSize,time\n");
    fclose(fp);
    const int sizes[] = {100, 200, 500, 1000, 1500, 3000, 5000};
    const int count = (int)(sizeof(sizes) / sizeof(sizes[0]));
    for (int s = 0; s < count; s++)
        computeMatrix(sizes[s]);
    printf("------------------------------------------------------------------------\n\n");
    return 0;
}
|
22,686 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 1000000
/*
<<<B, T>>>
gridDim.x = B
blockDim.x = T
blockIdx.x = 0 ... B - 1
threadIdx.x = 0 ... T - 1
*/
/*
clP - Cond0tional Likelihood of Parents (1x6)
clC - Conditional Likelihood of Children (1x12)
clPC - Transition Probability of Parent -> Child (1x12)
*/
// For each of the 6*N parents, combine its two children's conditional
// likelihoods weighted by the parent->child transition probabilities:
//   clP[p] = tiPC[2p]*clC[2p] + tiPC[2p+1]*clC[2p+1]
// Fix: the original ran the whole 6*N loop in EVERY thread, so it only
// behaved correctly for a <<<1,1>>> launch (and redundantly otherwise).
// A grid-stride loop keeps <<<1,1>>> behaviour identical while letting any
// larger launch configuration share the work.
__global__ void compute_parent_likelihood (float *clP, float *clC, float *tiPC) {
    const int total = 6 * N;
    const int stride = gridDim.x * blockDim.x;
    for (int p_id = blockIdx.x * blockDim.x + threadIdx.x; p_id < total; p_id += stride) {
        int l = p_id * 2, r = l + 1;
        clP[p_id] = tiPC[l] * clC[l] + tiPC[r] * clC[r];
    }
}
// Driver: builds random child likelihoods and complementary transition
// probabilities, runs compute_parent_likelihood once on the device, prints
// the first six results, then validates every parent against a CPU
// recomputation.
// Fix: the validation compared the SIGNED difference against 1e-6, so
// negative deviations were never counted as errors; both directions are
// checked now.
int main() {
    // Define and allocate host memory
    float *clP, *clC, *tiPC;
    clP = (float*) malloc(sizeof(float) * 6 * N);
    clC = (float*) malloc(sizeof(float) * 12 * N);
    tiPC = (float*) malloc(sizeof(float) * 12 * N);
    // Define and allocate device memory
    float *d_clP, *d_clC, *d_tiPC;
    cudaMalloc((void**) &d_clP, sizeof(float) * 6 * N);
    cudaMalloc((void**) &d_clC, sizeof(float) * 12 * N);
    cudaMalloc((void**) &d_tiPC, sizeof(float) * 12 * N);
    // Initialize with random values; each transition pair sums to 1
    for (int i = 0; i < 12 * N; i++) {
        clC[i] = (float)rand()/(float)(RAND_MAX);
    }
    for (int i = 0; i < 6 * N; i++) {
        tiPC[2*i] = (float)rand()/(float)(RAND_MAX);
        tiPC[2*i + 1] = 1 - tiPC[2*i];
    }
    // Copy from host to device memory
    cudaMemcpy((void*) d_clC, (void*) clC, sizeof(float) * 12 * N, cudaMemcpyHostToDevice);
    cudaMemcpy((void*) d_tiPC, (void*) tiPC, sizeof(float) * 12 * N, cudaMemcpyHostToDevice);
    // Launch the kernel once (single-thread configuration)
    compute_parent_likelihood<<<1, 1>>>(d_clP, d_clC, d_tiPC);
    // Copy from device to host memory
    cudaMemcpy((void*) clP, (void*) d_clP, sizeof(float) * 6 * N, cudaMemcpyDeviceToHost);
    // Print first 6
    printf("First 6 CLs: ");
    for (int i = 0; i < 6; i++) {
        printf("%f ", clP[i]);
    }
    printf("\n");
    // Validation: recompute on the CPU and count mismatches in either
    // direction (|gpu - cpu| > 1e-6)
    int errors = 0;
    for (int t = 0; t < N; t++) {
        for (int i = 0; i < 6; i++) {
            int id = t * 6 + i;
            int l = id * 2, r = id * 2 + 1;
            float diff = clP[id] - (clC[l] * tiPC[l] + clC[r] * tiPC[r]);
            if (diff > 1e-6f || diff < -1e-6f) {
                errors++;
            }
        }
    }
    printf("Errors: %d\n", errors);
    // Free host memory
    free(clP);
    free(clC);
    free(tiPC);
    // Free device memory
    cudaFree(d_clP);
    cudaFree(d_clC);
    cudaFree(d_tiPC);
    return 0;
}
22,687 | #include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
using namespace std;
// Minimal 2D point usable from both host and device code.
class Point {
public:
    double x, y;
    Point() = default;
    __host__ __device__
    Point(double px, double py) : x(px), y(py) {}
};
// Query point the distance comparisons refer to; set from the host with
// cudaMemcpyToSymbol before sorting.
__device__ Point d_query_point;
// Strict weak ordering on squared Euclidean distance to d_query_point.
// Squared distances suffice for ordering because sqrt is monotonic.
template<typename T>
struct device_sort {
    typedef T first_argument_type;
    typedef T second_argument_type;
    typedef bool result_type;
    __host__ __device__ bool operator()(const T &a, const T &b) const {
        const double dax = a.x - d_query_point.x;
        const double day = a.y - d_query_point.y;
        const double dbx = b.x - d_query_point.x;
        const double dby = b.y - d_query_point.y;
        return (dax * dax + day * day) < (dbx * dbx + dby * dby);
    }
};
// This simple "hello world" example implements the kNearestNeighbors example on a set of example 2D points.
// Sorts five example 2D points by their distance to a query point (the
// origin) entirely on the device, then prints them nearest-first.
// NOTE(review): cout/endl are used but <iostream> is not included directly;
// this presumably compiles only because the thrust headers pull it in
// transitively (the file has `using namespace std;`) — confirm and include
// it explicitly if this breaks.
int main(void) {
thrust::device_vector<Point> d_points;
thrust::host_vector<Point> h_points;
// distance comparisons are made relative to this point
Point query_point = Point(0,0);
h_points.push_back(Point(2, 0));
h_points.push_back(Point(1, 0));
h_points.push_back(Point(0, 10));
h_points.push_back(Point(5, 5));
h_points.push_back(Point(2, 5));
// publish the query point to the __device__ symbol read by device_sort
cudaMemcpyToSymbol(d_query_point, &query_point, sizeof(Point));
// transfer to device
d_points = h_points;
thrust::sort(d_points.begin(), d_points.end(), device_sort<Point>());
// transfer results to host
h_points = d_points;
for (const auto p : h_points) {
cout << p.x << ", " << p.y << endl;
}
return 0;
} |
22,688 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Computes Y[i] = sin(X[i]). Launched as <<<N,1>>>, so each single-thread
// block handles exactly one element, indexed by its block id.
__global__ void sineof(float *X, float *Y){
    const int i = blockIdx.x;
    Y[i] = sinf(X[i]);
}
/*
 * Reads N angles (in radians) from stdin, computes sin() of each on the
 * GPU (one single-thread block per element) and prints the results.
 * Fixes: N was declared float and read with %f (an element COUNT must be
 * an int; malloc sizes and loop bounds were computed from a float); the
 * per-element scanf format had a trailing blank ("%f "), which makes scanf
 * keep consuming whitespace and block after the last value; the
 * uninitialized output buffer was pointlessly copied host-to-device; the
 * host buffers were never freed.
 */
int main(){
    float *X, *Y;       // host input / output arrays
    float *d_x, *d_y;   // device copies
    int N;              // number of elements
    int size = sizeof(float);
    printf("Enter number of elements: ");
    scanf("%d", &N);
    X = (float*)malloc(sizeof(float) * N);
    Y = (float*)malloc(sizeof(float) * N);
    printf("Enter elements in rad:\n");
    for (int i = 0; i < N; i++) {
        scanf("%f", &X[i]);   // no trailing blank: "%f " would block
    }
    // Allocate space for device copies
    cudaMalloc((void**)&d_x, size * N);
    cudaMalloc((void**)&d_y, size * N);
    // Only the input needs transferring; d_y is written by the kernel
    cudaMemcpy(d_x, X, size * N, cudaMemcpyHostToDevice);
    // Launch kernel: one single-thread block per element
    sineof<<<N, 1>>>(d_x, d_y);
    // Copy result back to host
    cudaMemcpy(Y, d_y, sizeof(float) * N, cudaMemcpyDeviceToHost);
    printf("Result:\n");
    for (int i = 0; i < N; i++) {
        printf("Y%d = %f \n", i, Y[i]);
    }
    // Cleanup (device and host)
    cudaFree(d_x);
    cudaFree(d_y);
    free(X);
    free(Y);
    return 0;
}
|
22,689 | /*
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>*
#include <conio.h>
const int TILE_WIDTH=2;
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
// Identify the row and column of the Pd element to work on
int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
float Pvalue = 0;
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m)
{
// Collaborative loading of Md and Nd tiles into shared memory
Mds[threadIdx.y][threadIdx.x] = Md[Row*Width + (m*TILE_WIDTH + threadIdx.x)];
Nds[threadIdx.y][threadIdx.x] = Nd[Col + (m*TILE_WIDTH + threadIdx.y)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[threadIdx.y][k] * Nds[k][threadIdx.x];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
}
int main()
{
const int width=4;
int i,j,size;
float h_M[width][width],h_N[width][width],h_P[width][width];
float *d_M,*d_N,*d_P;
printf("\n width= %d \n ", width);
for(i=0;i<width;i++)
{
for(j=0;j<width;j++)
{
h_M[i][j]=1;
h_N[i][j]=1;
}
}
printf("\nh_M array is: \n");
for(i=0;i<width;i++)
{
printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_M[i][j]);
}
printf("\n\nh_N array is: \n");
for(i=0;i<width;i++)
{
printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_N[i][j]);
}
size=sizeof(int)*width*width;
cudaMalloc((void**)&d_M,size);
cudaMalloc((void**)&d_N,size);
cudaMalloc((void**)&d_P,size);
cudaMemcpy(d_M,h_M,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,size,cudaMemcpyHostToDevice);
dim3 dimGrid((width/TILE_WIDTH),(width/TILE_WIDTH),1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
MatrixMulKernel<<<dimGrid,dimBlock>>>(d_M,d_N,d_P,width);
cudaMemcpy(h_P,d_P,size,cudaMemcpyDeviceToHost);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
printf("\n\nMultiplied array is: \n");
for(i=0;i<width;i++)
{
printf("\n");
for(j=0;j<width;j++)
printf("%d ",h_P[i][j]);
}
getch();
return 0;
}
*/ |
22,690 | // Assemble.cu
//
//This file contains the function that assembles bodies
#include <iostream>
//Function Prototypes
// Functions found in DCAfuncts.cu
void Mat66Mult(double A[6][6], double B[6][6], double C[6][6]);
void Mat61Mult(double A[6][6], double B[6][6], double C[6][6]);
void get_X(double z1[6][6], double z2[6][6], double D[6][6]);
void invert_X(double X[6][6]);
void make_W(double Xinv[6][6], double D[6][6]);
void printm(double a[6][6]);
//Assemble:
// Function used to assemble a list of bodies into a list of bodies that is
// half the size of the original list. To accomplish this, the list of old bodies
// is cycled through looking at two bodies at a time. These two bodies are assembled
// into one new body. The counter then moves on to look at the next two old bodies
// If the original list has an odd number of bodies, the last body is ignored during
// assembly and is added on to the new list of bodies at the end of the function.
// oldbds is the old list of bodies
// newbds is the new list of assembled bodies
// len is the length of oldbds
// odd is 1 if oldbds has an odd number of bodies and 0 if it has an even number
// Assemble: pairwise DCA body assembly pass.
// Combines consecutive body pairs from the flattened old-body arrays
// (Zs: 26 columns of 6-row data per body; Xs unused here) into new-body
// arrays (nZs, nXs) of half the length. Per-body column layout inferred
// from the offsets used below: z11 at +0, z12 at +6, z13 at +12, z21 at
// +13, z22 at +19, z23 at +25; a pair (b1,b2) spans 52 columns.
// len = length of the NEW list, odd = 1 if the old list length n is odd
// (the final unpaired body is copied through unchanged), n = old length.
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n)
{
//Variable Declarations
//Both varaibles are temporary variables used for matrix operations
double z1[6][6];
double z2[6][6];
double A[6][6];
//Loop through every body in oldbds and newbds. j is used to
//reference a body in oldbds and i to reference a body in newbds
//NOTE(review): j is incremented but never referenced in the loop body;
//the pair offset is expressed as i*52 instead — presumably equivalent.
for(int i = 0, j=0; i<len-odd; i++, j+=2)
{
for(int r=0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
z1[r][c]=Zs[c+r*n*26+i*52+26]; //Save b2.z11 into z1
z2[r][c]=Zs[c+r*n*26+i*52+19]; //save b1.z22 into z2
}
}
get_X(z1,z2,A);//Get the intermediate quantity X and save it in A
invert_X(A);//Invert X and put it in A
for(int r = 0; r<5; r++) //Loop through every row
{
for(int c=0; c<5; c++) //Loop through every column
{
nXs[c+r*5*len+i*5]=A[r][c];
//Save Xinv in the new body corresponding to the
} //two used to construct X
}
make_W(A,z1); //Using X inverse, construct the intermediate quantity W and save it in A
for(int r =0; r<6; r++)
{
for(int c=0; c<6; c++)
{
z1[r][c]=Zs[c+r*n*26+i*52+32]; //z212
z2[r][c]=Zs[c+r*n*26+i*52+6]; //z112
}
}
Mat66Mult(A,z1,z1); //Perform A*b2.z12 and save the result in z1
//z1 now holds W*b2.z12
Mat66Mult(z2,z1,z1); //Perform b1.z12*z1 and save the result in z1
//z1 now holds b1.z12*W*b2.z12=z12
for(int r = 0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
nZs[c+r*len*26+i*26+6]=z1[r][c];
//Save the new z12 in the corresponding new body
z1[r][c]= Zs[c+r*n*26+i*52+13];//z121
z2[r][c]=Zs[c+r*n*26+i*52+6];//z112
}
}
Mat66Mult(A,z1,z1); //Perform A*b1.z21 and store the result in z1
//z1 now holds W*b1.z21
Mat66Mult(z2,z1,z1); //Perform b1.z12*z1 and store the result in z1
//z1 now holds b1.z12*W*b1.z21
for(int r = 0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
nZs[c+r*len*26+i*26]=Zs[c+r*26*n+i*52]-z1[r][c]; //Save the new z11 in newbds
z1[r][c]=Zs[c+r*26*n+i*52+13];//z121
z2[r][c]=Zs[c+r*26*n+i*52+39];//z221
}
}
Mat66Mult(A,z1,z1); //Perform A*b1.z21 and save the result in z1
//z1 now holds W*b1.z21
Mat66Mult(z2,z1,z1); //Perform b2.z21*z1 and store the result in z1
//z1 now holds b2.z21*W*b1.z21=z21
for(int r = 0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
nZs[c+r*len*26+i*26+13]=z1[r][c]; //Save the new z21 in newbds
z1[r][c]=Zs[c+r*26*n+i*52+32];//z212
z2[r][c]=Zs[c+r*26*n+i*52+39];//z221
}
}
Mat66Mult(A,z1,z1); //Perform A*b2.z12 and store the result in z1
//z1 now holds W*b2.z12
Mat66Mult(z2,z1,z1); //Perform b2.z21*z1 and store the result in z1
//z1 now holds b2.z21*W*b2.z12
for(int r = 0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
//new z22 = b2.z22 - b2.z21*W*b2.z12
nZs[c+r*len*26+i*26+19]=Zs[c+r*26*n+i*52+45]-z1[r][c];
}
}
for(int r = 0; r<6; r++) //Loop through every row
{
//NOTE(review): the comment below says "b1.z23+b2.z13" but the code
//SUBTRACTS the +38 column — confirm whether the sign convention of the
//stored columns makes this equivalent, or whether this is a sign bug.
z1[r][0]=Zs[r*26*n+i*52+25]-Zs[r*26*n+i*52+38];
//Save b1.z23+b2.z13 into z1
for(int c=0; c<6; c++)
{
z2[r][c]=Zs[c+r*26*n+i*52+6];
}
}
Mat61Mult(A,z1,A); //Perform A*z1 and store the result in A
//A now holds W*(b1.z23+b2.z13)=Y
Mat61Mult(z2,A,z1); //Perform b1.z12*A and store the result in z1
//z1 now holds b1.z12*Y
for(int r = 0; r< 6; r++) //Loop through every row
{
nZs[r*len*26+i*26+12]=Zs[r*n*26+i*52+12]-z1[r][0];
//Save the new z13
for(int c=0; c<6; c++)
{
z2[r][c]=Zs[c+r*26*n+52*i+39];
}
}
Mat61Mult(z2,A,z1); //Perform b2.z21*A and store the result in z1
//z1 now holds b2.z21*Y
for(int r=0; r< 6; r++) //Loop through every row
{
nZs[r*len*26+i*26+25]=Zs[r*n*26+i*52+51]+z1[r][0];
//Save the new z23 in newbds
}
} //End loop through bodies
//If there is and odd number of oldbds, the list can not be cut directly in half.
//Because of this, the last body in oldbds must be added to the end of newbds.
if(odd ==1)
{
for(int r=0; r<6; r++) //Loop through every row
{
for(int c=0; c<6; c++) //Loop through every column
{
nZs[c+r*26*len+(len-1)*26]=Zs[c+r*26*n+(n-1)*26]; //z11
nZs[c+r*26*len+(len-1)*26+6]=Zs[c+r*26*n+(n-1)*26+6]; //z12
nZs[c+r*26*len+(len-1)*26+13]=Zs[c+r*26*n+(n-1)*26+13]; //z21
nZs[c+r*26*len+(len-1)*26+19]=Zs[c+r*26*n+(n-1)*26+19]; //z22
}
nZs[r*26*len+(len-1)*26+12]=Zs[r*26*n+(n-1)*26+12];
nZs[r*26*len+(len-1)*26+25]=Zs[r*26*n+(n-1)*26+25];
}
}
}
|
22,691 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Assignment stub: intended to mark, for each of the numEdges edges
// (src[e] -> dst[e]), whether it survives filtering, writing the flag into
// keepEdges[e] based on the matches array.
// NOTE(review): body is intentionally unimplemented ("YOUR CODE HERE");
// the exact keep/drop rule must come from the assignment specification —
// do not guess it here.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
/*YOUR CODE HERE*/
}
|
22,692 | /**
* vecAdd: C = A + B.
*
* Partially based on CUDA samples from CUDA 7.5 Toolkit
*
*/
#include <stdio.h>
#include <time.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*
* Error checking is not performed for simplicity
*
*/
/**
 * Element-wise vector addition C = A + B over numElements entries.
 * Tail threads beyond numElements return without touching memory.
 */
__global__ void
vecAdd(const float *A, const float *B, float *C, int numElements)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElements)
        return;
    C[gid] = A[gid] + B[gid];
}
/**
* Host main routine
*/
/**
 * Host main routine: allocates three large float vectors, times the device
 * allocation plus host-to-device copies, runs the vecAdd kernel, and
 * spot-checks a few results against the CPU sum.
 * Fix: the "GB/s" figure previously divided the ELEMENT COUNT by the time;
 * it now uses bytes moved (two buffers of `size` bytes) over the elapsed
 * milliseconds. The startMem/stopMem events are also destroyed.
 */
int
main(void)
{
    cudaError_t error;
    // Use device 0 or 1
    cudaSetDevice(0);
    int numElements = 100000000;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);
    // Allocate the host vectors A, B and C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    // Initialize the host input vectors
    time_t t;
    srand((unsigned) time(&t));
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }
    // Time device allocation + the two host-to-device copies
    cudaEvent_t startMem, stopMem;
    cudaEventCreate(&startMem);
    cudaEventCreate(&stopMem);
    cudaEventRecord(startMem, 0) ;
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    // Create start and stop CUDA events to measure time
    cudaEvent_t start, stop;
    float time;
    // Copy the host input vectors A and B to the device
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stopMem, 0) ;
    cudaEventSynchronize( stopMem );
    cudaEventElapsedTime(&time, startMem, stopMem);
    // 2*size bytes were copied; time is in ms, so bytes/(time*1e6) = GB/s
    printf("time=%f, Host to Device bandwidth (GB/s): %f\n", time, 2.0 * size / (time * 1e6));
    // Launch the Vector Add CUDA Kernel
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord( start, 0 );
    int threadsPerBlock = 1024;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vecAdd<<<blocksPerGrid,threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    error=cudaGetLastError();
    if (error!=cudaSuccess)
        printf("Maaaaal!!\n!");
    cudaEventRecord( stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time, start, stop );
    // Copy the device result vector to the host memory.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // Print kernel execution time
    printf("Kernel execution time %f\n", time);
    // Testing some values
    for (int i=0; i<10; i++)
        printf("%d -> A+B(host)=%f, A+B(GPU)=%f\n",i, h_A[i]+h_B[i], h_C[i]);
    for (int i=1024; i<1034; i++)
        printf("%d -> A+B(host)=%f, A+B(GPU)=%f\n",i, h_A[i]+h_B[i], h_C[i]);
    // Free device global memory (no error checking)
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    // Destroy events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(startMem);
    cudaEventDestroy(stopMem);
    // Reset the device and exit
    cudaDeviceReset();
    printf("Done\n");
    return 0;
}
22,693 | #include <stdio.h>
#define ANGLE_COUNT 360
// declare constant memory
__constant__ float cangle[ANGLE_COUNT];
// declare global memory
__device__ float gangle[ANGLE_COUNT];
// kernel function for constant memory
// Benchmark kernel: each thread accumulates the whole constant-memory
// angle table into its own output slot. The per-iteration global
// read-modify-write of darray is intentional — it is part of the measured
// workload.
__global__ void test_kernel(float* darray)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int a = 0; a < ANGLE_COUNT; a++) {
        darray[tid] += cangle[a];
    }
}
// kernel function for global memory
// Benchmark kernel: identical accumulation to test_kernel, but the angle
// table is read from global memory (gangle) instead of constant memory,
// so the two timings can be compared.
__global__ void test_kernel2(float* darray)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int a = 0; a < ANGLE_COUNT; a++) {
        darray[tid] += gangle[a];
    }
}
// Benchmarks summing a 360-entry angle table from constant memory versus
// global memory, printing the elapsed time of each kernel.
// Fix: the global-memory table `gangle` is a __device__ symbol, so it must
// be filled with cudaMemcpyToSymbol; the original passed the symbol to
// plain cudaMemcpy, which treats its host-side address as a device pointer
// and is invalid for __device__ variables.
int main(int argc,char** argv)
{
    int threads_per_block = 256;
    int blocks = 32;
    int size = blocks * threads_per_block;
    float* darray;
    float hangle[360];
    cudaEvent_t startEvent, stopEvent;
    float time;
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);
    //initialize angle array on host: loop degrees converted to radians
    for (int loop = 0; loop < ANGLE_COUNT; loop++) {
        hangle[loop] = acos( -1.0f )* loop / 180.0f;
    }
    //allocate device memory
    cudaMalloc((void**)&darray, sizeof(float) * size);
    //initialize allocated memory
    cudaMemset(darray, 0, sizeof(float) * size);
    //copy host angle data to constant memory
    cudaMemcpyToSymbol(cangle, hangle, sizeof(float) * ANGLE_COUNT);
    cudaEventRecord(startEvent, 0);
    test_kernel<<<blocks, threads_per_block>>>(darray);
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    cudaEventElapsedTime(&time, startEvent, stopEvent);
    printf("Time for constant memory (ms): %f\n", time);
    //re-initialize allocated memory
    cudaMemset(darray, 0, sizeof(float) * size);
    //copy host angle data to the __device__ symbol (NOT plain cudaMemcpy)
    cudaMemcpyToSymbol(gangle, hangle, sizeof(float) * ANGLE_COUNT);
    cudaEventRecord(startEvent, 0);
    test_kernel2<<<blocks, threads_per_block>>>(darray);
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    cudaEventElapsedTime(&time, startEvent, stopEvent);
    printf("Time for global memory (ms): %f\n", time);
    //free device memory
    cudaFree(darray);
    //destroy events
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    return 0;
}
|
22,694 | #include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <iostream>
#include <iterator>
// BinaryPredicate for the head flag segment representation
// equivalent to thrust::not2(thrust::project2nd<int,int>()));
// BinaryPredicate for the head-flag segment representation: two adjacent
// elements belong to the same segment exactly when the right element's
// head flag is NOT set (equivalent to
// thrust::not2(thrust::project2nd<int,int>())).
template <typename HeadFlagType>
struct head_flag_predicate
    : public thrust::binary_function<HeadFlagType,HeadFlagType,bool>
{
    __host__ __device__
    bool operator()(HeadFlagType left, HeadFlagType right) const
    {
        return right ? false : true;
    }
};
// Demonstrates segmented inclusive scan with two equivalent segment
// representations: explicit keys and head flags.
// NOTE(review): both scans write into the same d_output buffer, so the
// key-based result is overwritten by the flag-based result before anything
// is printed — only the second scan's output is ever shown. The two are
// expected to be identical, which is presumably the point of the demo.
int main(void)
{
int keys[] = {0,0,0,1,1,2,2,2,2,3,4,4,5,5,5}; // segments represented with keys
int flags[] = {1,0,0,1,0,1,0,0,0,1,1,0,1,0,0}; // segments represented with head flags
int values[] = {2,2,2,2,2,2,2,2,2,2,2,2,2,2,2}; // values corresponding to each key
int N = sizeof(keys) / sizeof(int); // number of elements
// copy input data to device
thrust::device_vector<int> d_keys(keys, keys + N);
thrust::device_vector<int> d_flags(flags, flags + N);
thrust::device_vector<int> d_values(values, values + N);
std::cout << "input: "; thrust::copy(d_values.begin(), d_values.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
std::cout << "keys: "; thrust::copy(d_keys.begin(), d_keys.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
std::cout << "head flags: "; thrust::copy(d_flags.begin(), d_flags.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
// allocate storage for output
thrust::device_vector<int> d_output(N);
// scan using keys (result is immediately overwritten below)
thrust::inclusive_scan_by_key(d_values.begin(), d_values.end(),
d_keys.begin(),
d_output.begin());
// scan using head flags (custom equality via head_flag_predicate)
thrust::inclusive_scan_by_key(d_values.begin(), d_values.end(),
d_flags.begin(),
d_output.begin(),
head_flag_predicate<int>(),
thrust::plus<int>());
std::cout << "output: "; thrust::copy(d_output.begin(), d_output.end(), std::ostream_iterator<int>(std::cout, " ")); std::cout << std::endl;
return 0;
}
|
22,695 | #include <assert.h>
#include <stdio.h>
// Each launched thread prints a greeting tagged with its own index.
__global__ void hello_from_gpu(void)
{
    const int tid = threadIdx.x;
    printf("Hello world from GPU, thread %d!\n", tid);
}
// Prints a CPU greeting, launches 10 GPU greeter threads, reports the CUDA
// runtime and driver versions, and checks the kernel completed.
// Fixes: EXIT_SUCCESS was used without including <stdlib.h>, and int32_t
// without <stdint.h>; plain `int` matches the cudaRuntimeGetVersion /
// cudaDriverGetVersion signatures, and `return 0;` needs no header.
int main(void)
{
    printf("Hello world from CPU!\n");
    hello_from_gpu<<<1, 10>>>();
    int runtime_version;
    cudaError_t cudaerr = cudaRuntimeGetVersion(&runtime_version);
    assert(cudaerr == cudaSuccess);
    int driver_version;
    cudaerr = cudaDriverGetVersion(&driver_version);
    assert(cudaerr == cudaSuccess);
    printf("Runtime: %d, Driver: %d\n", runtime_version, driver_version);
    // Synchronize both to flush device printf and to surface launch errors
    cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("Kernel launch failed with error %s\n",
               cudaGetErrorString(cudaerr));
    cudaDeviceReset();
    return 0;
}
|
22,696 | #include "includes.h"
// Element-wise sum of two size x size integer matrices stored flat in
// row-major order; one thread per element, tail threads do nothing.
__global__ void matrixSum(int* a,int* b, int* c, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = size * size;
    if (idx >= total)
        return;
    c[idx] = a[idx] + b[idx];
}
22,697 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Abort the program with a diagnostic if the CUDA runtime has recorded an
// error. Invoked through the CUDA_CHECK macro, which supplies __FILE__/__LINE__.
void cuda_check(string file, int line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": "
         << cudaGetErrorString(err) << " (" << err << ")" << endl;
    exit(1);
}
// Device helper for main(): element-wise c[i] = a[i] + b[i] with a bounds
// guard, so any 1D launch covering >= n threads is valid.
__global__ void addArraysKernel(float *a, float *b, float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

// Adds two length-20 arrays on the CPU and then on the GPU, printing both
// results so they can be compared visually.
int main(int argc, char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 20;
    float *a = new float[n];
    float *b = new float[n];
    float *c = new float[n];
    for(int i=0; i<n; i++)
    {
        a[i] = i;
        b[i] = (i%5)+1;
        c[i] = 0;
    }

    // CPU computation
    for(int i=0; i<n; i++) c[i] = a[i] + b[i];

    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // init c
    for(int i=0; i<n; i++) c[i] = 0;

    // GPU computation (implements the exercise TODO): copy inputs to the
    // device, launch the addition kernel, copy the result back into "c".
    size_t bytes = n * sizeof(float);
    float *d_a = NULL, *d_b = NULL, *d_c = NULL;
    cudaMalloc(&d_a, bytes); CUDA_CHECK;
    cudaMalloc(&d_b, bytes); CUDA_CHECK;
    cudaMalloc(&d_c, bytes); CUDA_CHECK;
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice); CUDA_CHECK;
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice); CUDA_CHECK;

    int threads = 128;
    int blocks = (n + threads - 1) / threads;  // ceil-div so all n elements are covered
    addArraysKernel<<<blocks, threads>>>(d_a, d_b, d_c, n); CUDA_CHECK;
    cudaDeviceSynchronize(); CUDA_CHECK;

    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost); CUDA_CHECK;

    // free GPU arrays
    cudaFree(d_a); CUDA_CHECK;
    cudaFree(d_b); CUDA_CHECK;
    cudaFree(d_c); CUDA_CHECK;

    // print result
    cout << "GPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // free CPU arrays
    delete[] a;
    delete[] b;
    delete[] c;
}
|
22,698 | #include "includes.h"
extern "C"
{
}
// Square each element of an N-wide, M-tall array (row stride N, as the
// j*N + i indexing shows): Out[idx] = In[idx]^2 via the round-to-nearest
// single-precision multiply intrinsic. 2D launch; out-of-domain threads exit.
__global__ void elSq2(int N, int M, float *In, float *Out)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || row >= M)
        return;
    const int idx = row * N + col;
    Out[idx] = __fmul_rn(In[idx], In[idx]);
} |
22,699 | #include <iostream>
#include <cstdio>
// Kernel: each thread reports its block and thread coordinates via device printf.
__global__ void helloFromGPU(void)
{
    const int block = blockIdx.x;
    const int thread = threadIdx.x;
    printf("Hello from GPU - block: %d - thread: %d. \n", block, thread);
}
// Host entry point: greet from the CPU, launch 2 blocks x 5 threads of the
// hello kernel, and wait for their printf output before exiting.
int main()
{
    std::cout << "Hello from CPU. " << std::endl;

    helloFromGPU<<<2, 5>>>();

    // Block until the kernel (and its device-side printf) has completed.
    cudaDeviceSynchronize();

    return 0;
} |
22,700 | #include "includes.h"
// Element-wise vector addition: C[idx] = A[idx] + B[idx], one element per
// thread of a 1D launch.
// NOTE(review): there is no bounds guard and no length parameter, so the
// caller's launch configuration must satisfy gridDim.x * blockDim.x <= the
// length of A/B/C exactly; any excess threads read/write out of bounds.
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[idx] = A[idx] + B[idx];
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.