serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,501 | #include <bits/stdc++.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
//#include "inner.hpp"
#define t_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define t_tran_u(x, z, u) thrust::transform((x).begin(), (x).end(), (z).begin(), u)
#define t_tran_b(x, y, z, b) thrust::transform((x).begin(), (x).end(), (y).begin(), (z).begin(), b)
#define t_sum(x) thrust::reduce((x).begin(), (x).end(), 0)
using namespace std;
typedef thrust::device_vector<int> dvi;
// Sequential (CPU) dot product of two equal-length int vectors.
// Precondition: x.size() == y.size() (checked via assert).
int innerSerial(const std::vector<int> &x, const std::vector<int> &y){
    assert(x.size() == y.size());
    int acc = 0;
    for(std::size_t k = 0; k < x.size(); ++k)
        acc += x[k] * y[k];
    return acc;
}
// GPU dot product via Thrust: copy both vectors to the device, form the
// element-wise products in place in dx, then reduce them to a single sum.
// (Macros t_tran_b / t_sum expanded inline for readability.)
int innerParallel(const std::vector<int> &x, const std::vector<int> &y){
    assert(x.size() == y.size());
    dvi dx = x, dy = y;
    thrust::transform(dx.begin(), dx.end(), dy.begin(), dx.begin(),
                      thrust::multiplies<int>());
    return thrust::reduce(dx.begin(), dx.end(), 0);
}
// Dispatch to the GPU (parallel=true) or CPU implementation of the dot product.
int innerProduct(const std::vector<int> &x, const std::vector<int> &y, bool parallel){
    return parallel ? innerParallel(x, y) : innerSerial(x, y);
}
|
20,502 | #include <sys/time.h>
#include <stdio.h>
#include <math.h>
//TODO for writing to file, will be deleted
#include <stdlib.h>
//TODO: could include later
//#include <device_launch_parameters.h>
#include <cuda_runtime.h>
//#include "../inc/helper_cuda.h"
#define GRID_YDIM 65535
// time stamp function in seconds
// Wall-clock time stamp in seconds (microsecond resolution, POSIX gettimeofday).
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double) tv.tv_sec + (double) tv.tv_usec / 1000000;
}
// Fill a row-major nx-by-ny matrix with (row + col) / 3.
void initDataA(float* data, int nx, int ny){
    int idx;
    for(idx = 0; idx < nx*ny; idx++){
        int row = idx / ny;
        int col = idx % ny;
        data[idx] = (float) (row + col)/3.0;
    }
}
// Fill a row-major nx-by-ny matrix with 3.14f * (row + col).
void initDataB(float* data, int nx, int ny){
    int idx;
    for(idx = 0; idx < nx*ny; idx++){
        data[idx] = (float)3.14*((idx / ny) + (idx % ny));
    }
}
// Print an nx-by-ny row-major matrix to stdout, one row per line,
// followed by a blank line.
void debugPrint(float* data, int nx, int ny){
    for(int r = 0; r < nx; r++){
        for(int c = 0; c < ny; c++)
            printf("%f ", data[r*ny + c]);
        printf("\n");
    }
    printf("\n");
}
// host side matrix addition
// Host-side reference: C = A + B element-wise over nx*ny floats.
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    const int total = nx * ny;
    for(int k = 0; k < total; k++)
        C[k] = A[k] + B[k];
}
// device-side matrix addition
//__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
// // kernel code might look something like this
// // but you may want to pad the matrices and index into them accordingly
// int ix = threadIdx.x + blockIdx.x*blockDim.x ;
// int iy = threadIdx.y + blockIdx.y*blockDim.y ;
// int idx = iy*nx + ix ;
// if( (ix<nx) && (iy<ny) )
// C[idx] = A[idx] + B[idx] ;
//}
// device-side matrix addition: C = A + B over an nx*ny element array.
// Thread coordinates are linearized across the whole 2D grid (row width =
// blockDim.x*gridDim.x), so each thread handles at most one element and the
// launch must supply at least nx*ny threads in total; excess threads are
// masked off by the bounds check.
__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){
int ix = threadIdx.x + blockIdx.x*blockDim.x ;
int iy = threadIdx.y + blockIdx.y*blockDim.y ;
// flatten the 2D thread coordinate into a 1D element index
int idx = iy*blockDim.x*gridDim.x + ix ;
if(idx<nx*ny)
C[idx] = A[idx] + B[idx] ;
}
// Driver: computes C = A + B on the GPU and verifies against a host-side
// reference. Usage: prog nx ny. On success, appends timing results
// (total, H2D, kernel, D2H) to time.log and echoes them to stdout.
int main( int argc, char *argv[] ) {
    // get program arguments
    if( argc != 3) {
        printf("Error: wrong number of args\n") ;
        exit(1) ;
    }
    int nx = atoi( argv[1] ) ;
    int ny = atoi( argv[2] ) ;
    // atoi() returns 0 on non-numeric input; reject non-positive sizes so
    // the byte-count arithmetic below cannot go to zero or negative.
    if( nx <= 0 || ny <= 0 ) {
        printf("Error: nx and ny must be positive integers\n") ;
        exit(1) ;
    }
    int noElems = nx*ny ;
    int bytes = noElems * sizeof(float) ;
    // alloc memory host-side
    float *h_A = (float *) malloc( bytes ) ;
    float *h_B = (float *) malloc( bytes ) ;
    float *h_hC = (float *) malloc( bytes ) ; // host result
    float *h_dC = (float *) malloc( bytes ) ; // gpu result
    // init matrices with deterministic data
    initDataA(h_A, nx, ny);
    initDataB(h_B, nx, ny);
    // alloc memory dev-side
    float *d_A, *d_B, *d_C ;
    cudaMalloc( (void **) &d_A, bytes ) ;
    cudaMalloc( (void **) &d_B, bytes ) ;
    cudaMalloc( (void **) &d_C, bytes ) ;
    // Launch configuration: choose a roughly square element grid, clamp
    // the y extent to the hardware limit, then convert to block counts.
    dim3 block( 32, 32 ) ;
    int gy = (int)sqrt(noElems);
    int gx = (noElems+gy-1)/gy;
    if(gy > GRID_YDIM){
        gx = (gx*gy+GRID_YDIM-1)/GRID_YDIM;
        gy = GRID_YDIM;
    }
    gx = (gx+block.x-1)/block.x;
    gy = (gy+block.y-1)/block.y;
    dim3 grid( gx, gy ) ;
    printf("noelems is %d\n",noElems);
    double timeStampA = getTimeStamp() ;
    // transfer data to dev (note: transfers would be ~2x faster if h_A and
    // h_B were pinned with cudaMallocHost)
    cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice ) ;
    cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice ) ;
    double timeStampB = getTimeStamp() ;
    f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ;
    cudaDeviceSynchronize() ;
    double timeStampC = getTimeStamp() ;
    //copy data back
    cudaMemcpy( h_dC, d_C, bytes, cudaMemcpyDeviceToHost ) ;
    double timeStampD = getTimeStamp() ;
    // free GPU resources
    cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ;
    cudaDeviceReset() ;
    // check result: both sides perform the same single-precision adds on
    // the same inputs, so the results should match bit-for-bit, which is
    // what makes memcmp() a valid comparison here.
    h_addmat( h_A, h_B, h_hC, nx, ny ) ;
    if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){
        FILE* fptr;
        fptr = fopen("time.log","a");
        if(fptr){ // guard against an unwritable log file
            fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n", nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
            fclose(fptr);
        }
        printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
    }else{
        printf("Error: function failed.\n");
    }
    // release host buffers (the original leaked all four)
    free(h_A); free(h_B); free(h_hC); free(h_dC);
}
|
20,503 |
#include <stdio.h>
// SAXPY-style kernel: c[i] = *alpha * a[i] + b[i], one element per thread.
// NOTE(review): there is no `index < count` bounds guard and the element
// count is not passed in. The launch site rounds the grid up to a multiple
// of 256 threads, so trailing threads read/write out of bounds whenever
// count is not a multiple of 256 — fixing this requires adding a count
// parameter here and at the launch site together.
__global__ void thread_per(int* a, int * b, int *c, int* alpha) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = *alpha * a[index] + b[index];
}
// Host driver for the thread_per kernel: reads alpha from stdin, computes
// c[i] = alpha * a[i] + b[i] on the device for i in [0, count), prints all
// results, and releases every allocation (the original leaked the host
// buffers a/b/c and the device buffer d_alpha).
void thread_per_block(int count) {
    int *a = (int*) malloc(sizeof(int) * count);
    int *b = (int*) malloc(sizeof(int) * count);
    int *c = (int*) malloc(sizeof(int) * count);
    int alpha;
    printf("Enter alpha: ");
    scanf("%d", &alpha);
    int *d_a;
    int *d_b;
    int *d_c;
    int *d_alpha;
    // deterministic test data
    for(int i = 0; i < count; i++) {
        a[i] = i;
        b[i] = i + 2;
    }
    cudaMalloc((void **)& d_a, sizeof(int) * count);
    cudaMalloc((void **)& d_b, sizeof(int) * count);
    cudaMalloc((void **)& d_c, sizeof(int) * count);
    cudaMalloc((void **) &d_alpha, sizeof(int));
    cudaMemcpy(d_a, a, count * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, count * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_alpha, &alpha, sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): thread_per has no bounds guard, so this rounded-up grid
    // writes past d_c when count is not a multiple of 256.
    thread_per<<<ceil(count / 256.0), 256>>>(d_a, d_b, d_c, d_alpha);
    // the blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy(c, d_c, sizeof(int) * count, cudaMemcpyDeviceToHost);
    for(int i = 0; i < count; i++) {
        printf("%d * %d + %d = %d\n", alpha, a[i], b[i], c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_alpha); // was leaked
    free(a);           // host buffers were leaked too
    free(b);
    free(c);
}
// Entry point: takes the element count as the first program argument.
int main(int argc, char** argv) {
    if(argc < 2) {
        printf("Enter count in file args\n");
        return -1;
    }
    int count = atoi(argv[1]);
    // atoi() returns 0 on non-numeric input; a non-positive count would
    // make thread_per_block allocate nothing yet still launch a grid.
    if(count < 1) {
        printf("count must be a positive integer\n");
        return -1;
    }
    thread_per_block(count);
    return 0;
}
20,504 | /*
number of mathematical operations (only floating point)
operation flo/o total
+-* : 27 1 27
/ : 2 4 8
pow : 1 13 13
sum 48
*/
#define M2L_KERNEL_CORE \
for(i=0;i<3;i++) nc[i]=0;\
nb=je-1;\
k=0;\
i=1;\
while(nb!=0){\
j=2-k;\
nc[j]=nc[j]+nb%2*i;\
nb=nb/2;\
j=k+1;\
k=j%3;\
if(k==0) i=i*2;\
}\
nd=nc[0];\
nc[0]=nc[1];\
nc[1]=nc[2];\
nc[2]=nd;\
xijc=(nc[0]-3)*rb;\
yijc=(nc[1]-3)*rb;\
zijc=(nc[2]-3)*rb;\
rh=sqrt(xijc*xijc+yijc*yijc+zijc*zijc)+eps;\
jbase=(je-1)*mpdnm;\
n=ng[tx];\
m=mg[tx];\
nms=n*(n+1)/2+m;\
for(i=0;i<2;i++) vecd[i]=0;\
for(k=-n;k<0;k++){\
nks=n*(n+1)/2-k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]+=dnmim*vecj[2*nks+1];\
vecd[1]-=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
for(k=0;k<=n;k++){\
nks=n*(n+1)/2+k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]-=dnmim*vecj[2*nks+1];\
vecd[1]+=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
__syncthreads();\
for(i=0;i<2;i++) vecj[2*nms+i]=vecd[i];\
__syncthreads();\
j=ng[tx];\
k=mg[tx];\
jks=j*(j+1)/2+k;\
for(i=0;i<2;i++) vecd[i]=0;\
fnmm=1.0;\
for(i=0;i<j-k;i++) fnmm=fnmm*(i+1);\
fnpm=1.0;\
for(i=0;i<j+k;i++) fnpm=fnpm*(i+1);\
ajk=pow(-1.0,j)*rsqrtf(fnmm*fnpm);\
for(n=abs(k);n<mp;n++){\
nks=n*(n+1)/2+k;\
jnk=(j+n)*(j+n)+j+n;\
fnmm=1.0;\
for(i=0;i<n-k;i++) fnmm=fnmm*(i+1);\
fnpm=1.0;\
for(i=0;i<n+k;i++) fnpm=fnpm*(i+1);\
ank=pow(-1.0,n)*rsqrtf(fnmm*fnpm);\
fnpm=1.0;\
for(i=0;i<j+n;i++) fnpm=fnpm*(i+1);\
ajn=pow(-1.0,j+n)/fnpm;\
sr=pow(-1.0,j+k)*ank*ajk/ajn;\
cnmre=sr*ynmre[jnk]/pow(rh,j+n+1);\
cnmim=sr*ynmim[jnk]/pow(rh,j+n+1);\
vecd[0]+=vecj[2*nks+0]*cnmre;\
vecd[0]-=vecj[2*nks+1]*cnmim;\
vecd[1]+=vecj[2*nks+0]*cnmim;\
vecd[1]+=vecj[2*nks+1]*cnmre;\
}\
__syncthreads();\
for(i=0;i<2;i++) vecj[2*jks+i]=vecd[i];\
__syncthreads();\
jbase=(je+nrbm-1)*mpdnm;\
n=ng[tx];\
m=mg[tx];\
nms=n*(n+1)/2+m;\
for(i=0;i<2;i++) vecd[i]=0;\
for(k=-n;k<0;k++){\
nks=n*(n+1)/2-k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]+=dnmim*vecj[2*nks+1];\
vecd[1]-=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}\
for(k=0;k<=n;k++){\
nks=n*(n+1)/2+k;\
nmk=jbase+(4*n*n*n+6*n*n+5*n)/3+m*(2*n+1)+k;\
dnmre=lvec[2*nmk+0];\
dnmim=lvec[2*nmk+1];\
vecd[0]+=dnmre*vecj[2*nks+0];\
vecd[0]-=dnmim*vecj[2*nks+1];\
vecd[1]+=dnmre*vecj[2*nks+1];\
vecd[1]+=dnmim*vecj[2*nks+0];\
}
|
20,505 | #include "ops-builder.hh"
#include <stdexcept>
#include "graph.hh"
#include "add.hh"
#include "adam-update.hh"
#include "argmax-accuracy.hh"
#include "input.hh"
#include "leaky-relu-grad.hh"
#include "log-softmax.hh"
#include "mat-mat-mul.hh"
#include "mat-mul-add.hh"
#include "mat-rvect-add.hh"
#include "mat-sum.hh"
#include "moment-update.hh"
#include "mse.hh"
#include "mse-grad.hh"
#include "relu-grad.hh"
#include "seq.hh"
#include "sigmoid-cross-entropy.hh"
#include "sigmoid-cross-entropy-grad.hh"
#include "sigmoid-grad.hh"
#include "softmax.hh"
#include "softmax-cross-entropy.hh"
#include "softmax-cross-entropy-grad.hh"
#include "tanh-grad.hh"
#include "update.hh"
#include "variable.hh"
#include "vect-sigmoid.hh"
#include "conv2d.hh"
#include "conv2d-bias-add.hh"
#include "conv2d-input-grad.hh"
#include "conv2d-kernel-grad.hh"
#include "conv2d-bias-add-grad.hh"
#include "conv2d-transpose.hh"
#include "conv2d-transpose-input-grad.hh"
#include "conv2d-transpose-kernel-grad.hh"
#include "vect-relu.hh"
#include "vect-relu-leaky.hh"
#include "vect-tanh.hh"
#include "reshape.hh"
namespace ops
{
// Returns the process-wide OpsBuilder (function-local static, constructed
// lazily on first use; initialization is thread-safe under C++11 rules).
OpsBuilder& OpsBuilder::instance()
{
static OpsBuilder builder;
return builder;
}
// Binds the builder to the global Graph instance; every node built below
// is registered into that graph.
OpsBuilder::OpsBuilder()
: graph_(Graph::instance())
{}
// Build an element-wise Add node; both operands must share a shape.
Add* OpsBuilder::add(Op* left, Op* right)
{
    if (left->shape_get() != right->shape_get())
        throw std::runtime_error {"add: left and right must have the same shape"};
    auto node = new Add(left, right);
    graph_.add(node);
    return node;
}
// Build an Adam optimizer update node; the first and second moment
// accumulators must match the variable's shape.
AdamUpdate* OpsBuilder::adam_update(Variable* var, Op* m, Op* v,
                                    dbl_t learning_rate,
                                    dbl_t beta1, dbl_t beta2, dbl_t eps)
{
    if (var->shape_get() != m->shape_get())
        throw std::runtime_error {"var and m must have the same shape"};
    if (var->shape_get() != v->shape_get())
        throw std::runtime_error {"var and v must have the same shape"};
    auto node = new AdamUpdate(var, m, v, learning_rate, beta1, beta2, eps);
    graph_.add(node);
    return node;
}
// Build an argmax-accuracy metric node over two same-shaped matrices.
ArgmaxAccuracy* OpsBuilder::argmax_accuracy(Op* y, Op* y_hat)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"y must be a matrix"};
    if (y_hat->shape_get().ndims() != 2)
        throw std::runtime_error {"y_hat must be a matrix"};
    if (y->shape_get() != y_hat->shape_get())
        throw std::runtime_error {"y and y_hat must have the same shape"};
    auto node = new ArgmaxAccuracy(y, y_hat);
    graph_.add(node);
    return node;
}
// Build a 2D convolution node; input and kernel must both be 4D tensors.
Conv2D* OpsBuilder::conv2d(Op* input, Op* kernel, const int* strides)
{
    if (input->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2D:input must be a 4D tensor"};
    if (kernel->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2D:kernel must be a 4D tensor"};
    auto node = new Conv2D(input, kernel, strides);
    graph_.add(node);
    return node;
}
// Build a per-channel bias addition over a 4D feature map; the bias vector
// length must equal the channel dimension z[3].
Conv2DBiasAdd* OpsBuilder::conv2d_bias_add(Op* z, Op* bias)
{
    if (z->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DBiasAdd:z must be a 4D tensor"};
    if (bias->shape_get().ndims() != 1)
        throw std::runtime_error {"Conv2DBiasAdd:bias must be a 1D array"};
    if (z->shape_get()[3] != bias->shape_get()[0])
        throw std::runtime_error {"Conv2DBiasAdd:z and bias shape are not corresponding"};
    auto node = new Conv2DBiasAdd(z, bias);
    graph_.add(node);
    return node;
}
// Build the gradient node for conv2d_bias_add; z must be a 4D tensor.
Conv2DBiasAddGrad* OpsBuilder::conv2d_bias_add_grad(Op* z)
{
    if (z->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DBiasAddGrad:z must be a 4D tensor"};
    auto node = new Conv2DBiasAddGrad(z);
    graph_.add(node);
    return node;
}
// Build the input-gradient node of Conv2D.
// Added the 4D-tensor validation that every other conv builder in this
// file performs; previously invalid ranks were silently accepted.
Conv2DInputGrad* OpsBuilder::conv2d_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size)
{
    if (y->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DInputGrad:y must be a 4D tensor"};
    if (kernel->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DInputGrad:kernel must be a 4D tensor"};
    auto res = new Conv2DInputGrad(y, kernel, strides, input_size);
    graph_.add(res);
    return res;
}
// Build the kernel-gradient node of Conv2D.
// Added the 4D-tensor validation used by the sibling conv builders.
Conv2DKernelGrad* OpsBuilder::conv2d_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size, const int* padded_size)
{
    if (y->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DKernelGrad:y must be a 4D tensor"};
    if (input->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DKernelGrad:input must be a 4D tensor"};
    auto res = new Conv2DKernelGrad(y, input, strides, kernel_size, padded_size);
    graph_.add(res);
    return res;
}
// Build a transposed 2D convolution node.
// Added the 4D-tensor validation used by the sibling conv builders.
Conv2DTranspose* OpsBuilder::conv2d_transpose(Op* input, Op* kernel, const int* out_size, const int* strides)
{
    if (input->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTranspose:input must be a 4D tensor"};
    if (kernel->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTranspose:kernel must be a 4D tensor"};
    auto res = new Conv2DTranspose(input, kernel, out_size, strides);
    graph_.add(res);
    return res;
}
// Build the input-gradient node of Conv2DTranspose.
// Added the 4D-tensor validation used by the sibling conv builders.
Conv2DTransposeInputGrad* OpsBuilder::conv2d_transpose_input_grad(Op* y, Op* kernel, const int* strides, const int* input_size)
{
    if (y->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTransposeInputGrad:y must be a 4D tensor"};
    if (kernel->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTransposeInputGrad:kernel must be a 4D tensor"};
    auto res = new Conv2DTransposeInputGrad(y, kernel, strides, input_size);
    graph_.add(res);
    return res;
}
// Build the kernel-gradient node of Conv2DTranspose.
// Added the 4D-tensor validation used by the sibling conv builders.
Conv2DTransposeKernelGrad* OpsBuilder::conv2d_transpose_kernel_grad(Op* y, Op* input, const int* strides, const int* kernel_size)
{
    if (y->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTransposeKernelGrad:y must be a 4D tensor"};
    if (input->shape_get().ndims() != 4)
        throw std::runtime_error {"Conv2DTransposeKernelGrad:input must be a 4D tensor"};
    auto res = new Conv2DTransposeKernelGrad(y, input, strides, kernel_size);
    graph_.add(res);
    return res;
}
// Build a graph input placeholder with the given shape.
Input* OpsBuilder::input(const Shape& shape)
{
    auto node = new Input(shape);
    graph_.add(node);
    return node;
}
// Build the leaky-ReLU gradient node; z and the incoming gradient dout
// must share a shape.
LeakyReluGrad* OpsBuilder::leaky_relu_grad(Op* z, Op* dout, dbl_t alpha)
{
    if (z->shape_get() != dout->shape_get())
        throw std::runtime_error {"LeakyReluGrad: z and dout must have the same shape"};
    auto node = new LeakyReluGrad(z, dout, alpha);
    graph_.add(node);
    return node;
}
// Build a log-softmax node over a 2D input.
LogSoftmax* OpsBuilder::log_softmax(Op* arg)
{
    if (arg->shape_get().ndims() != 2)
        throw std::runtime_error{"log softmax input must be a matrix"};
    auto node = new LogSoftmax(arg);
    graph_.add(node);
    return node;
}
// Build a matrix-matrix product node with optional transposition of either
// operand. The inner-dimension check uses the bool flags as indices:
// left's contraction dim is [1] when not transposed (!left_tr == 1) and [0]
// when transposed; right's is [0] when not transposed and [1] when
// transposed. NOTE(review): the error message says "left[1] and right[0]"
// even when a transpose flag redirects the check to the other dimension.
MatMatMul* OpsBuilder::mat_mat_mul(Op* left, Op* right, bool left_tr, bool right_tr)
{
if (left->shape_get().ndims() != 2)
throw std::runtime_error{"left operand must be a matrix"};
if (right->shape_get().ndims() != 2)
throw std::runtime_error{"right operand must be a matrix"};
if (left->shape_get()[!left_tr] != right->shape_get()[right_tr])
throw std::runtime_error{"left[1] and right[0] differ"};
auto res = new MatMatMul(left, right, left_tr, right_tr);
graph_.add(res);
return res;
}
// Build a fused x*w + b node: x (n,k) times w (k,m) plus row vector b (m).
MatMulAdd* OpsBuilder::mat_mul_add(Op* x, Op* w, Op* b)
{
    if (x->shape_get().ndims() != 2)
        throw std::runtime_error{"x must be a matrix"};
    if (w->shape_get().ndims() != 2)
        throw std::runtime_error{"w must be a matrix"};
    if (b->shape_get().ndims() != 1)
        throw std::runtime_error{"b must be a vector"};
    if (x->shape_get()[1] != w->shape_get()[0])
        throw std::runtime_error{"x[1] and w[0] differ"};
    if (w->shape_get()[1] != b->shape_get()[0])
        throw std::runtime_error{"w[1] and b[0] differ"};
    auto node = new MatMulAdd(x, w, b);
    graph_.add(node);
    return node;
}
// Build a node that adds a row vector to every row of a matrix.
MatRvectAdd* OpsBuilder::mat_rvect_add(Op* left, Op* right)
{
    if (left->shape_get().ndims() != 2)
        throw std::runtime_error{"left operand must be a matrix"};
    if (right->shape_get().ndims() != 1)
        throw std::runtime_error{"right operand must be a vector"};
    if (left->shape_get()[1] != right->shape_get()[0])
        throw std::runtime_error{"left[1] and right[0] differ"};
    auto node = new MatRvectAdd(left, right);
    graph_.add(node);
    return node;
}
// Build a matrix reduction node along axis 0 (columns) or 1 (rows).
MatSum* OpsBuilder::mat_sum(Op* arg, std::size_t axis)
{
    if (arg->shape_get().ndims() != 2)
        throw std::runtime_error {"arg must be a matrix"};
    if (axis >= 2)
        throw std::runtime_error {"axis must be 0 or 1"};
    auto node = new MatSum(arg, axis);
    graph_.add(node);
    return node;
}
// Build a moment-accumulator update node (optionally squaring dt).
MomentUpdate* OpsBuilder::moment_update(Variable* var, Op* dt,
                                        dbl_t coeff1, dbl_t coeff2, bool sq_update)
{
    if (var->shape_get() != dt->shape_get())
        throw std::runtime_error {"var and dt must have the same shape"};
    auto node = new MomentUpdate(var, dt, coeff1, coeff2, sq_update);
    graph_.add(node);
    return node;
}
// Build a mean-squared-error loss node over two same-shaped matrices.
MSE* OpsBuilder::mse(Op* y, Op* y_hat)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"MSE:y must be a matrix"};
    if (y_hat->shape_get().ndims() != 2)
        throw std::runtime_error {"MSE:y_hat must be a matrix"};
    if (y->shape_get() != y_hat->shape_get())
        throw std::runtime_error {"MSE: y and y_hat must have the same shape"};
    auto node = new MSE(y, y_hat);
    graph_.add(node);
    return node;
}
// Build a reshape node. When the target shape is fully defined it must
// preserve the total element count; the commented-out branch below was an
// unfinished check for partially-defined shapes (a single -1 wildcard) —
// presumably the -1 dimension divisibility rule, TODO confirm and enable.
Reshape* OpsBuilder::reshape(Op* arg, const Shape& shape)
{
auto& arg_shape = arg->shape_get();
if (shape.defined() && shape.total() != arg_shape.total())
throw std::runtime_error {"Reshape:"};
// if (! shape.defined() && (arg_shape.total() % (- shape.total()) != 0))
// throw std::runtime_error {"Reshape:"};
// nb -1 = max 1 ?? has to be checked
auto res = new Reshape(arg, shape);
graph_.add(res);
return res;
}
// Build the gradient node of the MSE loss.
MSEGrad* OpsBuilder::mse_grad(Op* y, Op* y_hat)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"MSEGrad: y must be a matrix"};
    if (y_hat->shape_get().ndims() != 2)
        throw std::runtime_error {"MSEGrad: y_hat must be a matrix"};
    if (y->shape_get() != y_hat->shape_get())
        throw std::runtime_error {"MSEGrad: y and y_hat must have the same shape"};
    auto node = new MSEGrad(y, y_hat);
    graph_.add(node);
    return node;
}
// Build the ReLU gradient node; z and dout must share a shape.
ReluGrad* OpsBuilder::relu_grad(Op* z, Op* dout)
{
    if (z->shape_get() != dout->shape_get())
        throw std::runtime_error {"ReluGrad: z and dout must have the same shape"};
    auto node = new ReluGrad(z, dout);
    graph_.add(node);
    return node;
}
// Build a sequencing node over a non-empty list of operations.
Seq* OpsBuilder::seq(const std::vector<Op*>& ops)
{
    if (ops.empty())
        throw std::runtime_error {"seq: ops can't be empty"};
    auto node = new Seq(ops);
    graph_.add(node);
    return node;
}
// Build a sigmoid cross-entropy loss node over two same-shaped matrices.
SigmoidCrossEntropy* OpsBuilder::sigmoid_cross_entropy(Op* y, Op* logits)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"y must be a matrix"};
    if (logits->shape_get().ndims() != 2)
        throw std::runtime_error {"logits must be a matrix"};
    if (y->shape_get() != logits->shape_get())
        throw std::runtime_error {"y and logits must have the same shape"};
    auto node = new SigmoidCrossEntropy(y, logits);
    graph_.add(node);
    return node;
}
// Build the gradient node of the sigmoid cross-entropy loss.
SigmoidCrossEntropyGrad* OpsBuilder::sigmoid_cross_entropy_grad(Op* y, Op* logits)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"y must be a matrix"};
    if (logits->shape_get().ndims() != 2)
        throw std::runtime_error {"logits must be a matrix"};
    if (y->shape_get() != logits->shape_get())
        throw std::runtime_error {"y and logits must have the same shape"};
    auto node = new SigmoidCrossEntropyGrad(y, logits);
    graph_.add(node);
    return node;
}
// Build the sigmoid gradient node (takes the sigmoid output, not its input).
SigmoidGrad* OpsBuilder::sigmoid_grad(Op* sig_out, Op* dout)
{
    if (sig_out->shape_get() != dout->shape_get())
        throw std::runtime_error {"SigmoidGrad: sig_out and dout must have the same shape"};
    auto node = new SigmoidGrad(sig_out, dout);
    graph_.add(node);
    return node;
}
// Build a softmax node over a 2D input.
Softmax* OpsBuilder::softmax(Op* arg)
{
    if (arg->shape_get().ndims() != 2)
        throw std::runtime_error{"softmax input must be a matrix"};
    auto node = new Softmax(arg);
    graph_.add(node);
    return node;
}
// Build a softmax cross-entropy loss node over two same-shaped matrices.
SoftmaxCrossEntropy* OpsBuilder::softmax_cross_entropy(Op* y, Op* logits)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"y must be a matrix"};
    if (logits->shape_get().ndims() != 2)
        throw std::runtime_error {"logits must be a matrix"};
    if (y->shape_get() != logits->shape_get())
        throw std::runtime_error {"y and logits must have the same shape"};
    auto node = new SoftmaxCrossEntropy(y, logits);
    graph_.add(node);
    return node;
}
// Build the gradient node of the softmax cross-entropy loss.
SoftmaxCrossEntropyGrad* OpsBuilder::softmax_cross_entropy_grad(Op* y, Op* logits)
{
    if (y->shape_get().ndims() != 2)
        throw std::runtime_error {"y must be a matrix"};
    if (logits->shape_get().ndims() != 2)
        throw std::runtime_error {"logits must be a matrix"};
    if (y->shape_get() != logits->shape_get())
        throw std::runtime_error {"y and logits must have the same shape"};
    auto node = new SoftmaxCrossEntropyGrad(y, logits);
    graph_.add(node);
    return node;
}
// Build the tanh gradient node (takes the tanh output, not its input).
TanhGrad* OpsBuilder::tanh_grad(Op* tanh_out, Op* dout)
{
    if (tanh_out->shape_get() != dout->shape_get())
        throw std::runtime_error {"TanhGrad: tanh_out and dout must have the same shape"};
    auto node = new TanhGrad(tanh_out, dout);
    graph_.add(node);
    return node;
}
// Build a plain SGD-style update node: coeff must be a scalar (0-dim).
Update* OpsBuilder::update(Variable* var, Op* dt, Op* coeff)
{
    if (var->shape_get() != dt->shape_get())
        throw std::runtime_error {"var and dt must have the same shape"};
    if (coeff->shape_get().ndims())
        throw std::runtime_error {"coeff must be a scalar"};
    auto node = new Update(var, dt, coeff);
    graph_.add(node);
    return node;
}
// Build a variable node; its shape must be fully defined up front.
// Registered via add_var (not add) so the graph tracks it as trainable state.
Variable* OpsBuilder::variable(const Shape& shape, bool trainable)
{
    if (!shape.defined())
        throw std::runtime_error{"shape not fully defined"};
    auto node = new Variable(shape, trainable);
    graph_.add_var(node);
    return node;
}
// Build an element-wise sigmoid node (any shape accepted).
VectSigmoid* OpsBuilder::vect_sigmoid(Op* arg)
{
    auto node = new VectSigmoid(arg);
    graph_.add(node);
    return node;
}
// Build an element-wise ReLU node (any shape accepted).
VectRelu* OpsBuilder::vect_relu(Op* arg)
{
    auto node = new VectRelu(arg);
    graph_.add(node);
    return node;
}
// Build an element-wise leaky-ReLU node with negative slope alpha.
VectReluLeaky* OpsBuilder::vect_relu_leaky(Op* arg, const dbl_t alpha)
{
    auto node = new VectReluLeaky(arg, alpha);
    graph_.add(node);
    return node;
}
// Build an element-wise tanh node (any shape accepted).
VectTanh* OpsBuilder::vect_tanh(Op* arg)
{
    auto node = new VectTanh(arg);
    graph_.add(node);
    return node;
}
}
|
20,506 | #include "includes.h"
namespace ann {
// CUDA2
}
// Momentum weight update for one layer: one thread per source neuron.
// For each outgoing connection k:
//   dw = -eta * (a * gjl[next][k]) + alpha * dw_prev;  w += dw
// l[]  = neurons per layer, s[] = per-layer neuron offsets,
// sw[] = per-layer weight offsets. z_arr and t_arr are accepted for
// signature compatibility but not read here.
__global__ void kernel_weight_update( int layer_id, int *l, int *s, int *sw, float *z_arr, float *a_arr, float *t_arr, float *gjl, float *w_arr, float *dw_arr, float eta, float alpha ){
    volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
    const int n_cur = l[layer_id];
    const int n_next = l[layer_id + 1];
    if(idx >= n_cur) return;
    const float activation = a_arr[s[layer_id] + idx];
    // base offset of this neuron's outgoing weight row
    const int row = sw[layer_id] + idx * (n_next - 1);
    for(int k = 0; k < n_next - 1; k++){
        const float grad = activation * gjl[s[layer_id + 1] + k];
        const float dw = -eta * grad + alpha * dw_arr[row + k];
        dw_arr[row + k] = dw;
        w_arr[row + k] += dw;
    }
}
20,507 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define BLOCK_X 10
#define BLOCK_Y 1
#define BLOCK_Z 1
#define THREAD_X 360
#define THREAD_Y 1
#define THREAD_Z 1
#define N 3600
#define PI 3.14159265358979323846
#define DEG_TO_RAD(deg) ((deg) / 180.0 * (PI))
// Element-wise cosine: B_d[i] = cos(radius_d[i]).
// No bounds guard: the launch must supply exactly N (= BLOCK_X * THREAD_X
// = 3600) threads, as main() does; any larger grid would access out of
// bounds.
__global__ void cosine10_1_360_1(double *B_d, double *radius_d)
{
int thread_index = (blockIdx.x * blockDim.x) + threadIdx.x;
B_d[thread_index] = cos(radius_d[thread_index]);
}
// Compute cos() of N angles evenly spaced over [0, 360) degrees on the GPU
// and write all results to ./outputs/cosine10_1_360_1.txt (first 5 echoed
// to stdout).
int main()
{
    int i;
    double B[N];         // HOST output
    double radius[N];    // HOST input angles, in radians
    double *B_d;         // DEVICE output
    double *radius_d;    // DEVICE input
    double deg = 0.0;
    FILE *outputfile;
    outputfile = fopen("./outputs/cosine10_1_360_1.txt", "w");
    if (outputfile == NULL) {
        printf("cannot open either directory or file! \n");
        exit(1);
    }
    for (i = 0; i < N; i += 1) {
        radius[i] = DEG_TO_RAD(deg);
        deg += 360 /(double) N;
    }
    dim3 blocks(BLOCK_X,BLOCK_Y,BLOCK_Z);
    dim3 threads(THREAD_X,THREAD_Y,THREAD_Z);
    cudaMalloc( (void**) &B_d, N*sizeof(double));
    cudaMalloc( (void**) &radius_d, N*sizeof(double));
    // Only the input needs a host-to-device copy. The original also copied
    // the still-uninitialized B[] buffer to the device — an indeterminate
    // read on the host and a wasted transfer, since the kernel overwrites
    // every element of B_d anyway.
    cudaMemcpy(radius_d, radius, N*sizeof(double), cudaMemcpyHostToDevice);
    cosine10_1_360_1<<< blocks, threads >>>(B_d, radius_d);
    // blocking copy-back synchronizes with the kernel
    cudaMemcpy(B, B_d, N*sizeof(double), cudaMemcpyDeviceToHost);
    for(i = 0; i < N; i += 1){
        fprintf(outputfile,"%d %.16f\n",i, B[i]);
    }
    for(i = 0; i < 5; i += 1){
        printf("%d %.16f\n",i, B[i]);
    }
    fclose(outputfile);
    cudaFree(B_d);
    cudaFree(radius_d);
    return 0;
}
|
20,508 | /* CUDA timing example
To compile: nvcc -o testprog2 testprog2.cu
*/
#include <iostream>
#include <cuda.h>
// Kernel that executes on the CUDA device. This is executed by ONE
// stream processor
// Element-wise vector addition: C[i] = A[i] + B[i] for i < N.
// One element per thread; threads past the end of the data exit early.
__global__ void vec_add(float* A, float* B, float* C, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// main routine that executes on the host
// main routine that executes on the host: times a vector addition of
// N = 10^7 floats on the GPU using CUDA events, then verifies by printing
// the first and last 10 elements.
int main(void)
{
int n;
int N = 10000000;
size_t size = N * sizeof(float);
// CUDA event types used for timing execution
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocate in HOST memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
// Initialize vectors with deterministic values
for (n = 0; n < N; ++n) {
h_A[n] = 3.2333 * n;
h_B[n] = 8.09287 * n;
}
// Allocate in DEVICE memory
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, size);
cudaMalloc(&d_B, size);
cudaMalloc(&d_C, size);
// Copy vectors from host to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Set up layout of kernel grid (ceiling division covers the tail)
int threadsPerBlock = 1024;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
std::cout << "Launching a grid of "
<< blocksPerGrid << " "
<< threadsPerBlock * blocksPerGrid
<< " threads" << std::endl;
// Launch kernel and time it. The events bracket only the kernel, not the
// copies; cudaEventSynchronize blocks until the stop event has occurred.
// NOTE(review): the launch result is never checked (no cudaGetLastError).
cudaEventRecord(start, 0);
vec_add<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time; // Must be a float: cudaEventElapsedTime writes milliseconds here
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout << "Kernel took: " << time << " ms" << std::endl;
// Copy result from device memory into host memory (blocking, so h_C is
// safe to read afterwards)
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Print the first and last 10 elements of the arrays
for (n = 0; n < N; ++n) {
if (n < 10 || n >= N - 10)
std::cout << n << " " << h_A[n] << " " << h_B[n]
<< " " << h_C[n] << std::endl;
}
free(h_A);
free(h_B);
free(h_C);
}
|
20,509 | #include <iostream>
#include <math.h>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/adjacent_difference.h>
#include <thrust/generate.h>
#include <thrust/unique.h>
#include <thrust/scan.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform.h>
#include <thrust/binary_search.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
#define SITES 10
#define MAX_MEASUREMENT 8
// Sum of every rainfall measurement across all records.
unsigned int TotalRain ( thrust::device_vector<unsigned int>& M)
{
    return thrust::reduce(M.begin(), M.end());
}
// Number of records whose site id equals Site (each record is one
// rainy day at a site).
unsigned int TotalDaysRainInSite ( thrust::device_vector<unsigned int>& S,
                                   const unsigned int Site)
{
    return thrust::count(S.begin(), S.end(), Site);
}
// Number of distinct sites present in S. Sorts S in place (as the
// original did) and counts run boundaries between unequal neighbours.
// Fix: the original ran reduce_by_key over an UNINITIALIZED values vector
// (G) — the summed values were garbage, only the key count was usable —
// and allocated three temporaries; sort + inner_product needs none.
unsigned int TotalSites ( thrust::device_vector<unsigned int>& S)
{
    if (S.empty())
        return 0;
    thrust::sort(S.begin(), S.end());
    return thrust::inner_product(S.begin(), S.end() - 1, S.begin() + 1,
                                 (unsigned int) 1,
                                 thrust::plus<unsigned int>(),
                                 thrust::not_equal_to<unsigned int>());
}
/**
* TotalRainIN auxiliar code
*/
///Type definitions
typedef thrust::tuple<unsigned int, unsigned int> UIntTuple;
///Map functor
///Map functor over (site, measurement) tuples: passes a tuple through
///unchanged when its site matches the target, otherwise zeroes the
///measurement component (so a subsequent sum only counts the target site).
struct zero_if_not_site : thrust::unary_function<UIntTuple,UIntTuple>
{
const unsigned int site;
zero_if_not_site(unsigned int _site) : site(_site) {}
__host__ __device__ UIntTuple operator()(const UIntTuple x) const
{
return (thrust::get<0>(x) == site) ? x : thrust::make_tuple(thrust::get<0>(x), (unsigned int) 0);
}
};
///Reduce functor
///Reduce functor: component-wise sum of two (unsigned, unsigned) tuples.
///Fix: operator() is now const, as thrust reduction functors conventionally
///are (the original non-const call operator fails in const contexts).
struct add_tuple_value : thrust::binary_function<UIntTuple, UIntTuple, UIntTuple>
{
    __host__ __device__ UIntTuple operator()(const UIntTuple &x, const UIntTuple &y) const
    {
        return thrust::make_tuple(thrust::get<0>(x) + thrust::get<0>(y),
                                  thrust::get<1>(x) + thrust::get<1>(y));
    }
};
///Total rainfall recorded at site St (sum of measurements whose site == St).
///Fix: the reduction identity was thrust::make_tuple(0, 0) — a
///tuple<int,int> — so transform_reduce deduced a SIGNED accumulator type
///and converted every UIntTuple element; use an unsigned identity that
///matches UIntTuple instead.
unsigned int TotalRainIN ( thrust::device_vector<unsigned int>& S,
                           thrust::device_vector<unsigned int>& M,
                           const unsigned int St)
{
    UIntTuple t =
        thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(S.begin(), M.begin())),
                                 thrust::make_zip_iterator(thrust::make_tuple(S.end(), M.end ())),
                                 zero_if_not_site(St),
                                 thrust::make_tuple(0u, 0u),
                                 add_tuple_value());
    return thrust::get<1>(t);
}
///Map functor
///Map functor over (day, measurement) tuples: passes a tuple through when
///its day lies in [start, end], otherwise zeroes the measurement.
///NOTE(review): the out-of-period branch NEGATES the unsigned day, which
///wraps modulo 2^32. The caller (TotalRainBetween) only reads the
///measurement component of the final reduction, so the wrapped day sum is
///discarded — but the negation looks unintentional; presumably
///thrust::get<0>(x) (unnegated) was meant. Confirm before relying on the
///day component.
struct zero_if_not_period : thrust::unary_function<UIntTuple,UIntTuple>
{
const unsigned int start;
const unsigned int end;
zero_if_not_period(unsigned int _start, unsigned int _end) : start(_start), end(_end) {}
__host__ __device__ UIntTuple operator()(const UIntTuple x) const
{
return (start <= thrust::get<0>(x) && thrust::get<0>(x) <= end) ? x : thrust::make_tuple(-thrust::get<0>(x), (unsigned int) 0);
}
};
///Total rainfall recorded on days in [Start, End].
///Fix: the reduction identity was thrust::make_tuple(0, 0) — a
///tuple<int,int> — so the accumulation ran in SIGNED ints. Combined with
///zero_if_not_period's unsigned negation (huge wrapped values), summing in
///int risked signed-overflow UB; an unsigned identity keeps the whole
///reduction in well-defined unsigned arithmetic.
unsigned int TotalRainBetween ( thrust::device_vector<unsigned int>& D,
                                thrust::device_vector<unsigned int>& M,
                                const unsigned int Start, const unsigned int End)
{
    UIntTuple t =
        thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(D.begin(), M.begin())),
                                 thrust::make_zip_iterator(thrust::make_tuple(D.end(), M.end ())),
                                 zero_if_not_period(Start, End),
                                 thrust::make_tuple(0u, 0u),
                                 add_tuple_value());
    return thrust::get<1>(t);
}
// Number of distinct days that appear in D. Sorts D in place (as the
// original did) and counts run boundaries between unequal neighbours.
// Fix: same issue as TotalSites — reduce_by_key over an UNINITIALIZED
// values vector plus three needless temporaries.
unsigned int TotalDaysWithRain ( thrust::device_vector<unsigned int>& D) {
    if (D.empty())
        return 0;
    thrust::sort(D.begin(), D.end());
    return thrust::inner_product(D.begin(), D.end() - 1, D.begin() + 1,
                                 (unsigned int) 1,
                                 thrust::plus<unsigned int>(),
                                 thrust::not_equal_to<unsigned int>());
}
///Filter functor
///Filter functor: true when the measurement component of a
///(day, measurement) tuple exceeds min_threshold.
///Fix: the unary_function result type was declared UIntTuple although
///operator() returns bool.
struct is_higher_than : thrust::unary_function<UIntTuple,bool>
{
    const unsigned int min_threshold;
    is_higher_than(unsigned int _min_threshold) : min_threshold(_min_threshold) {}
    __host__ __device__ bool operator()(const UIntTuple x) const
    {
        return thrust::get<1>(x) > min_threshold;
    }
};
// Number of (day, measurement) records whose measurement exceeds Min.
unsigned int TotalDaysRainHigher( thrust::device_vector<unsigned int>& D,
                                  thrust::device_vector<unsigned int>& M,
                                  const unsigned int Min)
{
    auto first = thrust::make_zip_iterator(thrust::make_tuple(D.begin(), M.begin()));
    auto last  = thrust::make_zip_iterator(thrust::make_tuple(D.end(), M.end ()));
    return thrust::count_if(first, last, is_higher_than(Min));
}
// Dispatch one menu option ('0'..'6') to the corresponding query and print
// its result. Returns false for an unrecognized option character.
// NOTE: options '2' and '5' sort Sites / Days in place as a side effect of
// the queries they call.
bool Option ( char o, thrust::device_vector<unsigned int>& Days,
thrust::device_vector<unsigned int>& Sites,
thrust::device_vector<unsigned int>& Measurements)
{
switch (o) {
case '0': std::cout << "Total Rainfall is " << TotalRain( Measurements ) << std::endl; break;
case '1': std::cout << "Total number of Days with any Rainfall in Site 3: "
<< TotalDaysRainInSite ( Sites, 3 ) << std::endl; break;
case '2': std::cout << "Total Sites with rain: " << TotalSites ( Sites ) << std::endl; break;
case '3': std::cout << "Total Rainfall in Site 7 is " << TotalRainIN ( Sites, Measurements, 7 )
<< std::endl; break;
case '4': std::cout << "Total Rainfall between days 7 and 77 is "
<< TotalRainBetween ( Days, Measurements, 7, 77 ) << std::endl; break;
case '5': std::cout << "Total number of Days with any rainfall: "
<< TotalDaysWithRain ( Days ) << std::endl; break;
case '6': std::cout << "Number of Days where Rainfall exceeded 10 is "
<< TotalDaysRainHigher ( Days, Measurements, 10 ) << std::endl; break;
default: return false;
}
return true;
}
// Generator functor: uniform-ish value in [0, N) via rand().
// NOTE(review): although marked __host__ __device__, rand() is a host
// C-library function; this functor is only invoked host-side here (via
// thrust::generate on a host_vector in main) — confirm before using it in
// device algorithms.
struct rand_modulus {
unsigned int N;
rand_modulus(unsigned int _NN) : N(_NN) {}
__host__ __device__
unsigned int operator()() const {
return rand() % N; //N*SITES
}
};
// Binary functor: 1 when both operands are equal, 0 otherwise.
struct is_equal {
    __host__ __device__
    unsigned int operator() ( const unsigned int& d, const unsigned int& s ) {
        return (d == s) ? 1 : 0;
    }
};
// Extracts the site index from a combined (day * SITES + site) key.
struct get_site {
    __host__ __device__
    unsigned int operator() ( const unsigned int& v ) {
        const unsigned int site = v % SITES;
        return site;
    }
};
// Extracts the day index from a combined (day * SITES + site) key.
struct get_day {
    __host__ __device__
    unsigned int operator() ( const unsigned int& v ) {
        const unsigned int day = v / SITES;
        return day;
    }
};
// Draws a random rainfall measurement distributed as a power of two:
// 2^(u / (100000 / MAX_MEASUREMENT)) for u uniform in [0, 100000), so the
// result spans roughly [1, 2^MAX_MEASUREMENT).
// NOTE(review): (100000 / MAX_MEASUREMENT) is integer division, so the
// exponent grid is only exact when MAX_MEASUREMENT divides 100000 — confirm
// that is intended.
unsigned int rand_mes() {
return (unsigned int) pow( 2.0, ((double) (rand() % 100000)) / (100000 / MAX_MEASUREMENT) );
}
// Entry point. argv[1] selects the query (see Option()); argv[2] sets the
// number of records N. Generates N random (day, site) keys plus measurements,
// sorts them, removes duplicate keys, then runs the selected query.
int main (int argc, char **argv)
{
unsigned int N=20; // number of rainfall records
char o= '1'; // query selector, dispatched by Option()
int Dup = -1; // de-duplication passes (starts at -1: first pass is free)
if (argc>1) { o = argv[1][0]; }
if (argc>2) { N = atoi(argv[2]); }
if (o == 'H' || o == 'h') {
std::cout << "Arguments: (H|1|2|3|4|5|6) N " << std::endl;
exit(0);
}
// use this host vector to generate random input data
thrust::host_vector<unsigned int> HDay(N);
thrust::host_vector<unsigned int> HMes(N);
srand(0); // init random generation seed: same random numbers generated in each execution
// Generate Information sorted by (increasing) day and site, and with no duplicates (day, site)
// Each HDay entry is a combined key: day * SITES + site.
thrust::generate ( HDay.begin(), HDay.end(), rand_modulus(N*SITES) );
thrust::generate ( HMes.begin(), HMes.end(), rand_mes );
// Create Device vectors and copy data from host vectors
thrust::device_vector<unsigned int> Days = HDay;
thrust::device_vector<unsigned int> Measurements= HMes;
thrust::device_vector<unsigned int> Sites(N);
// Sort data and modify to avoid duplicates ( only works fine if SITES=10 )
// Repeatedly flag adjacent equal keys (is_equal) and bump the second key of
// each equal pair by one, until a pass finds no duplicates. Bumping an equal
// neighbour by 1 keeps the sequence sorted.
thrust::sort ( Days.begin(), Days.end() );
do {
Dup++;
thrust::transform ( Days.begin(), Days.end()-1, Days.begin()+1, Sites.begin(), is_equal() );
thrust::transform ( Days.begin()+1, Days.end(), Sites.begin(), Days.begin()+1, thrust::plus<unsigned int>() );
} while (thrust::reduce ( Sites.begin(), Sites.end()-1 ) > 0);
// Split each combined key into its site (key % SITES) and day (key / SITES).
thrust::transform ( Days.begin(), Days.end(), Sites.begin(), get_site() );
thrust::transform ( Days.begin(), Days.end(), Days.begin(), get_day() );
if (Dup >0)
std::cout << "Phases to extract duplicates during generation: " << Dup << std::endl << std::endl;
if ( N<=20 ) { // for small cases: print contains of input vectors
std::cout << "Days: ";
thrust::copy( Days.begin(), Days.end(), std::ostream_iterator<unsigned int>( std::cout, ", " ));
std::cout << std::endl << "Sites: ";
thrust::copy( Sites.begin(), Sites.end(), std::ostream_iterator<unsigned int>( std::cout, ", " ));
std::cout << std::endl << "Measurements: ";
thrust::copy( HMes.begin(), HMes.end(), std::ostream_iterator<unsigned int>( std::cout, ", " ));
std::cout << std::endl;
}
// create device vectors and copy data from host vectors
Option ( o, Days, Sites, Measurements);
return 0;
}
|
20,510 | /*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include <stdint.h>
// Logistic sigmoid 1 / (1 + e^-x) on the device, using the fast-math
// exponential intrinsic.
inline __device__ float sigmoidGPU(const float& x)
{
    const float e = __expf(-x);
    return 1.0f / (1.0f + e);
}
// YOLO decode kernel: one thread per (grid x, grid y, anchor) cell.
// Expected launch: 3-D grid/blocks covering gridSizeX x gridSizeY x numBBoxes.
// input layout per image: [numBBoxes][5 + numOutputClasses][gridSizeY][gridSizeX];
// outputs are flat per-detection arrays offset by lastInputSize detections
// from previous (smaller-scale) YOLO heads.
__global__ void gpuYoloLayer(const float* input, float* boxes, float* scores, float* classes, const uint netWidth,
    const uint netHeight, const uint gridSizeX, const uint gridSizeY, const uint numOutputClasses, const uint numBBoxes,
    const uint64_t lastInputSize, const float scaleXY, const float* anchors, const int* mask)
{
    uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
    uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
    uint z_id = blockIdx.z * blockDim.z + threadIdx.z;
    // guard the padded tail of the launch grid
    if (x_id >= gridSizeX || y_id >= gridSizeY || z_id >= numBBoxes)
        return;
    const int numGridCells = gridSizeX * gridSizeY;
    const int bbindex = y_id * gridSizeX + x_id;
    // scaled-sigmoid parameters: decode = sigmoid(t) * alpha + beta
    const float alpha = scaleXY;
    const float beta = -0.5 * (scaleXY - 1);
    // box center in network-input pixels
    float xc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta + x_id)
        * netWidth / gridSizeX;
    float yc = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * alpha + beta + y_id)
        * netHeight / gridSizeY;
    // box size: exp(t) scaled by the anchor selected through mask[z_id]
    float w = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * anchors[mask[z_id] * 2];
    float h = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * anchors[mask[z_id] * 2 + 1];
    const float objectness = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
    // arg-max over per-class probabilities
    float maxProb = 0.0f;
    int maxIndex = -1;
    for (uint i = 0; i < numOutputClasses; ++i) {
        float prob = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
        if (prob > maxProb) {
            maxProb = prob;
            maxIndex = i;
        }
    }
    // Global output slot for this detection.
    // BUGFIX: the row term previously used y_id * gridSizeY, which mis-indexes
    // non-square grids; the row stride is gridSizeX (consistent with bbindex).
    // Widened to uint64_t since lastInputSize is 64-bit.
    const uint64_t count = (uint64_t)z_id * gridSizeX * gridSizeY + (uint64_t)y_id * gridSizeX + x_id + lastInputSize;
    boxes[count * 4 + 0] = xc;
    boxes[count * 4 + 1] = yc;
    boxes[count * 4 + 2] = w;
    boxes[count * 4 + 3] = h;
    scores[count] = maxProb * objectness;
    classes[count] = (float) maxIndex;
}
cudaError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
    const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
    const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
    const float& scaleXY, const void* anchors, const void* mask, cudaStream_t stream);
// Host launcher: decodes one YOLO head for every image in the batch on
// `stream`, one kernel launch per image, advancing the input/output pointers
// by per-image strides. Returns the last CUDA error (covers launch-config
// failures; asynchronous execution errors surface at the next sync).
cudaError_t cudaYoloLayer(const void* input, void* boxes, void* scores, void* classes, const uint& batchSize,
    const uint64_t& inputSize, const uint64_t& outputSize, const uint64_t& lastInputSize, const uint& netWidth,
    const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses, const uint& numBBoxes,
    const float& scaleXY, const void* anchors, const void* mask, cudaStream_t stream)
{
    dim3 threads_per_block(16, 16, 4);
    // Ceil-division grid: the previous (n / t) + 1 launched one full extra
    // block of idle threads whenever a dimension divided evenly.
    dim3 number_of_blocks((gridSizeX + threads_per_block.x - 1) / threads_per_block.x,
        (gridSizeY + threads_per_block.y - 1) / threads_per_block.y,
        (numBBoxes + threads_per_block.z - 1) / threads_per_block.z);
    for (unsigned int batch = 0; batch < batchSize; ++batch) {
        gpuYoloLayer<<<number_of_blocks, threads_per_block, 0, stream>>>(
            reinterpret_cast<const float*> (input) + (batch * inputSize),
            reinterpret_cast<float*> (boxes) + (batch * 4 * outputSize),
            reinterpret_cast<float*> (scores) + (batch * 1 * outputSize),
            reinterpret_cast<float*> (classes) + (batch * 1 * outputSize),
            netWidth, netHeight, gridSizeX, gridSizeY, numOutputClasses, numBBoxes, lastInputSize, scaleXY,
            reinterpret_cast<const float*> (anchors), reinterpret_cast<const int*> (mask));
    }
    return cudaGetLastError();
}
|
20,511 | #include <iostream>
#include <stdio.h>
#include <cmath>
#include <math.h>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <cstring>
#include <string>
#include <algorithm>
#include <random>
#include <numeric>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
//bacteria surface geometry, flat surface, Brownian noise
//Last updated: March 10, 2021
using namespace std;
//set random seed for uniform distribution for angles
// Host-side RNG used for the initial loading (positions, orientations, run
// clocks); seeded from wall-clock time, so runs are not reproducible across
// invocations.
unsigned int seed = time(NULL);
default_random_engine engine(seed);
#define THREADS_PER_BLOCK 128
#define TILE_SIZE 128
#define PI 3.14159265358979
#define K_B 1.38064852E-23 //m^2 kg s^-2 K^-1
#define DIM 3
//====================================================================================
//Returns the inverse parallel geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_parallel_geo_factor(double a)
{
    const double scale = 1.0 / (2.0 * PI * a);
    const double series = log(a) - 0.207 + 0.980 / a - 0.133 / (a * a);
    return series * scale;
}
//Returns the inverse perpendicular geometric factor for the
// translational friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_perpendicular_geo_factor(double a)
{
    const double scale = 1.0 / (4.0 * PI * a);
    const double series = log(a) + 0.839 + 0.185 / a + 0.233 / (a * a);
    return series * scale;
}
//Returns the rotational geometric factor for the
// rotation friction tensor. a is the aspect ratio: a=l/d
__device__ double inverse_rotation_geo_factor(double a)
{
    const double scale = 3.0 / (PI * a * a);
    const double series = log(a) - 0.662 + 0.917 / a - 0.050 / (a * a);
    return series * scale;
}
/* this GPU kernel function is used to initialize the random states */
// One curandState per thread, all seeded identically but with distinct
// subsequence numbers, giving independent streams.
// NOTE(review): the global index uses the compile-time THREADS_PER_BLOCK
// instead of blockDim.x — consistent with the launch configs built in main
// (one block when N < 128, else blockDim.x == THREADS_PER_BLOCK), but
// fragile if the launch configuration ever changes.
__global__ void init(unsigned int seed, curandState *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id, 0, &state[id]);
}
// Overload: initializes the Philox counter-based states used for the
// float4 Gaussian noise draws (see generate_random_numbers_noise).
// Same THREADS_PER_BLOCK indexing caveat as the curandState overload above.
__global__ void init(unsigned int seed, curandStatePhilox4_32_10_t *state) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id, 0, &state[id]);
}
// Draws four independent standard-normal variates per thread into numbers[id]
// (consumed by time_marching as the translational and rotational Brownian
// increments for one time step).
__global__ void generate_random_numbers_noise(curandStatePhilox4_32_10_t *state, float4 *numbers) {
int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
/* Copy state to local memory for efficiency */
curandStatePhilox4_32_10_t localState = state[id];
numbers[id] = curand_normal4(&localState);
/* Copy state back to global memory */
state[id] = localState;
}
// Computes, per bacterium, the gradients of the bacterium-wall steric
// repulsion potential: d_dUbdy_dxa (w.r.t. position) and d_dUbdy_dna
// (w.r.t. orientation). The walls are the planes y = +-C; a bacterium with
// y <= 0 interacts with the bottom wall, otherwise the top wall, with W_hat
// re-signed so it points into the domain.
// Layout: d_x[i] = (x, y, z, half-length la), d_n[i] = (nx, ny, nz,
// half-diameter da). One thread per bacterium; launch with >= N threads.
__global__ void calculate_BodyWallInteraction(double3 *d_dUbdy_dxa,
double3 *d_dUbdy_dna, double4 *d_x, double4 *d_n, double3 W_hat,
double C, double sigma_bdy, int N)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < N)
{
double4 xa = d_x[gtid];
double4 na = d_n[gtid];
double la = xa.w;
double da = na.w;
double3 S; // point on the surface closest to bacteria
if (xa.y <= 0.0) //bottom surface
{
// flip the wall normal so every component points into the domain
// NOTE(review): component-wise abs assumes W_hat is axis-aligned
// (e.g. (0, 1, 0)) — confirm against the input file.
W_hat.x = abs(W_hat.x);
W_hat.y = abs(W_hat.y);
W_hat.z = abs(W_hat.z);
S.x = xa.x;
S.y = -abs(C);
S.z = 0.0;
}
else // top surface
{
W_hat.x = -abs(W_hat.x);
W_hat.y = -abs(W_hat.y);
W_hat.z = -abs(W_hat.z);
S.x = xa.x;
S.y = abs(C);
S.z = 0.0;
}
double dot_na_W_hat, dot_xa_W_hat, dot_W_hat_S, r_alpha;
dot_na_W_hat = na.x * W_hat.x + na.y * W_hat.y + na.z * W_hat.z;
dot_xa_W_hat = xa.x * W_hat.x + xa.y * W_hat.y + xa.z * W_hat.z;
dot_W_hat_S = W_hat.x * S.x + W_hat.y * S.y + W_hat.z * S.z;
// overlap depth along the wall normal: positive means the projected
// bacterium end (body half-length plus half-diameter) reaches the wall
r_alpha = la * abs(dot_na_W_hat) + da - dot_xa_W_hat + dot_W_hat_S;
double dUbdy_dralpha;
double3 dUbdy_dxa, dUbdy_dna;
double3 dralpha_dna;
if (r_alpha > 0.0) //contact with boundary
{
// exponential repulsion: dU/dr = (1/sigma) * exp(r/sigma)
dUbdy_dralpha = (1.0 / sigma_bdy) * exp(r_alpha / sigma_bdy);
//boundary force derivatives:
dUbdy_dxa.x = dUbdy_dralpha * -W_hat.x;
dUbdy_dxa.y = dUbdy_dralpha * -W_hat.y;
dUbdy_dxa.z = dUbdy_dralpha * -W_hat.z;
//boundary orientation derivatives:
// d|n.W|/dn is undefined at n.W == 0; treated as zero torque
if (dot_na_W_hat == 0.0)
{
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
else
{
// dr/dn = la * sign(n.W) * W_hat
dralpha_dna.x = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.x;
dralpha_dna.y = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.y;
dralpha_dna.z = (la * dot_na_W_hat / abs(dot_na_W_hat)) * W_hat.z;
dUbdy_dna.x = dUbdy_dralpha * dralpha_dna.x;
dUbdy_dna.y = dUbdy_dralpha * dralpha_dna.y;
dUbdy_dna.z = dUbdy_dralpha * dralpha_dna.z;
}
}
else //no contact with boundary
{
dUbdy_dxa.x = 0.0;
dUbdy_dxa.y = 0.0;
dUbdy_dxa.z = 0.0;
dUbdy_dna.x = 0.0;
dUbdy_dna.y = 0.0;
dUbdy_dna.z = 0.0;
}
// Save the result in global memory for the integration step
d_dUbdy_dxa[gtid] = dUbdy_dxa;
d_dUbdy_dna[gtid] = dUbdy_dna;
}
}
// Advances every bacterium one time step dt (explicit Euler–Maruyama) with
// run-and-tumble switching, wall forces, anisotropic drag, and Brownian noise.
//
// Per bacterium: d_x[i] = (x, y, z, half-length la), d_n[i] = (nx, ny, nz,
// half-diameter da). d_tumble_flag selects the mode:
//   1 = tumbling: hold position until delta_tumble elapses, then rotate the
//       in-plane orientation by a Gaussian angle ~ N(avg_n_tumble,
//       std_n_tumble) degrees, rejection-sampled into [0, 180] and negated
//       with probability 1/2;
//   0 = running: self-propulsion along n plus the wall force (scaled by the
//       inverse aspect-ratio-dependent drag tensor) and Brownian noise;
//       after delta_run elapses, switch to tumbling.
// Motion is confined to the x-y plane (z components forced to 0), x is
// periodic on [0, L], and n is re-normalized after each step.
// One thread per bacterium; launch with >= N threads.
__global__ void time_marching(double4 *d_x, double4 *d_n,
    double3 *d_dUbdy_dxa, double3 *d_dUbdy_dna,
    double epsilon_r,
    double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
    double dt, int N, double L,
    double *d_t_run, double *d_t_tumble, int *d_tumble_flag,
    double delta_run, double delta_tumble,
    double avg_n_tumble, double std_n_tumble,
    curandState *state, float4 *d_random_numbers_noise)
{
    int gtid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gtid < N)
    {
        double4 xa = d_x[gtid];
        double4 na = d_n[gtid];
        float4 random_numbers_noise = d_random_numbers_noise[gtid];
        double la = xa.w;
        double da = na.w;
        double3 dUbdy_dxa = d_dUbdy_dxa[gtid];
        double3 dUbdy_dna = d_dUbdy_dna[gtid];
        double4 x_new;
        double4 n_new;
        //-----Start: creating orientation and orientation projection matrix-----
        // ori_matrix = n n^T (outer product); ori_proj_matrix = I - n n^T
        double ori_matrix[DIM][DIM];
        ori_matrix[0][0] = na.x * na.x;
        ori_matrix[1][1] = na.y * na.y;
        ori_matrix[2][2] = na.z * na.z;
        ori_matrix[0][1] = na.x * na.y;
        ori_matrix[0][2] = na.x * na.z;
        ori_matrix[1][2] = na.y * na.z;
        ori_matrix[1][0] = ori_matrix[0][1];
        ori_matrix[2][0] = ori_matrix[0][2];
        ori_matrix[2][1] = ori_matrix[1][2];
        double ori_proj_matrix[DIM][DIM];
        ori_proj_matrix[0][0] = 1.0 - na.x * na.x;
        ori_proj_matrix[1][1] = 1.0 - na.y * na.y;
        ori_proj_matrix[2][2] = 1.0 - na.z * na.z;
        ori_proj_matrix[0][1] = 0.0 - na.x * na.y;
        ori_proj_matrix[0][2] = 0.0 - na.x * na.z;
        ori_proj_matrix[1][2] = 0.0 - na.y * na.z;
        ori_proj_matrix[1][0] = ori_proj_matrix[0][1];
        ori_proj_matrix[2][0] = ori_proj_matrix[0][2];
        ori_proj_matrix[2][1] = ori_proj_matrix[1][2];
        //-----End: creating orientation and orientation projection matrix-----
        //-----Start: time-marching + tumbling dynamics-----
        if (d_tumble_flag[gtid] == 1) //tumbling
        {
            d_t_tumble[gtid] += dt;
            if (d_t_tumble[gtid] < delta_tumble) //don't move
            {
                x_new.x = xa.x;
                x_new.y = xa.y;
                x_new.z = xa.z;
                x_new.w = xa.w;
                n_new.x = na.x;
                n_new.y = na.y;
                n_new.z = na.z;
                n_new.w = na.w;
            }
            else //tumble
            {
                d_tumble_flag[gtid] = 0;
                d_t_tumble[gtid] = 0.0;
                float angle;
                double rad_angle;
                curandState localState = state[gtid];
                // Gaussian tumble angle in degrees, rejection-sampled into [0, 180]
                angle = curand_normal(&localState);
                angle = angle * std_n_tumble + avg_n_tumble;
                while (angle < 0.0 || angle > 180.0)
                {
                    angle = curand_normal(&localState);
                    angle = angle * std_n_tumble + avg_n_tumble;
                }
                double uniform1 = curand_uniform(&localState); //number between 0 and 1
                if (uniform1 < 0.5) //otherwise angle is positive
                {
                    angle = -angle;
                }
                state[gtid] = localState;
                rad_angle = angle * PI / 180; //convert to radians
                //rotation matrix (2-D, in-plane)
                double R[2][2];
                R[0][0] = cos(rad_angle);
                R[0][1] = -sin(rad_angle);
                R[1][0] = sin(rad_angle);
                R[1][1] = cos(rad_angle);
                n_new.x = R[0][0] * na.x + R[0][1] * na.y;
                n_new.y = R[1][0] * na.x + R[1][1] * na.y;
                n_new.z = 0.0;
                n_new.w = da;
                x_new.x = xa.x;
                x_new.y = xa.y;
                x_new.z = xa.z;
                x_new.w = xa.w;
            }
        }
        else //run
        {
            d_t_run[gtid] += dt;
            if (d_t_run[gtid] < delta_run) //run
            {
                //translational dynamics:
                //calculating geometric factors:
                double aspect = la/da;
                double inverse_parallel = inverse_parallel_geo_factor(aspect);
                double inverse_perp = inverse_perpendicular_geo_factor(aspect);
                double inverse_rotation = inverse_rotation_geo_factor(aspect);
                //-----Start: creating Gamma_inverse matrix-----
                // anisotropic inverse drag: parallel along n, perpendicular otherwise
                double Gamma_inverse[DIM][DIM];
                for(int i = 0; i < DIM; i++)
                {
                    for(int j = 0; j < DIM; j++)
                    {
                        Gamma_inverse[i][j] = inverse_parallel * ori_matrix[i][j]
                            + inverse_perp * ori_proj_matrix[i][j];
                    }
                }
                //-----End: creating Gamma_inverse matrix-----
                //-----Start: creating translational diffusion matrix-----
                double Pe_trans_matrix[DIM][DIM];
                double sqrt_Pe_inverse_parallel = sqrt(inverse_Pe_parallel);
                double sqrt_Pe_inverse_perp = sqrt(inverse_Pe_perp);
                for(int i = 0; i < DIM; i++)
                {
                    for(int j = 0; j < DIM; j++)
                    {
                        Pe_trans_matrix[i][j] = sqrt_Pe_inverse_parallel * ori_matrix[i][j]
                            + sqrt_Pe_inverse_perp * ori_proj_matrix[i][j];
                    }
                }
                //-----End: creating translational diffusion matrix-----
                //adding it all together:
                // wall force contribution to the velocity
                double3 x_b;
                x_b.x = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.x;
                x_b.y = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.y;
                x_b.z = - epsilon_r * (inverse_Pe_T) * dUbdy_dxa.z;
                //matrix multiply:
                double3 Gamma_inverse_x_b;
                Gamma_inverse_x_b.x = Gamma_inverse[0][0] * x_b.x
                    + Gamma_inverse[0][1] * x_b.y
                    + Gamma_inverse[0][2] * x_b.z;
                Gamma_inverse_x_b.y = Gamma_inverse[1][0] * x_b.x
                    + Gamma_inverse[1][1] * x_b.y
                    + Gamma_inverse[1][2] * x_b.z;
                Gamma_inverse_x_b.z = Gamma_inverse[2][0] * x_b.x
                    + Gamma_inverse[2][1] * x_b.y
                    + Gamma_inverse[2][2] * x_b.z;
                //noise: Wiener increments sqrt(2 dt) * N(0,1), in-plane only
                float3 d_xi;
                d_xi.x = random_numbers_noise.x * sqrt(2.0 * dt);
                d_xi.y = random_numbers_noise.y * sqrt(2.0 * dt);
                d_xi.z = 0.0;
                float3 trans_noise;
                trans_noise.x = Pe_trans_matrix[0][0] * d_xi.x
                    + Pe_trans_matrix[0][1] * d_xi.y
                    + Pe_trans_matrix[0][2] * d_xi.z;
                trans_noise.y = Pe_trans_matrix[1][0] * d_xi.x
                    + Pe_trans_matrix[1][1] * d_xi.y
                    + Pe_trans_matrix[1][2] * d_xi.z;
                trans_noise.z = Pe_trans_matrix[2][0] * d_xi.x
                    + Pe_trans_matrix[2][1] * d_xi.y
                    + Pe_trans_matrix[2][2] * d_xi.z;
                //time step: self-propulsion (na * dt) + wall force + noise
                x_new.x = xa.x + na.x * dt + Gamma_inverse_x_b.x * dt + trans_noise.x;
                x_new.y = xa.y + na.y * dt + Gamma_inverse_x_b.y * dt + trans_noise.y;
                x_new.z = 0.0;
                x_new.w = la;
                //orientation dynamics
                double3 n_b;
                int dim = 2;
                n_b.x = - epsilon_r * inverse_Pe_R * inverse_rotation * dUbdy_dna.x + (1 - dim) * inverse_Pe_R * na.x;
                n_b.y = - epsilon_r * inverse_Pe_R * inverse_rotation * dUbdy_dna.y + (1 - dim) * inverse_Pe_R * na.y;
                n_b.z = - epsilon_r * inverse_Pe_R * inverse_rotation * dUbdy_dna.z + (1 - dim) * inverse_Pe_R * na.z;
                // project onto the plane perpendicular to n (keeps |n| ~ 1)
                double3 ori_proj_n_b;
                ori_proj_n_b.x = ori_proj_matrix[0][0] * n_b.x
                    + ori_proj_matrix[0][1] * n_b.y
                    + ori_proj_matrix[0][2] * n_b.z;
                ori_proj_n_b.y = ori_proj_matrix[1][0] * n_b.x
                    + ori_proj_matrix[1][1] * n_b.y
                    + ori_proj_matrix[1][2] * n_b.z;
                ori_proj_n_b.z = ori_proj_matrix[2][0] * n_b.x
                    + ori_proj_matrix[2][1] * n_b.y
                    + ori_proj_matrix[2][2] * n_b.z;
                //noise:
                float3 d_zeta;
                d_zeta.x = random_numbers_noise.z * sqrt(2.0 * inverse_Pe_R * dt);
                d_zeta.y = random_numbers_noise.w * sqrt(2.0 * inverse_Pe_R * dt);
                d_zeta.z = 0.0;
                double3 ori_noise;
                ori_noise.x = ori_proj_matrix[0][0] * d_zeta.x
                    + ori_proj_matrix[0][1] * d_zeta.y
                    + ori_proj_matrix[0][2] * d_zeta.z;
                ori_noise.y = ori_proj_matrix[1][0] * d_zeta.x
                    + ori_proj_matrix[1][1] * d_zeta.y
                    + ori_proj_matrix[1][2] * d_zeta.z;
                ori_noise.z = ori_proj_matrix[2][0] * d_zeta.x
                    + ori_proj_matrix[2][1] * d_zeta.y
                    + ori_proj_matrix[2][2] * d_zeta.z;
                n_new.x = na.x + ori_proj_n_b.x * dt + ori_noise.x;
                n_new.y = na.y + ori_proj_n_b.y * dt + ori_noise.y;
                n_new.z = 0.0;
                n_new.w = da;
            }
            else // run period over: switch to tumbling, hold state this step
            {
                d_tumble_flag[gtid] = 1;
                d_t_run[gtid] = 0.0;
                x_new.x = xa.x;
                x_new.y = xa.y;
                x_new.z = xa.z;
                x_new.w = xa.w;
                n_new.x = na.x;
                n_new.y = na.y;
                n_new.z = na.z;
                n_new.w = na.w;
            }
        }
        //-----End: time-marching + tumbling dynamics-----
        //normalize n afterwards:
        double magn_n_new_Sqrd = n_new.x * n_new.x + n_new.y * n_new.y + n_new.z * n_new.z;
        double magn_n_new = sqrt(magn_n_new_Sqrd);
        n_new.x = (n_new.x / magn_n_new);
        n_new.y = (n_new.y / magn_n_new);
        n_new.z = (n_new.z / magn_n_new);
        //periodic BC in x over [0, L]
        if (x_new.x < 0.0)
        {
            // BUGFIX: wrap to the right edge. The previous `L - x_new.x`
            // reflected the coordinate and produced a value greater than L
            // instead of a periodic image inside the domain.
            x_new.x = L + x_new.x;
        }
        else if (x_new.x > L)
        {
            double delta_x = x_new.x - L;
            x_new.x = delta_x;
        }
        // Save the result in global memory
        d_x[gtid] = x_new;
        d_n[gtid] = n_new;
    }
}
//returns the greater common divisor of two numbers
// Uses Euclid's algorithm (O(log min(a, b))). Negative inputs are treated by
// absolute value; gcd(0, n) == |n|. The previous trial-division loop returned
// an UNINITIALIZED value whenever either argument was <= 0 and took
// O(min(a, b)) time.
int gcd(int first_number, int second_number)
{
    int a = abs(first_number);
    int b = abs(second_number);
    while (b != 0)
    {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}
//loads the .txt file that contains the simulation input variables data
// Reads 24 whitespace-separated numeric values from `filename` in a fixed
// order and assigns them to the output parameters. Exits the program when the
// file cannot be opened or does not contain 24 readable values (previously a
// short or corrupt file silently left garbage in the later parameters).
void load_textfile_sim_parameters( char filename[],
int& sim_num, int& case_num,
double& dt, double& time_save, double& start_time, double& final_time,
int& N, double& l, double& d,
double3& W_hat, double& C, double& L,
double& epsilon_r, double& sigma_bdy,
double& inverse_Pe_T, double& inverse_Pe_parallel, double& inverse_Pe_perp, double& inverse_Pe_R,
double& delta_run, double& delta_tumble, double& avg_n_tumble, double& std_n_tumble)
{
ifstream infile(filename);
if (infile.fail())
{
cout<<"\nSimulation parameters input file opening failed.\n";
exit(1);
}
// const so the array below is a standard fixed-size array (not a VLA)
const int number_inputs = 24;
double input_vec[number_inputs];
for (int i = 0; i < number_inputs; i++)
{
if (!(infile >> input_vec[i]))
{
cout<<"\nSimulation parameters input file is missing or has invalid values.\n";
exit(1);
}
}
// assignment order mirrors the documented file layout
int i = 0;
sim_num = int(input_vec[i]);
case_num = int(input_vec[++i]);
dt = input_vec[++i];
time_save = input_vec[++i];
start_time = input_vec[++i];
final_time = input_vec[++i];
N = int(input_vec[++i]);
l = input_vec[++i];
d = input_vec[++i];
W_hat.x = input_vec[++i];
W_hat.y = input_vec[++i];
W_hat.z = input_vec[++i];
C = input_vec[++i];
L = input_vec[++i];
epsilon_r = input_vec[++i];
sigma_bdy = input_vec[++i];
inverse_Pe_T = input_vec[++i];
inverse_Pe_parallel = input_vec[++i];
inverse_Pe_perp = input_vec[++i];
inverse_Pe_R = input_vec[++i];
delta_run = input_vec[++i];
delta_tumble = input_vec[++i];
avg_n_tumble = input_vec[++i];
std_n_tumble = input_vec[++i];
cout << "\nSimulation parameters loaded\n";
}
// Seeds the initial state of all N bacteria using the global host RNG:
// positions uniform over [xmin, xmax] x [-C, C] with z = 0, unit in-plane
// orientations at a uniform random angle, and run clocks staggered uniformly
// over one run period when a full run fits inside the simulation window
// (so the population does not tumble in lock-step). Tumble clocks start at 0.
void initial_loading(double4 x[], double4 n[], int N, double C, double L,
double l, double d, double t_run[], double t_tumble[], double delta_run, double simulation_time)
{
    const double factorC = 1.0;   // fraction of the channel half-height used
    const double factorL = 1.0;   // fraction of the channel length used
    const double shrink = 1.0 - factorL;
    const double xmin = 0.0 + 0.5 * shrink * L;
    const double xmax = L - 0.5 * shrink * L;
    uniform_real_distribution<double> uniform_x(xmin, xmax);
    uniform_real_distribution<double> uniform_y(-factorC * C, factorC * C);
    uniform_real_distribution<double> uniform_dist_angle(0, 2.0 * PI );
    uniform_real_distribution<double> uniform_dist_run_time(0.0, delta_run);
    const bool stagger_runs = (delta_run < simulation_time);
    for(int i = 0; i < N; i++)
    {
        // geometry: half-length in x.w, half-diameter in n.w
        x[i].w = l;
        n[i].w = d;
        // position
        x[i].x = uniform_x(engine);
        x[i].y = uniform_y(engine);
        x[i].z = 0.0;
        // unit orientation in the x-y plane
        const double theta = uniform_dist_angle(engine);
        n[i].x = cos(theta);
        n[i].y = sin(theta);
        n[i].z = 0.0;
        // clocks
        t_run[i] = stagger_runs ? uniform_dist_run_time(engine) : 0.0;
        t_tumble[i] = 0.0;
    }
}
//Returns the eigenvectors corresponding to the orientation vectors for
// all the bacteria.
// For each bacterium, builds an orthonormal frame {v1, v2, v3} stored as the
// nine entries of eigenvectors[alpha]:
//   v1 = n (assumed unit length),
//   v2 = (0, -nz, ny) / denom, with denom = sqrt(1 - nx^2), orthogonal to v1,
//   v3 = v1 x v2 = (denom, -nx*ny/denom, -nx*nz/denom).
// The poles n = (+-1, 0, 0) are handled separately since denom vanishes there.
void eigenvectors_ellipsoid(double eigenvectors[][DIM*DIM], double4 n[], int N)
{
    for (int alpha = 0; alpha < N; alpha++)
    {
        if (n[alpha].x == 1.0)
        {
            //v1:
            eigenvectors[alpha][0] = 1.0;
            eigenvectors[alpha][1] = 0.0;
            eigenvectors[alpha][2] = 0.0;
            //v2:
            eigenvectors[alpha][3] = 0.0;
            eigenvectors[alpha][4] = 1.0;
            eigenvectors[alpha][5] = 0.0;
            //v3:
            eigenvectors[alpha][6] = 0.0;
            eigenvectors[alpha][7] = 0.0;
            eigenvectors[alpha][8] = 1.0;
        }
        else if (n[alpha].x == -1.0)
        {
            //v1:
            eigenvectors[alpha][0] = -1.0;
            eigenvectors[alpha][1] = 0.0;
            eigenvectors[alpha][2] = 0.0;
            //v2:
            eigenvectors[alpha][3] = 0.0;
            eigenvectors[alpha][4] = -1.0;
            eigenvectors[alpha][5] = 0.0;
            //v3:
            eigenvectors[alpha][6] = 0.0;
            eigenvectors[alpha][7] = 0.0;
            eigenvectors[alpha][8] = 1.0;
        }
        else
        {
            //v1 = n:
            eigenvectors[alpha][0] = n[alpha].x;
            eigenvectors[alpha][1] = n[alpha].y;
            eigenvectors[alpha][2] = n[alpha].z;
            double denom = sqrt(1.0 - n[alpha].x * n[alpha].x );
            //v2:
            eigenvectors[alpha][3] = 0.0;
            eigenvectors[alpha][4] = -n[alpha].z / denom;
            eigenvectors[alpha][5] = n[alpha].y / denom;
            //v3 = v1 x v2:
            // BUGFIX: the first component was 1 - nx^2 (= denom^2), which made
            // v3 non-unit; the cross product gives (ny^2 + nz^2)/denom = denom.
            eigenvectors[alpha][6] = denom;
            eigenvectors[alpha][7] = -(n[alpha].x * n[alpha].y) / denom;
            eigenvectors[alpha][8] = -(n[alpha].x * n[alpha].z) / denom;
        }
    }
    return;
}
//Prints simulation input to file
// Writes every simulation parameter as a single CSV row (preceded by a header
// line) to "SimulationInput.txt" at fixed 30-digit precision, so each run's
// configuration is recorded alongside its outputs.
void print_to_file_input(
int sim_num, int case_num,
double dt, double time_save, double start_time, double final_time,
int N, double l, double d,
double3 W_hat, double C, double L,
double epsilon_r, double sigma_bdy,
double inverse_Pe_T, double inverse_Pe_parallel, double inverse_Pe_perp, double inverse_Pe_R,
double delta_run, double delta_tumble, double avg_n_tumble, double std_n_tumble)
{
    ofstream out;
    out.open("SimulationInput.txt");
    if (out.fail())
    {
        cout<<"Output file opening failed.\n";
        exit(1);
    }
    out.setf(ios::fixed);
    out.setf(ios::showpoint);
    out.precision(30);
    out << "sim_num, case_num, dt, time_save, start_time, final_time, N, l, d, W_hat_1, W_hat_2, W_hat_3, C, L, epsilon_r, sigma_bdy, inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble" << endl;
    out << sim_num << ", " << case_num << ", " << dt << ", " << time_save << ", "
        << start_time << ", " << final_time << ", " << N << ", " << l << ", "
        << d << ", " << W_hat.x << ", " << W_hat.y << ", " << W_hat.z << ", "
        << C << ", " << L << ", " << epsilon_r << ", " << sigma_bdy << ", "
        << inverse_Pe_T << ", " << inverse_Pe_parallel << ", " << inverse_Pe_perp << ", "
        << inverse_Pe_R << ", " << delta_run << ", " << delta_tumble << ", "
        << avg_n_tumble << ", " << std_n_tumble << endl;
    out.close();
}
//Prints output to file
// Writes one CSV row per bacterium (centroid, the three orientation
// eigenvectors, semi-axes, run clock) to sim%d_case%d_timestep%015d.txt at
// fixed 15-digit precision.
// NOTE(review): SemiAxis2 and SemiAxis3 both print n[alpha].w — presumably
// intentional for an axisymmetric (spheroidal) body; confirm.
void print_to_file_output(int sim_num, int case_num, int itime, int N,
double4 x[], double4 n[], double t_run[])
{
//dimensionless Cartesian vector components of the eigenvectors for the orientation of the bacteria
// Heap-allocated: the previous runtime-sized stack array was a non-standard
// VLA and risked stack overflow for large N.
double (*eig_vec)[DIM * DIM] = new double[N][DIM * DIM];
eigenvectors_ellipsoid(eig_vec, n, N);
ofstream fout;
char file_name2[100];
sprintf(file_name2,"sim%d_case%d_timestep%015d.txt", sim_num, case_num, itime);
fout.open(file_name2);
if (fout.fail())
{
cout<<"Output file opening failed.\n";
delete[] eig_vec;
exit(1);
}
fout.setf(ios::fixed);
fout.setf(ios::showpoint);
fout.precision(15);
string headers("Centroid_1, Centroid_2, Centroid_3, DirVector1_1, DirVector1_2, DirVector1_3, DirVector2_1, DirVector2_2, DirVector2_3, DirVector3_1, DirVector3_2, DirVector3_3, SemiAxis1, SemiAxis2, SemiAxis3, tRun");
fout << headers << endl;
for (int alpha = 0; alpha < N; alpha++)
{
fout << x[alpha].x << ", "
<< x[alpha].y << ", "
<< x[alpha].z << ", ";
for (int nCol = 0; nCol < DIM*DIM; nCol++)
{
fout << eig_vec[alpha][nCol] << ", ";
}
fout << x[alpha].w << ", "
<< n[alpha].w << ", "
<< n[alpha].w << ", "
<< t_run[alpha] << endl;
}
fout.close();
delete[] eig_vec;
return;
}
//====================================================================================
// Driver: loads parameters, seeds the initial configuration, allocates and
// fills device buffers, then runs the dynamics loop (noise generation -> wall
// interaction -> time marching), saving snapshots every timestep_save steps.
int main(void)
{
//-----Start: simulation input-----
int sim_num; //simulation number
int case_num; //case number
double dt; //dimensionless time step
double time_save; //dimensionless time at which to output
double start_time; //dimensionless start time of simulation
double final_time; //dimensionless final time of simulation
int N; //number of bacteria in simulation
double l; //half-length of bacteria
double d; //half-diameter of bacteria
double3 W_hat; //wall normal
double C; //wall surface displacement from origin
double L; //wall length
double epsilon_r;
double sigma_bdy; //range parameter for bacteria-wall steric repulsion
double inverse_Pe_T;
double inverse_Pe_parallel;
double inverse_Pe_perp;
double inverse_Pe_R;
double delta_run; //run time
double delta_tumble; //tumble time
double avg_n_tumble; //average tumbling angle in degrees
double std_n_tumble; //std tumbling angle in degrees
load_textfile_sim_parameters( "bacteria_surface_input.txt",
sim_num, case_num,
dt, time_save, start_time, final_time,
N, l, d,
W_hat, C, L,
epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
// echo the loaded configuration for the run log
cout.setf(ios::fixed);
cout.setf(ios::showpoint);
cout.precision(15);
cout << endl<<"==============="<<endl
<< "sim_num = " << sim_num << endl
<< "case_num = " << case_num << endl
<< "dt = " << dt << endl
<< "time_save = " << time_save << endl
<< "start_time = " << start_time << endl
<< "final_time = " << final_time << endl
<< "N = " << N << endl
<< "l = " << l << endl
<< "d = " << d << endl
<< "W_hat = " << "< " << W_hat.x << ", " << W_hat.y << ", " << W_hat.z << ">" << endl
<< "C = " << C << endl
<< "L = " << L << endl
<< "epsilon_r = " << epsilon_r << endl
<< "sigma_bdy = " << sigma_bdy << endl
<< "inverse_Pe_T = " << inverse_Pe_T << endl
<< "inverse_Pe_parallel = " << inverse_Pe_parallel << endl
<< "inverse_Pe_perp = " << inverse_Pe_perp << endl
<< "inverse_Pe_R = " << inverse_Pe_R << endl
<< "delta_run = "<< delta_run << endl
<< "delta_tumble = " << delta_tumble << endl
<< "avg_n_tumble = " << avg_n_tumble << endl
<< "std_n_tumble = " << std_n_tumble << endl
<< "================"<<endl;
cout.precision(15);
print_to_file_input(sim_num, case_num, dt, time_save, start_time, final_time,
N, l, d, W_hat, C, L, epsilon_r, sigma_bdy,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R, delta_run, delta_tumble, avg_n_tumble, std_n_tumble);
//-----End: simulation input-----
//-----Start: declaring derived simulation parameters-----
//simulation variables:
int time_steps = ceil((final_time - start_time) / dt); //number of simulation time steps
int timestep_save; //number of simulation time steps until save output
// NOTE(review): if time_save < dt this rounds to 0 and `itime %
// timestep_save` below divides by zero — consider guarding.
timestep_save = ceil(time_save / dt);
// NOTE(review): these are runtime-sized stack arrays (a non-standard VLA
// extension); large N risks stack overflow — heap allocation would be safer.
double4 x[N]; //dimensionless Cartesian coordinates of the bacteria & dimensionless half-length of the bacteria
double4 n[N]; //dimensionless Cartesian vector components of the orientation vector of the bacteria & dimensionless half-diameter of the bacteria
double t_run[N]; //run time of the bacteria
double t_tumble[N]; //tumble time of bacteria
int tumble_flag[N]; //tumble flag of bacteria (if tumble_flag[alpha] = 1, then bacteria tumbles; otherwise it runs)
memset(x, 0, N * sizeof(double4));
memset(n, 0, N * sizeof(double4));
memset(t_run, 0, N * sizeof(double));
memset(t_tumble, 0, N * sizeof(double));
memset(tumble_flag, 0, N * sizeof(int));
//-----End: declaring derived simulation parameters-----
//-----Start: INITIALIZING-----
//-----Start: initial positions, orientations, and run time-----
initial_loading(x, n, N, C, L, l, d, t_run, t_tumble, delta_run, (final_time - start_time));
//-----End: initial positions, orientations, and run time-----
//-----Start: print initial positions and orientations-----
print_to_file_output(sim_num, case_num, 0, N, x, n, t_run);
//-----End: print initial positions and orientations-----
//-----Start: set up cuda variables-----
// calculate number of blocks and threads needed
// (the init kernels assume this exact mapping; see their THREADS_PER_BLOCK note)
int num_BLOCKS, num_THREADS;
if (N < THREADS_PER_BLOCK)
{
num_BLOCKS = 1;
num_THREADS = N;
}
else
{
num_BLOCKS = 1 + (N - 1)/THREADS_PER_BLOCK; //ceiling, use only if h_N != 0
num_THREADS = THREADS_PER_BLOCK;
}
// declare GPU memory pointers
double4 *d_x;
double4 *d_n;
double3 *d_dUbdy_dxa;
double3 *d_dUbdy_dna;
double *d_t_run;
double *d_t_tumble;
int *d_tumble_flag;
float4 *d_random_numbers_noise;
// allocate GPU memory
// NOTE(review): allocation/copy return codes are not checked and the buffers
// are never cudaFree'd — acceptable only because the process exits at the end.
cudaMalloc((void**) &d_x, N * sizeof(double4));
cudaMalloc((void**) &d_n, N * sizeof(double4));
cudaMalloc((void**) &d_dUbdy_dxa, N * sizeof(double3));
cudaMalloc((void**) &d_dUbdy_dna, N * sizeof(double3));
cudaMalloc((void**) &d_t_run, N * sizeof(double));
cudaMalloc((void**) &d_t_tumble, N * sizeof(double));
cudaMalloc((void**) &d_tumble_flag, N * sizeof(int));
cudaMalloc((void **)&d_random_numbers_noise, N * sizeof(float4));
// transfer the array to the GPU
cudaMemcpy(d_x, x, N * sizeof(double4), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, n, N * sizeof(double4), cudaMemcpyHostToDevice);
cudaMemcpy(d_t_run, t_run, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_t_tumble, t_tumble, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_tumble_flag, tumble_flag, N * sizeof(int), cudaMemcpyHostToDevice);
//random number generators:
curandState *d_CurandStates;
curandStatePhilox4_32_10_t *d_PHILOXStates;
cudaMalloc((void **) &d_CurandStates, N * sizeof(curandState));
cudaMalloc((void **) &d_PHILOXStates, N * sizeof(curandStatePhilox4_32_10_t));
// setup seeds
init<<< num_BLOCKS, num_THREADS >>>(seed, d_CurandStates);
init<<< num_BLOCKS, num_THREADS >>>(seed, d_PHILOXStates);
//-----End: set up cuda variables-----
cout << "End: INITIALIZING" << endl;
//-----End: INITIALIZING-----
//-----Start: DYNAMICS LOOP-----
int itime = floor(start_time / dt) + 1;
cout << "itime: " << itime << endl;
cout << "time_steps: " << time_steps << endl;
while (itime <= time_steps)
{
// All three kernels launch on the default stream, so they execute in
// order without explicit synchronization; the blocking cudaMemcpy below
// is the only host/device sync point.
//-----Start: random numbers -----
generate_random_numbers_noise<<< num_BLOCKS, num_THREADS >>>(d_PHILOXStates, d_random_numbers_noise);
//-----End: random numbers -----
//-----Start: boundary interactions-----
calculate_BodyWallInteraction<<< num_BLOCKS, num_THREADS >>>(d_dUbdy_dxa,
d_dUbdy_dna, d_x, d_n, W_hat,
C, sigma_bdy, N);
//-----End: boundary interactions-----
//-----Start: time-marching-----
time_marching<<< num_BLOCKS, num_THREADS >>>
(d_x, d_n,
d_dUbdy_dxa, d_dUbdy_dna,
epsilon_r,
inverse_Pe_T, inverse_Pe_parallel, inverse_Pe_perp, inverse_Pe_R,
dt, N, L,
d_t_run, d_t_tumble, d_tumble_flag,
delta_run, delta_tumble,
avg_n_tumble, std_n_tumble,
d_CurandStates, d_random_numbers_noise);
//-----End: time-marching-----
//-----Start: saving variables-----
if ( itime % timestep_save == 0)
{
// copy back the result array to the CPU
cudaMemcpy(x, d_x, N * sizeof(double4), cudaMemcpyDeviceToHost);
cudaMemcpy(n, d_n, N * sizeof(double4), cudaMemcpyDeviceToHost);
cudaMemcpy(t_run, d_t_run, N * sizeof(double), cudaMemcpyDeviceToHost);
print_to_file_output(sim_num, case_num, itime, N, x, n, t_run);
}
//-----End: saving variables-----
printf("\ntime step: %d", itime);
itime++;
}
cout << endl << endl;
//-----End: DYNAMICS LOOP-----
return 0;
}
|
20,512 | #include <stdio.h>
#include <stdlib.h>
// Rotates the array one position to the left: a[i] <- a[(i+1) % size].
// BUG FIX: the original wrote a[tid] while other threads were still reading
// it (data race), advanced by blockDim.x only (re-processing elements when
// gridDim.x > 1), and the wrap-around element could read an already-shifted
// a[0].  This version saves a[0] first and separates the read and write
// phases of each round with barriers.
// NOTE: correct only for single-block launches (gridDim.x == 1), which is
// how main() launches it -- __syncthreads() cannot order work across blocks.
__global__ void shiftArray(unsigned* a, unsigned size) {
    unsigned first = (size > 0) ? a[0] : 0;          // save before any write clobbers it
    unsigned stride = blockDim.x * gridDim.x;
    unsigned lane = blockDim.x * blockIdx.x + threadIdx.x;
    for (unsigned base = 0; base < size; base += stride) {
        unsigned tid = base + lane;
        unsigned v = 0;
        if (tid < size) {
            // the wrapping element must use the saved copy of a[0]
            v = (tid + 1 == size) ? first : a[tid + 1];
        }
        __syncthreads();   // all reads of this round complete before any write
        if (tid < size) a[tid] = v;
        __syncthreads();   // writes visible before the next round's reads
    }
}
/*
 * Host driver: fills an array with 0..size-1, rotates it left by one on the
 * GPU, and prints the result.  Adds error checking and releases both host
 * and device memory (the original leaked both and never checked CUDA calls).
 */
int main() {
    unsigned size = 100, i;
    unsigned size_in_byte = size * sizeof(unsigned);
    unsigned *array = (unsigned*)malloc(size_in_byte);
    if (array == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    unsigned *d_array;
    for (i = 0; i < size; i++) {
        array[i] = i;
    }
    cudaError_t err = cudaMalloc(&d_array, size_in_byte);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
        free(array);
        return 1;
    }
    cudaMemcpy(d_array, array, size_in_byte, cudaMemcpyHostToDevice);
    shiftArray<<<1, 32>>> (d_array, size);
    err = cudaGetLastError();   // surface launch-configuration errors
    if (err != cudaSuccess) {
        fprintf(stderr, "launch: %s\n", cudaGetErrorString(err));
    }
    // blocking D2H copy also synchronizes with the kernel
    cudaMemcpy(array, d_array, size_in_byte, cudaMemcpyDeviceToHost);
    for (i = 0; i < size; i++) {
        printf("array[%d] = %d\n", i, array[i]);
    }
    cudaFree(d_array);
    free(array);
    return 0;
}
|
20,513 | #include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#define BLOCKSIZE 16
// Tiled n x n integer matrix multiply: d_res = d_a * d_b (row-major).
// Expects blockDim == (BLOCKSIZE, BLOCKSIZE).
// BUG FIX: the original guarded tile loads only with `idx >= n*n`, so a
// partially out-of-range tile (n not a multiple of BLOCKSIZE) wrapped into
// the NEXT row of the matrix and poisoned the dot product.  Row and column
// must be bounds-checked independently.
__global__ void Cuda_Mult(int *d_a, int *d_b, int *d_res, int n){
    __shared__ int T1[BLOCKSIZE][BLOCKSIZE];
    __shared__ int T2[BLOCKSIZE][BLOCKSIZE];
    int R = blockIdx.y * BLOCKSIZE + threadIdx.y;   // output row
    int C = blockIdx.x * BLOCKSIZE + threadIdx.x;   // output column
    int Temp = 0;
    for (int i = 0; i < gridDim.x; ++i){
        int aCol = i * BLOCKSIZE + threadIdx.x;     // column read from d_a
        T1[threadIdx.y][threadIdx.x] = (R < n && aCol < n) ? d_a[R * n + aCol] : 0;
        int bRow = i * BLOCKSIZE + threadIdx.y;     // row read from d_b
        T2[threadIdx.y][threadIdx.x] = (bRow < n && C < n) ? d_b[bRow * n + C] : 0;
        __syncthreads();   // tiles fully loaded before use
        for (int k = 0; k < BLOCKSIZE; ++k) {
            Temp += T1[threadIdx.y][k] * T2[k][threadIdx.x];
        }
        __syncthreads();   // all reads done before the next load overwrites
    }
    if(R < n && C < n){
        d_res[R * n + C] = Temp;
    }
}
// Reference CPU multiply of two n x n row-major integer matrices:
// res = a * b.  (Fixed the accidental double literal `0.0` used to
// initialize an int accumulator.)
void CPU_Mult(int *a, int *b, int *res, int n){
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int ans = 0;
            for (int h = 0; h < n; h++) {
                ans += a[i * n + h] * b[h * n + j];
            }
            res[i * n + j] = ans;
        }
    }
}
/*
 * Compares a GPU tiled matrix multiply against a plain CPU implementation.
 * Usage: prog -o S  -> CPU (serial) path; anything else runs the GPU path.
 */
int main(int argc, char* argv[]){
    int option = 1;   // 1 = GPU, 0 = CPU
    // BUG FIX: the original tested argc>1 but then read argv[2] (usage is
    // "-o S", so the mode letter is the third argument) -- an out-of-bounds
    // read whenever only one extra argument was supplied.
    if(argc > 2){
        if(*argv[2] == 'S'){
            option = 0;
        }
    }
    //Fixed seed for illustration
    int N = 100;
    srand(1234);
    //allocate pinned host memory (faster H2D/D2H transfers)
    int *A, *B, *C, *new_C;
    cudaMallocHost((void **) &A, sizeof(int)*N*N);
    cudaMallocHost((void **) &B, sizeof(int)*N*N);
    cudaMallocHost((void **) &C, sizeof(int)*N*N);
    // randomly initialize matrix A
    for (int i = 0; i < N; ++i){
        for (int j = 0; j < N; ++j){
            A[i*N + j] = (double)rand()/(double)(RAND_MAX/N);
        }
    }
    // randomly initialize matrix B
    for (int i = 0; i < N; ++i){
        for (int j = 0; j < N; ++j){
            B[i*N + j] = (double)rand()/(double)(RAND_MAX/N);
        }
    }
    float gpu_elapsed_time;
    float cpu_elapsed_time;
    //events used to time whichever path runs
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if(option==1){
        printf("To run serial mult on cpu use: %s -o S\n",argv[0]);
        //device buffers
        int *dev_A, *dev_B, *dev_C;
        cudaMalloc((void **) &dev_A, sizeof(int)*N*N);
        cudaMalloc((void **) &dev_B, sizeof(int)*N*N);
        cudaMalloc((void **) &dev_C, sizeof(int)*N*N);
        cudaEventRecord(start, 0);
        // copy matrix A and B from host to device mem
        cudaMemcpy(dev_A, A, sizeof(int)*N*N, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_B, B, sizeof(int)*N*N, cudaMemcpyHostToDevice);
        // one BLOCKSIZE x BLOCKSIZE tile of threads per output tile (rounded up)
        unsigned int grid_Rs = (N + BLOCKSIZE - 1) / BLOCKSIZE;
        unsigned int gridev_Cs = (N + BLOCKSIZE - 1) / BLOCKSIZE;
        dim3 dimGrid(gridev_Cs, grid_Rs);
        dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
        //Launch kernel; surface launch-configuration errors immediately
        Cuda_Mult<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, N);
        assert(cudaGetLastError() == cudaSuccess);
        // Transfer result from device to host (synchronizes with the kernel)
        cudaMemcpy(C, dev_C, sizeof(int)*N*N, cudaMemcpyDeviceToHost);
        //stop counting time
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        //time for cuda evaluation
        cudaEventElapsedTime(&gpu_elapsed_time, start, stop);
        printf("Time for mat mult of %dx%d . %dx%d on GPU: %f ms.\n\n", N, N, N, N, gpu_elapsed_time);
        cudaFree(dev_A);
        cudaFree(dev_B);
        cudaFree(dev_C);
    }
    else{
        cudaMallocHost((void **) &new_C, sizeof(int)*N*N);
        cudaEventRecord(start, 0);
        //plain CPU multiplication
        CPU_Mult(A, B, new_C, N);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        //time for cpu evaluation
        cudaEventElapsedTime(&cpu_elapsed_time, start, stop);
        printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", N, N, N, N, cpu_elapsed_time);
        cudaFreeHost(new_C);
    }
    cudaEventDestroy(start);   // the original leaked both events
    cudaEventDestroy(stop);
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    return 0;
}
20,514 | #include <iostream>
#include <math.h>
#include <cstdlib>
#include <sys/time.h>
#include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
// #include <stdio.h>
// #include <math.h>
// #include <stdlib.h>
// #include <time.h>
#include <string.h>
#include <stdbool.h>
int nNodes;
short int* graph;
// Dumps the global nNodes x nNodes adjacency matrix `graph` to `fl`,
// one row per line, entries separated by spaces.
void write(FILE *fl){
    for (int row = 0; row < nNodes; row++) {
        for (int col = 0; col < nNodes; col++) {
            fprintf( fl, "%d ", graph[row * nNodes + col] );
        }
        fprintf( fl, "\n");
    }
}
// Parses a DIMACS-style graph description from stdin into the global `graph`
// (an nNodes x nNodes 0/1 adjacency matrix in CUDA managed memory):
//   "p <kind> <nVertices> <nEdges>" -- allocates and zeroes the matrix
//   "a <from> <to> <weight>"        -- marks a directed edge (1-based input)
// Lines with any other leading token are ignored.
void read (){
    char line[50];
    char* token;
    int size = 50;
    int l;
    int c;
    fgets(line,size,stdin);
    while(!feof(stdin)){
        token = strtok(line," "); // split using space as divider
        if(*token == 'p') {
            token = strtok(NULL," "); // sp
            token = strtok(NULL," "); // no. of vertices
            // nNodes is the number of NODES in the graph.
            nNodes = atoi(token);
            token = strtok(NULL," "); // no. of directed edges
            // printf("N_NODES: %d\n", nNodes);
            // Allocate a NODES x NODES matrix; managed memory so both the
            // CPU parser and the GPU kernel can touch it.
            // graph = (short int*) malloc(nNodes * nNodes * sizeof (short int));
            cudaMallocManaged(&graph, nNodes * nNodes * sizeof (short int));
            if (graph == NULL) {
                printf( "Error in graph allocation: NULL!\n");
                exit( EXIT_FAILURE);
            }
            // Zero the matrix.
            for(int i = 0; i < nNodes;i++){
                for(int j = 0; j < nNodes;j++){
                    graph[i*nNodes+j] = 0;
                }
            }
        }
        // Record the edge from the source node to the destination node.
        else if(*token == 'a'){
            token = strtok(NULL," ");
            // l -> source vertex (1-based in the input, 0-based here)
            l = atoi(token)-1;
            token = strtok(NULL," ");
            // c -> destination vertex
            c = atoi(token)-1;
            token = strtok(NULL," ");
            graph[l*nNodes+c] = 1;
        }
        fgets(line,size,stdin);
    }
}
// Transitive-closure step over the adjacency matrix.
// offset == -1: one thread per pivot k, scanning all (i, j) pairs.
// offset >= 0: tiled variant used when the graph exceeds one block.
//
// NOTE(review): the relaxation test `graph[i*n+k] + graph[k*n+j] < graph[i*n+j]`
// over a 0/1 matrix can only hold when graph[i*n+j] is already 1 and both
// addends are 0, so the write `= 1` is always a no-op; for reachability the
// intended test is presumably `graph[i*n+k] && graph[k*n+j]` -- confirm.
// NOTE(review): in the offset branch, i ranges up to nNodes*nNodes but is
// used as a ROW index (graph[i * nNodes + j]), reading and writing far out
// of bounds; the pivot k = i/blockDim.x is also not iterated sequentially,
// which Floyd-Warshall requires.
__global__
void warshall_CUDA(short int* graph, int nNodes, int offset)
{
    if(offset == -1){
        int k = threadIdx.x + blockIdx.x * blockDim.x;  // pivot vertex
        for (int i = 0; i < nNodes; i++){
            for (int j = 0; j < nNodes; j++){
                if(graph[i * nNodes + k] + graph[k * nNodes + j] < graph[i * nNodes + j])
                    graph[i * nNodes + j] = 1;
            }
        }
    }
    else{
        int i = (threadIdx.x + blockIdx.x * blockDim.x) + (offset * blockDim.x);
        if(i < nNodes * nNodes){
            int k = float(i) / float(blockDim.x);
            for (int j = 0; j < nNodes; j++){
                if(graph[i * nNodes + k] + graph[k * nNodes + j] < graph[i * nNodes + j])
                    graph[i * nNodes + j] = 1;
            }
        }
    }
}
// Reads the graph from stdin, runs the warshall kernel(s), and prints the
// total wall-clock runtime (seconds) to stdout.
int main(int argc, char *argv[])
{
    // Bracket the whole run (parsing + kernels) with wall-clock timestamps.
    timeval start, end;
    gettimeofday(&start, NULL);
    read();
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    const int maxThreads = devProp.maxThreadsDim[0];
    if (nNodes <= maxThreads) {
        // Small graph: a single block, one thread per pivot (offset -1).
        warshall_CUDA<<<1, nNodes>>> (graph, nNodes, -1);
    } else {
        // Large graph: tile the nNodes^2 work items over repeated launches.
        float Blocks = float(nNodes * nNodes) / float(maxThreads);
        float Offset = Blocks / float(maxThreads);
        int iOffset = ceil(Offset);
        for (int i = 0; i < iOffset; i++) {
            warshall_CUDA<<< maxThreads, maxThreads >>> (graph, nNodes, i);
        }
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
    printf("%.4f\n", runtime);
    // write(stdout);
    cudaFree(graph);
    return 0;
}
|
20,515 | #include <iostream>
#include <chrono>
#include <time.h>
#include <algorithm>
#include <math.h>
#define eChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its source location; optionally aborts
// the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code == cudaSuccess) return;   // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
const int N_POINTS = 1e4, N_QUERIES = 1e6, INF = 1e9, RANGE_MAX = 100, N_PRINT = 10;
__host__ void print(int3 *points, int n);
__host__ void generatePoints(int3 *points, int n);
__host__ void buildKDTree(int3 *points, int3 *tree, int n, int m);
__global__ void nearestNeighborGPU(int3 *tree, int treeSize, int3 *queries, int3 *results, int nQueries);
__host__ void printResults(int3 *queries, int3 *results, int start, int end);
// Builds a KD-tree from random points, answers N_QUERIES nearest-neighbor
// queries on the GPU, prints the last few results and the elapsed time.
int main() {
    srand(16);
    // Round the tree capacity up to the next power of two (1-indexed heap).
    int treeCap = 1;
    for (; treeCap < N_POINTS; treeCap <<= 1) {}
    int3 *points;
    int3 *tree;
    int3 *queries;
    eChk(cudaMallocManaged(&points, N_POINTS * sizeof(int3)));
    eChk(cudaMallocManaged(&tree, treeCap * sizeof(int3)));
    eChk(cudaMallocManaged(&queries, N_QUERIES * sizeof(int3)));
    generatePoints(points, N_POINTS);
    buildKDTree(points, tree, N_POINTS, treeCap);
    generatePoints(queries, N_QUERIES);
    auto start = std::chrono::system_clock::now();
    int3 *results;
    eChk(cudaMallocManaged(&results, N_QUERIES * sizeof(int3)));
    nearestNeighborGPU<<<32768, 32>>>(tree, treeCap, queries, results, N_QUERIES);
    eChk(cudaDeviceSynchronize());   // queries finished before reading results
    auto stop = std::chrono::system_clock::now();
    float duration = 1000.0 * std::chrono::duration<float>(stop - start).count();
    printResults(queries, results, N_QUERIES-N_PRINT-1, N_QUERIES);
    std::cout << "Elapsed time in milliseconds : " << duration << "ms\n\n";
    eChk(cudaFree(results));
    eChk(cudaFree(points));
    eChk(cudaFree(tree));
    eChk(cudaFree(queries));
}
// Fills `points` with n random points, each coordinate uniform in
// [1, RANGE_MAX].  NOTE(review): the evaluation order of the three rand()
// calls inside make_int3 is unspecified in C++, so the exact coordinate
// assignment is compiler-dependent even with a fixed seed.
__host__ void generatePoints(int3 *points, int n) {
    for(int i = 0; i < n; i++) {
        points[i] = make_int3(rand()%RANGE_MAX+1, rand()%RANGE_MAX+1, rand()%RANGE_MAX+1);
    }
}
// Recursively builds the implicit (heap-layout, 1-indexed) KD-tree node
// `node` from points[start, end): sorts the span on the axis selected by
// depth (x, y, z cycling), stores the lower median at `node`, and recurses
// into the halves at children node*2 and node*2+1.  Mutates `points`.
__host__ void buildSubTree(int3 *points, int3 *tree, int start, int end, int depth, int node) {
    if(start >= end) return;   // empty span: leave the sentinel in place
    // Sort on the splitting axis for this depth.
    std::sort(points + start, points + end, [depth](int3 p1, int3 p2) -> bool {
        if(depth % 3 == 0) return p1.x < p2.x;
        if(depth % 3 == 1) return p1.y < p2.y;
        return p1.z < p2.z;
    });
    // Lower median becomes this node's point.
    int split = (start + end - 1)/2;
    tree[node] = points[split];
    buildSubTree(points, tree, start, split, depth+1, node*2);        // left:  [start, split)
    buildSubTree(points, tree, split + 1, end, depth+1, node*2 + 1);  // right: (split, end)
}
// Initializes every tree slot with the (-INF, -INF, -INF) "empty" sentinel,
// then builds the tree from the n points; the root lives at index 1.
__host__ void buildKDTree(int3 *points, int3 *tree, int n, int treeSize) {
    for(int slot = 0; slot < treeSize; ++slot)
        tree[slot] = make_int3(-INF, -INF, -INF);
    buildSubTree(points, tree, 0, n, 0, 1);
}
// Prints n points as "[x, y, z] " tokens on a single line.
void print(int3 *points, int n) {
    for(int k = 0; k < n; ++k) {
        const int3 &p = points[k];
        std::cout<<"["<<p.x<<", "<<p.y<<", "<<p.z<<"] ";
    }
    std::cout<<std::endl;
}
// Returns whichever of p2/p3 is closer to p under Manhattan (L1) distance;
// ties go to p3, matching the strict `<` of the original.
__device__ int3 getCloser(int3 p, int3 p2, int3 p3)
{
    int d2 = abs(p.x - p2.x) + abs(p.y - p2.y) + abs(p.z - p2.z);
    int d3 = abs(p.x - p3.x) + abs(p.y - p3.y) + abs(p.z - p3.z);
    return (d2 < d3) ? p2 : p3;
}
// Approximate nearest-neighbor lookup in the implicit KD-tree.
// Compares the query to the current node on the axis for `depth` (x/y/z
// cycling) and descends left when the node's coordinate is smaller, right
// when it is larger; (-INF,-INF,-INF) children mark empty slots and stop
// the walk.  NOTE(review): there is no backtracking into the sibling
// subtree and equal coordinates end the descent, so this is a heuristic,
// not an exact nearest-neighbor search.
__device__ int3 findNearestNeighbor(int3 *tree, int treeSize, int treeNode, int depth, int3 query)
{
    int3 node = tree[treeNode];
    int val1, val2;   // splitting-axis coordinate of the node and the query
    if (depth % 3 == 0)
    {
        val1 = node.x;
        val2 = query.x;
    }
    else if (depth % 3 == 1)
    {
        val1 = node.y;
        val2 = query.y;
    }
    else
    {
        val1 = node.z;
        val2 = query.z;
    }
    if ((val1 < val2) && (treeNode * 2 < treeSize))
    {
        // Query lies on the left side; recurse if the child slot is occupied.
        int3 leftChild = tree[treeNode * 2];
        if (leftChild.x != -INF && leftChild.y != -INF && leftChild.z != -INF)
        {
            return getCloser(query, node, findNearestNeighbor(tree, treeSize, treeNode * 2, depth + 1, query));
        }
    }
    else if ((val1 > val2) && (treeNode * 2 + 1 < treeSize))
    {
        int3 rightChild = tree[treeNode * 2 + 1];
        if (rightChild.x != -INF && rightChild.y != -INF && rightChild.z != -INF)
        {
            return getCloser(query, node, findNearestNeighbor(tree, treeSize, treeNode * 2 + 1, depth + 1, query));
        }
    }
    // Leaf reached, empty child, or equal coordinate: current node is the answer.
    return node;
}
// One thread per query: looks up an (approximate) nearest neighbor for
// queries[i] and stores it in results[i].
__global__ void nearestNeighborGPU(int3 *tree, int treeSize, int3 *queries, int3 *results, int nQueries) {
    int q = blockIdx.x * blockDim.x + threadIdx.x;
    if (q >= nQueries) return;   // guard the ragged tail
    results[q] = findNearestNeighbor(tree, treeSize, 1, 0, queries[q]);
}
// Prints each query in [start, end) with its reported neighbor and the
// Euclidean distance between them.
__host__ void printResults(int3 *queries, int3 *results, int start, int end) {
    for(int i = start; i < end; i++) {
        int3 q = queries[i];
        int3 r = results[i];
        double dist = sqrt(pow(q.x - r.x, 2) + pow(q.y - r.y, 2) + pow(q.z - r.z, 2));
        std::cout<<"query: ["<<q.x<<", "<<q.y<<", "<<q.z<<"] ";
        std::cout<<", result: ["<<r.x<<", "<<r.y<<", "<<r.z<<"] ";
        std::cout<<", distance: "<<dist;
        std::cout<<std::endl;
    }
}
20,516 | #include "includes.h"
// One update of the elementary cellular automaton known as rule 110:
// writes the successor state of a cell from its three predecessors.
// Inputs outside {0, 1} leave *res untouched, exactly like the original
// exhaustive if/else chain.
__device__ void applyRule(char* left, char* middle, char* right, char* res){
    char a = *left;
    char b = *middle;
    char c = *right;
    if ((a == 0 || a == 1) && (b == 0 || b == 1) && (c == 0 || c == 1)) {
        // Pack the neighborhood into 3 bits; bit k of 0x6E (binary 01101110)
        // is the rule-110 output for pattern k = (a,b,c).
        int pattern = (a << 2) | (b << 1) | c;
        *res = (char)((0x6E >> pattern) & 1);
    }
}
// Evolves a 1-D cellular automaton: row 0 of cellData is the initial
// generation; each subsequent row y is computed from row y-1.  Threads of
// the (single) block stride across the columns; out-of-range neighbors are
// treated as 0.  The barrier at the end of each row is uniform -- every
// thread runs the same y loop -- so generations stay strictly ordered.
__global__ void computeCell(char* cellData, unsigned int* width, unsigned int* height)
{
    int y = 0;
    int x = 0;
    /*
    printf("width : %d\n", *width);
    printf("height : %d\n", *height);
    printf("dimblock : %d\n", blockDim.x);
    printf("threadid : %d\n", threadIdx.x);
    */
    for(y=1; y < (*height); ++y){
        // Each thread handles columns threadIdx.x, threadIdx.x+blockDim.x, ...
        for(x=threadIdx.x; x < (*width); x += blockDim.x){
            // Neighborhood from the previous generation; borders read as 0.
            char left = ((x-1)>=0) ? cellData[(x-1) + (y-1) * (*width)] : 0;
            char middle = cellData[x + (y-1) * (*width)];
            char right = ((x+1) < *width) ? cellData[(x+1) + (y-1) * (*width)] : 0;
            //printf("left : (%d, %d) => %d : %d\n", x-1, y-1, (x-1) + (y-1) * (*width), left);
            //printf("middle : (%d, %d) => %d : %d\n", x, y-1, (x) + (y-1) * (*width), middle);
            //printf("right : (%d, %d) => %d : %d\n", x+1, y-1, (x+1) + (y-1) * (*width), right);
            applyRule(&left, &middle, &right, &cellData[x + y * (*width)]);
            //printf("res : (%d, %d) => %d : %d\n", x, y, x + y * (*width), cellData[x+y*(*width)]);
            //printf("-----------\n");
        }
        // Whole generation y finished before any thread starts y+1.
        __syncthreads();
    }
    /*
    for(y=0; y < *height; ++y){
        for(x=0; x < *width; ++x){
            printf("(%d, %d) = %d\n", x, y, cellData[x+y*(*width)]);
        }
    }
    */
}
20,517 | #include <cuda.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <future>
#include <mutex>
#include <stdio.h>
// This works fine with a mutex, but crashes with a sigbus error when not using a mutex
// #define USE_MUTEX
#ifdef USE_MUTEX
std::mutex m;
#endif
// Minimal kernel used only to exercise a launch + device printf from a
// worker thread; prints one line per launch.
__global__ void testKernel() {
    printf("Thread Kernel running\n");
}
// Launches the trivial kernel and blocks until it completes, reporting any
// synchronization failure on stdout.
void testCuda() {
    testKernel<<<1,1>>>();
    if (cudaDeviceSynchronize() != cudaSuccess) {
        printf("SYNC FAILED\n\n\n");
    }
}
// Worker that endlessly launches the test kernel from its own host thread,
// optionally serialized against main() by the global mutex.
struct MyThread {
    void run() {
        cudaFree(NULL); // should initialize device context in this thread
        for (int threadLoop = 0; ; ++threadLoop) {
#ifdef USE_MUTEX
            m.lock();
#endif
            printf("Thread Run (loop %d)\n", threadLoop);
            testCuda();   // launch + sync one kernel
#ifdef USE_MUTEX
            m.unlock();
#endif
            usleep(0);    // yield so main() can interleave
        }
    }
};
// Main thread: runs the kernel-launching worker asynchronously while
// endlessly allocating and touching managed memory, to reproduce the
// SIGBUS seen without the mutex.
int main(int argc, char** argv) {
    MyThread thread;
    auto threadFuture = std::async(std::launch::async, &MyThread::run, thread);
    for (int loop = 0; ; ++loop) {
#ifdef USE_MUTEX
        m.lock();
#endif
        int* temp = nullptr;
        printf("*** Main Allocating (loop = %d)\n", loop);
        if (cudaMallocManaged(&temp, sizeof(int)) != cudaSuccess) {
            printf("Failed to cudaMallocManaged()\n");
            return -1;
        }
        *temp = 0; // <-- SIGBUS occurs here if don't use a mutex
        printf("*** Main Finished Allocating value: %d\n", *temp);
#ifdef USE_MUTEX
        m.unlock();
#endif
        usleep(0);
    }
}
20,518 | #include "includes.h"
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
//Blocks
#define BLOCKS 32
/*
 * Kernel computing one partial step of a layer's forward pass:
 *   result[x] += input[y] * matrix[y*hidden_size + x]
 * In:
 *  input: input activations (input_size elements)
 *  matrix: weights, row-major input_size x hidden_size
 *  result: output accumulator (hidden_size elements)
 */
__global__ void get_layer(double *input, double *matrix, double *result,int input_size, int hidden_size){
    // x indexes the output neuron, y the input element.
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x < hidden_size && y < input_size)
        // BUG FIX: every thread sharing the same x updates result[x]; the
        // original plain `+=` raced across the y dimension and lost updates.
        // double-precision atomicAdd requires compute capability 6.0+.
        atomicAdd(&result[x], input[y]*matrix[y*hidden_size+x]);
}
20,519 | #include "includes.h"
// Block-wise sum reduction: each block sums up to 2*BLOCK_SIZE consecutive
// elements of `input` and writes its partial sum to output[blockIdx.x].
// BLOCK_SIZE presumably comes from includes.h and must equal blockDim.x --
// TODO confirm.
__global__ void total(float *input, float *output, int len){
    __shared__ float partialSum[2*BLOCK_SIZE];
    // Each thread loads two elements, BLOCK_SIZE apart; lanes past `len` load 0.
    unsigned int t=threadIdx.x,start=2*blockIdx.x*BLOCK_SIZE;
    if(start+t<len) partialSum[t] = input[start+t];
    else partialSum[t]=0;
    __syncthreads();
    if(start+BLOCK_SIZE+t<len)partialSum[BLOCK_SIZE+t]=input[start+BLOCK_SIZE+t];
    else partialSum[BLOCK_SIZE+t]=0;
    __syncthreads();
    // Tree reduction; the barrier before the add is the essential one, the
    // second per iteration is redundant but harmless.
    for(unsigned int stride=BLOCK_SIZE;stride>=1; stride>>=1){
        __syncthreads();
        if (t<stride) partialSum[t]+=partialSum[t+stride];
        __syncthreads();
    }
    // Lane 0 publishes this block's partial sum.
    if(t==0) output[blockIdx.x]=partialSum[0];
}
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <stdio.h>
#include <cuda_runtime.h>
/*
Naive implementation.
Allocate one thread for one element in result matrix, processing dot(Arow, Bcol);
*/
/*
 * Naive GEMM: one thread computes one element of the m1 x n2 result as the
 * dot product of a row of d_mat1 (m1 x m2) and a column of d_mat2 (n1 x n2).
 */
__global__ void kMatrixMul0 (float *d_res,
                             float *d_mat1, int m1, int m2,
                             float *d_mat2, int n1, int n2){
    int col = blockDim.x*blockIdx.x + threadIdx.x;
    int row = blockDim.y*blockIdx.y + threadIdx.y;
    if (col >= n2 || row >= m1) return;   // guard the ragged tail
    float acc = 0.0f;
    for (int k = 0; k < m2; ++k)
        acc += d_mat1[row*m2 + k] * d_mat2[k*n2 + col];
    d_res[row*n2 + col] = acc;
};
/* Using shared memory */
/*
 * Shared-memory tiled GEMM.  Assumes blockDim.x == blockDim.y == BLOCK_SIZE
 * and that m1, m2, n2 are multiples of BLOCK_SIZE (no edge guards).
 */
template<int BLOCK_SIZE>
__global__ void kMatrixMul1 (float *d_res,
                             float *d_mat1, int m1, int m2,
                             float *d_mat2, int n1, int n2) {
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    int aStart = BLOCK_SIZE * blockIdx.y * m2;   // first element of this block-row of A
    int aEnd   = aStart + m2;                    // one past its last tile
    int bStart = BLOCK_SIZE * blockIdx.x;        // first element of this block-col of B
    int bStep  = BLOCK_SIZE * n2;                // tile stride walking down B
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float c = 0.0f;
    // BUG FIX: the original loop condition was `a < m2`, so every block with
    // blockIdx.y > 0 (where aStart >= m2) skipped the loop and wrote 0.
    for (int a = aStart, b = bStart; a < aEnd; a += BLOCK_SIZE, b += bStep) {
        // cooperative tile load
        As[ty][tx] = d_mat1[a + m2 * ty + tx];
        Bs[ty][tx] = d_mat2[b + n2 * ty + tx];
        __syncthreads();   // tiles complete before use
        #pragma unroll
        for (int k = 0; k < BLOCK_SIZE; k++){
            c += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();   // all reads done before the next load
    }
    d_res[(BLOCK_SIZE*blockIdx.y+ty)*n2 + BLOCK_SIZE*blockIdx.x + tx] = c;
}
/* Using register blocking */
/* Register-blocking GEMM attempt -- currently unused by MatrixMultGPU0.
 * NOTE(review): this kernel is broken in several ways and should not be
 * enabled as-is:
 *  - acc is REGT_SIZE x REGT_SIZE but is indexed acc[ty+i][tx+j], which is
 *    out of bounds for any thread with ty or tx > 0;
 *  - the accumulator `c` is never updated, so the final store writes 0;
 *  - Ar, Br, reg_tiles, ax and ay are computed but never used;
 *  - the loop condition `a < m2` skips all work for blockIdx.y > 0
 *    (same bug fixed in kMatrixMul1).
 */
template<int BLOCK_SIZE, int REGT_SIZE>
__global__ void kMatrixMul2 (float *d_res,
                             float *d_mat1, int m1, int m2,
                             float *d_mat2, int n1, int n2) {
    //assume squre block
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    //trivial opt: calculate these in CPU, shared by all blocks
    int aStart = blockDim.y*blockIdx.y * m2;
    int bStart = blockDim.x*blockIdx.x;
    int bStep = blockDim.y*n2;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float c = 0.0f;
    int reg_tiles = BLOCK_SIZE / REGT_SIZE;
    float acc[REGT_SIZE][REGT_SIZE];
    float Ar;
    float Br[REGT_SIZE];
    #pragma unroll
    for (int i = 0; i<REGT_SIZE; i++){
        #pragma unroll
        for (int j = 0; j<REGT_SIZE; j++){
            acc[i][j] = 0.0f;
        }
    }
    //block 32x32, each thread work on 2x2
    for (int a = aStart, b = bStart; a < m2; a += blockDim.x, b += bStep) {
        //load data
        #pragma unroll
        for (int i = 0; i<REGT_SIZE; i++){
            #pragma unroll
            for (int j = 0; j<REGT_SIZE; j++){
                As[ty+i][tx+j] = d_mat1[a + m2 * (ty + i) + tx + j];
                Bs[ty+i][tx+j] = d_mat2[b + n2 * (ty + i) + tx + j];
            }
        }
        __syncthreads();
        for (int k=0; k<BLOCK_SIZE; k+=REGT_SIZE){
            #pragma unroll
            for (int i=0; i<REGT_SIZE; i++){
                #pragma unroll
                for (int j = 0; j<REGT_SIZE; j++){
                    int ax = k + j;
                    int ay = k + i;
                    acc[ty+i][tx+j] += As[ty][k] * Bs[k][tx];
                }
            }
        }
        __syncthreads();
    }
    d_res[(blockDim.y*blockIdx.y+ty)*n2+blockDim.x*blockIdx.x+tx] = c;
}
/*
 * Multiplies mat1 (m1 x m2) by mat2 (n1 x n2) on the GPU via the tiled
 * kernel and returns a new[]-allocated m1 x n2 result (caller owns it).
 * The timed span covers kernel + readback, matching the printed label.
 */
float* MatrixMultGPU0(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    float *d_res, *d_mat1, *d_mat2;
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    //malloc the device memory for matrices
    cudaError_t result = cudaMalloc((void**)&d_res, sizeof(float)*m1*n2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat1, sizeof(float)*m1*m2);
    assert (result == cudaSuccess);
    result = cudaMalloc((void**)&d_mat2, sizeof(float)*n1*n2);
    assert (result == cudaSuccess);
    //init source matrices in device memory
    result = cudaMemcpy(d_mat1, mat1, sizeof(float)*m1*m2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    result = cudaMemcpy(d_mat2, mat2, sizeof(float)*n1*n2, cudaMemcpyHostToDevice);
    assert (result == cudaSuccess);
    cudaEventRecord(start, NULL);
    // BUG FIX: the original used N = 64, i.e. 64x64 = 4096 threads per
    // block, which exceeds the 1024-thread hardware limit -- every launch
    // failed and the error was never checked.  32x32 = 1024 is the maximum.
    const int N = 32;
    dim3 block_size(N, N);
    dim3 grid_size((n2 + N - 1) / N,    // grid width in blocks
                   (m1 + N - 1) / N);   // grid height in blocks
    //naive version
    //kMatrixMul0<<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    //sharedMem version (template arg must match the block edge)
    kMatrixMul1<32><<<grid_size, block_size>>>(d_res, d_mat1, m1, m2, d_mat2, n1, n2);
    result = cudaGetLastError();   // surface launch-configuration errors
    assert (result == cudaSuccess);
    //copy back the multiplication result
    float* res = new float[m1*n2];
    result = cudaMemcpy(res, d_res, sizeof(float)*m1*n2, cudaMemcpyDeviceToHost);
    assert (result == cudaSuccess);
    cudaEventRecord(end, NULL);
    cudaEventSynchronize(end);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, end);
    printf("calculation Time:%f ms\n", msecTotal);
    cudaEventDestroy(start);   // the original leaked both events
    cudaEventDestroy(end);
    cudaFree(d_res);
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    return res;
}
/*
 * Reference CPU multiply: returns mat1 (m1 x m2) * mat2 (n1 x n2) as a
 * new[]-allocated m1 x n2 row-major matrix (caller owns it).
 * BUG FIX: the original used the ROW counts as row strides (i*m1+...,
 * x*n1+...); row-major strides are the COLUMN counts (m2 for mat1, n2 for
 * mat2 and res).  The old indexing only worked for equal square matrices.
 */
float* MatrixMulCPU(float *mat1, int m1, int m2, float *mat2, int n1, int n2){
    assert(m2 == n1 && "matrx a's cols != matrix b's rows");
    float* res = new float[m1*n2] ;
    for (int i = 0; i < m1; i++) {
        for (int j = 0; j < n2; j++) {
            float ans = 0.0f;
            for (int x = 0; x < m2; x++) {
                ans += mat1[i*m2+x] * mat2[x*n2+j];
            }
            res[i*n2+j] = ans;
        }
    }
    return res;
}
/*
 * Returns the sum of absolute element-wise differences between two
 * m1 x m2 row-major matrices (0 means identical).
 * BUG FIX: the original summed SIGNED differences, so opposite-sign errors
 * cancelled and very different matrices could compare as equal; it also
 * indexed with m2*y+x over x<m1, y<m2, which skips/duplicates elements for
 * non-square shapes.  Standard row-major traversal with fabsf is used.
 */
float CompareMatrix(float mat1[], float mat2[], int m1, int m2){
    float err = 0;
    for (int i = 0; i < m1; i++){
        for (int j = 0; j < m2; j++){
            int idx = i * m2 + j;
            err += fabsf(mat1[idx] - mat2[idx]);
        }
    }
    return err;
}
/*
 * Fills an m1 x m2 row-major matrix: with the constant d when d >= 0,
 * otherwise with uniform random values in [0, 10).
 * BUG FIX: the original indexed mat[i*m1+j]; for non-square matrices that
 * writes the wrong cells and leaves the tail uninitialized (the row stride
 * is the column count m2, not the row count m1).
 */
void FillMatrix(float mat[], int m1, int m2, float d=1.0){
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            if (d<0.0) {
                mat[i*m2+j] = static_cast<float>(rand())/static_cast<float>(RAND_MAX) * 10.0;
            } else {
                mat[i*m2+j] = d;
            }
        }
    }
}
/*
 * Prints an m1 x m2 row-major matrix, comma-separated, one row per line.
 * BUG FIX: row stride corrected from m1 to m2 (same defect as FillMatrix);
 * the old indexing printed wrong cells for non-square matrices.
 */
void PrintMatrix(float mat[], int m1, int m2){
    for (int i=0; i<m1; i++){
        for (int j=0; j<m2; j++){
            printf("%f,", mat[i*m2+j]);
        }
        printf("\n");
    }
}
/* Driver: fills two 1024x1024 matrices with 1.0 and multiplies them on the GPU. */
int main(int argc, char *argv[]){
    int m1 = 1024;
    int m2;
    int n1 = m2 = 1024;
    int n2 = 1024;
    float *matrix_a = new float[m1*m2];
    float *matrix_b = new float[n1*n2];
    FillMatrix(matrix_a, m1, m2);
    FillMatrix(matrix_b, n1, n2);
    //float *ref = MatrixMulCPU(matrix_a, m1, m2, matrix_b, n1, n2);
    float *res = MatrixMultGPU0(matrix_a, m1, m2, matrix_b, n1, n2);
    //printf("error:%f\n", CompareMatrix(ref, res, m1, n2));
    //PrintMatrix(res, m1, m2);
    delete[] matrix_a;
    delete[] matrix_b;
    delete[] res;   // BUG FIX: was `delete res` on new[]-allocated memory (UB)
    //delete[] ref;
}
|
20,521 | #include "includes.h"
// Strided gather/scatter copy: arr2[i*step2] = arr1[i*step1] for every
// i in [0, n_elem); one thread per element.
__global__ void BaseNeuronGetFloatArray(float *arr1, float *arr2, int n_elem, int step1, int step2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n_elem) return;   // guard the ragged tail
    arr2[i * step2] = arr1[i * step1];
}
20,522 | //pass
//--gridDim=1 --blockDim=2 --only-divergence
__device__ unsigned int x = 0;
// GPUVerify fixture: both threads atomically bump the device counter.
// atomicInc(&x, 1) increments x but wraps the result back to 0 once the old
// value reaches the limit (old >= 1), per the CUDA atomicInc contract.
__global__ void f()
{
    atomicInc(&x, 1);
}
|
20,523 | #include <iostream>
#include <cuda_runtime.h>
#include<cmath>
const double NEWTON_G = 6.67384e-11;
const double SOFTENING = 1e-9f;
__constant__ double NEWTON_GG = 6.67384e-11;
__constant__ double SOFTENINGG = 1e-9f;
// Writes B (body count) followed by the 5 x (size/5) attribute table
// (mass, x, y, vx, vy -- one attribute per line) to `filename`.
void writeSoA(double** f, int B, int size, const char *filename){
    FILE* out = fopen(filename, "w");
    fprintf(out, "%d\n", B);
    const int cols = size / 5;
    for (int attr = 0; attr < 5; ++attr) {
        for (int body = 0; body < cols; ++body)
            fprintf(out, "%lf ", (*f)[body + attr * cols]);
        fprintf(out, "\n");
    }
    fclose(out);
}
// Reads B (body count) and the 5*B attribute values from `filename`;
// *f receives a new[]-allocated array owned by the caller.
void ReadSoA(double** f, int* B, const char *filename) {
    FILE* in = fopen(filename, "r");
    fscanf(in, "%d", B);
    int total = (*B) * 5; // 5 attributes: mass, pos_x, pos_y, vel_x, vel_y
    double* values = new double[total];
    for (int k = 0; k < total; k++) {
        fscanf(in, "%lf ", &(values[k]));
    }
    *f = values;
    fclose(in);
}
// Prints `size` doubles on one line as "[v0 v1 ... ]".
void printArray(int size, double *arr) {
    printf("[");
    for (int k = 0; k < size; k++)
        printf("%lf ", arr[k]);
    printf("]\n");
}
// One Euler step of the 2-D gravitational N-body simulation on the CPU.
// `f`/`fout` use SoA layout [mass | x | y | vx | vy], size = 5 * bodies.
// `T` (current step) is kept for interface compatibility; it is unused.
void N_body_CPU(int size, double delta_t, double *f, double *fout, int T){
    int n = size / 5;   // number of bodies
    for (int body_i = 0; body_i < n; body_i++){
        double mass1 = f[body_i];
        double x1 = f[body_i + n];
        double y1 = f[body_i + n*2];
        double vx1 = f[body_i + n*3];
        double vy1 = f[body_i + n*4];
        double Fx = 0;
        double Fy = 0;
        // Accumulate the force from every other body.
        for (int j = 0; j < n; j++){
            if (j == body_i) continue;   // skip self (zero distance)
            double mass2 = f[j];
            double x2 = f[j + n];
            double y2 = f[j + n*2];
            // Softening keeps the denominator finite at close range.
            double distance = sqrt( pow(x2-x1, 2) + pow(y2-y1, 2) + pow(SOFTENING, 2) );
            // BUG FIX: atan((y2-y1)/(x2-x1)) folds quadrants II/III onto
            // I/IV, flipping the force direction whenever x2 < x1;
            // atan2 preserves the quadrant.
            double angle = atan2(y2-y1, x2-x1);
            Fx += NEWTON_G*mass1*mass2/(pow(distance, 2)) * cos(angle);
            Fy += NEWTON_G*mass1*mass2/(pow(distance, 2)) * sin(angle);
        }
        double new_vx1 = vx1 + Fx*delta_t/mass1;
        double new_vy1 = vy1 + Fy*delta_t/mass1;
        // Mass is copied through unchanged; a separate array would avoid this.
        fout[body_i] = mass1;
        fout[body_i + n] = x1 + new_vx1*delta_t;    //new x
        fout[body_i + n*2] = y1 + new_vy1*delta_t;  //new y
        fout[body_i + n*3] = new_vx1;               //new vx
        fout[body_i + n*4] = new_vy1;               //new vy
    }
}
// One Euler step of the N-body simulation, one thread per body.
// SoA layout [mass | x | y | vx | vy], size = 5 * bodies.  Note that here
// Fx/Fy accumulate accelerations (this body's mass is not multiplied in).
__global__ void N_body_GPU(int size, double delta_t, double *f, double *fout){
    int body_i= threadIdx.x + blockDim.x*blockIdx.x;
    if (body_i < size/5){
        double mass1 = f[body_i];
        double x1 = f[body_i + (size/5)];
        double y1 = f[body_i + (size/5)*2];
        double vx1 = f[body_i + (size/5)*3];
        double vy1 = f[body_i + (size/5)*4];
        double mass2,x2,y2,distance,angle,new_vx1,new_vy1;
        double Fx = 0;
        double Fy = 0;
        // Accumulate acceleration from every other body.
        for (int j = 0; j < size/5; j++){
            if (j != body_i){
                mass2 = f[j];
                x2 = f[j + (size/5)];
                y2 = f[j + (size/5)*2];
                distance = sqrt( (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) + SOFTENINGG);
                // BUG FIX: atan((y2-y1)/(x2-x1)) loses the quadrant,
                // flipping the force direction whenever x2 < x1;
                // atan2 preserves the sign of both components.
                angle = atan2(y2-y1, x2-x1);
                Fx += NEWTON_GG*mass2/(pow(distance, 2)) * cos(angle);
                Fy += NEWTON_GG*mass2/(pow(distance, 2)) * sin(angle);
            }
        }
        new_vx1 = vx1 + Fx*delta_t;
        new_vy1 = vy1 + Fy*delta_t;
        // Mass passes through unchanged.
        fout[body_i] = mass1;
        fout[body_i + (size/5)] = x1 + new_vx1*delta_t;    //new x
        fout[body_i + (size/5)*2] = y1 + new_vy1*delta_t;  //new y
        fout[body_i + (size/5)*3] = new_vx1;               //new vx
        fout[body_i + (size/5)*4] = new_vy1;               //new vy
    }
}
// "Fast" variant: runs all T steps inside one launch, caching mass/x/y in
// dynamic shared memory (B*3 doubles, see the launch in main).
// NOTE(review): `datos` is indexed by the GLOBAL body index, so this is only
// valid when gridDim.x == 1; __syncthreads() sits inside the body_i guard
// (divergent barrier if the block has excess threads); positions are
// rewritten at the bottom of each step with no barrier before other threads
// re-read them (race); autx/Ax are used after the loop and are
// uninitialized when T == 0; the atan(ry/rx) quadrant problem of the other
// kernels is present here too; one auty load is duplicated.
__global__ void N_body_GPU_F(int size, double delta_t, double *f, double *fout,int T){
    int body_i= threadIdx.x + blockDim.x*blockIdx.x;
    if (body_i<size/5){
        extern __shared__ double datos[];
        // cache mass, x, y for this body in shared memory
        datos[body_i ]= f[body_i];
        datos[body_i+ (size/5) ]= f[body_i + (size/5)];
        datos[body_i+ (size/5)*2]= f[body_i + (size/5)*2];
        double autx,auty,rx,ry,vx,vy;
        vx=f[body_i+ (size/5)*3];
        vy=f[body_i+ (size/5)*4];
        double angle;
        double Ax,Ay;
        fout[body_i]=datos[body_i];   // mass passes through unchanged
        for (int t = 0; t < T; ++t){
            __syncthreads();
            Ax=0.0,Ay=0.0;
            autx=datos[body_i+ (size/5) ];
            auty=datos[body_i+ (size/5)*2];
            auty=datos[body_i+ (size/5)*2];
            // accumulate acceleration from every other body
            for (int i = 0; i < size/5; ++i){
                if (i!=body_i){
                    rx=autx-datos[i+ (size/5) ];
                    ry=auty-datos[i+ (size/5)*2];
                    angle=atan(ry/rx);
                    rx=datos[i]/sqrt(rx*rx+ry*ry+SOFTENINGG);   // rx reused as |a|
                    Ax += rx*cos(angle);
                    Ay += rx*sin(angle);
                }
            }
            Ax*=NEWTON_GG*delta_t;
            Ay*=NEWTON_GG*delta_t;
            // advance position, then velocity (semi-implicit Euler-ish)
            datos[body_i+ (size/5) ]=autx+vx*delta_t+Ax*delta_t;
            datos[body_i+ (size/5)*2]=auty+vy*delta_t+Ay*delta_t;
            vx=Ax+vx;
            vy=Ay+vy;
        }
        // publish the final state
        fout[body_i+ (size/5) ]=autx+vx*delta_t+Ax*delta_t;
        fout[body_i+ (size/5)*2]=auty+vy*delta_t+Ay*delta_t;
        fout[body_i+ (size/5)*3]=Ax+vx;
        fout[body_i+ (size/5)*4]=Ay+vy;
    }
}
// Driver: reads initial conditions from input.txt, then runs the N-body
// simulation on CPU, GPU (one launch per step) and GPU-F (whole run in one
// launch), timing each and dumping periodic snapshots under data/.
int main() {
    cudaEvent_t ct1, ct2, ct3, ct4;
    clock_t t1, t2;
    double ms;
    char filename[] = "input.txt";
    char filename_out[] = "-CPU-Resultado.txt";
    char filename_aux[30];
    char final[] = "final";
    char directory[] = "data/";
    char directory_aux[30];
    float dt,dt2;
    //int iterator=2;
    int B;   // number of bodies
    double *f, *fout, *fhost, *fhostout, *faux,*ff;
    int grid_size, block_size = 256;
    ReadSoA(&fhost, &B, filename);
    int size = B*5;   // SoA layout: [mass | x | y | vx | vy]
    cudaMalloc((void**)&f, size* sizeof(double));
    cudaMalloc((void**)&ff, size* sizeof(double));
    cudaMalloc((void**)&fout, size* sizeof(double));
    cudaMemcpy(f, fhost, size* sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(ff, fhost, size* sizeof(double), cudaMemcpyHostToDevice);
    int debug = 0;
    if (debug){
        printf("B: %d\n", B );
        printf("size: %d\n", size);
        printArray(size, fhost);
    }
    double delta_t = 0.01;
    fhostout = new double[size];
    if (debug){
        printArray(size, fhost);
    }
    long T = 10000;   // number of time steps
    char integer_string[32];
    char integer_string2[32];
    // NOTE(review): the sprintf calls below format the long values t and T
    // with "%d"/"-%d" -- fine where long == int, undefined elsewhere.
    /*** CPU ***/
    int cpu = 1;
    if (cpu){
        t1 = clock();
        for (long t = 0; t < T; t++){
            N_body_CPU(size, delta_t, fhost, fhostout, t);
            // ping-pong the in/out host buffers
            faux=fhost;
            fhost = fhostout;
            fhostout=faux;
            if (t % 1000 == 0 || t == T-1){
                // Builds "data/<t>-<T>-CPU-Resultado.txt"; the nested strcat
                // calls mutate integer_string/directory_aux in place.
                sprintf(integer_string, "%d", t);
                sprintf(integer_string2, "-%d", T);
                strcpy(filename_aux, filename_out);
                strcpy(directory_aux, directory);
                writeSoA(&fhostout, B, size,
                         strcat(directory_aux, strcat(integer_string, strcat(integer_string2, filename_aux) ) ));
            }
            //printArray(size, fhostout);
            //std::cout << "-----------------------" << std::endl;
        }
        t2 = clock();
        if (debug){
            printArray(size, fhost);
        }
        ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
        std::cout << "Tiempo CPU : " << ms << "[ms]" << std::endl;
        //writeSoA(&fhostout, B, size, strcat(final, filename_out) );
    }
    int long_simulation = 0;
    if (long_simulation){
        T = 20000*20;
    }
    /*** GPU ***/
    int gpu1 = 1;
    if (gpu1){
        char filename_outGPU[] = "-GPU-Resultado.txt";
        grid_size = (int)ceil((float) B / block_size);   // one thread per body
        cudaEventCreate(&ct1);
        cudaEventCreate(&ct2);
        cudaEventRecord(ct1);
        for (long t = 0; t < T; t++){
            N_body_GPU<<<grid_size, block_size>>>(size, delta_t, f, fout);
            // swap device buffers; the freshest state is always in `f`
            faux = fout;
            fout = f;
            f = faux;
            if (t % 1000 == 0 || t == T-1){
                sprintf(integer_string, "%d", t);
                sprintf(integer_string2, "-%d", T);
                strcpy(filename_aux, filename_outGPU);
                strcpy(directory_aux, directory);
                // blocking D2H copy also synchronizes with the kernel
                cudaMemcpy(fhostout, f, size* sizeof(double), cudaMemcpyDeviceToHost);
                writeSoA(&fhostout, B, size,
                         strcat(directory_aux, strcat(integer_string, strcat(integer_string2, filename_aux))) );
            }
        }
        cudaEventRecord(ct2);
        cudaEventSynchronize(ct2);
        cudaEventElapsedTime(&dt, ct1, ct2);
        std::cout << "Tiempo GPU : " << dt << "[ms]" << std::endl;
        cudaMemcpy(fhostout, f, size* sizeof(double), cudaMemcpyDeviceToHost);
        //strcpy(filename_out, "GPU-Resultado.txt");
        //writeSoA(&fhostout, B, size, filename_outGPU);
    }
    /*** GPU Fast ***/
    // Single launch: the kernel iterates all T steps internally, using
    // B*3 doubles of dynamic shared memory (mass, x, y).
    char filename_outFPU[] = "data/FPU-Resultado.txt";
    grid_size = (int)ceil((float) B / block_size);
    cudaEventCreate(&ct3);
    cudaEventCreate(&ct4);
    cudaEventRecord(ct3);
    N_body_GPU_F<<<grid_size, block_size,B*3* sizeof(double)>>>(size, delta_t, ff, fout,T);
    cudaEventRecord(ct4);
    cudaEventSynchronize(ct4);
    cudaEventElapsedTime(&dt2, ct3, ct4);
    std::cout << "Tiempo GPU-F: " << dt2 << "[ms]" << std::endl;
    cudaMemcpy(fhostout, fout, size* sizeof(double), cudaMemcpyDeviceToHost);
    writeSoA(&fhostout, B, size, filename_outFPU);
    delete[] fhostout;
}
|
20,524 | #include <stdio.h>
#include <fstream>
#include <iostream>
#include <stdlib.h>
#include "vector"
#include <sstream>
#include <string>
using namespace std;
#define STD_TEST true
using namespace std;
// One label-propagation round of connected components: each vertex (one
// thread, single block -- indexing uses threadIdx.x only) takes the minimum
// of its own label and its neighbors' labels; *changed is set when any
// vertex still improved.  adj_lists is the concatenation of all adjacency
// lists; sizes[v] is the length of vertex v's list.
// NOTE(review): labels are read and written concurrently by all threads
// without atomics, so a round may observe a mix of old and new labels --
// harmless for monotone min-propagation iterated to a fixed point, but the
// per-round result is nondeterministic.
__global__ void befriend_adjacents(int* adj_lists, int* sizes, int* labels, int* changed) {
    int id = threadIdx.x;
    // Get start of adj list (O(V) prefix scan per thread; a precomputed
    // offsets array would make this O(1)).
    int begin = 0;
    for (int i = 0; i < id; i++) {
        begin += sizes[i];
    }
    int* list = &adj_lists[begin];
    // debug
    // if (sizes[id] == 0) {
    // debug[id] = -1;
    // } else {
    // debug[id] = list[0];
    // }
    int min = labels[id];
    for (int i = 0; i < sizes[id]; i++) {
        int adj_label = labels[list[i]];
        if (adj_label < min) {
            min = adj_label;
            *changed = 1;
        }
    }
    labels[id] = min;
}
// Every vertex starts as its own component, labelled by its thread id.
__global__ void init_labels(int* labels) {
    labels[threadIdx.x] = threadIdx.x;
}
// Dump the final component labels to stdout and to "cuda_output.group".
void print_labels(int* labels, int size) {
    ofstream myfile ("cuda_output.group");
    cout << "labels: ";
    for (int i = 0; i < size; ++i) {
        cout << labels[i] << " ";
        myfile << labels[i] << " ";
    }
    cout << endl;
    myfile << endl;
}
// Pretty-print every adjacency list to stdout and mirror the raw lists
// (one vertex per line) to "cuda_output.adjlist".
void print_lists(int* adj_lists, int size, int* list_sizes) {
    ofstream myfile ("cuda_output.adjlist");
    cout << "adjacency lists:" << endl;
    int* cursor = adj_lists;
    for (int v = 0; v < size; ++v) {
        cout << "[";
        myfile << v << " ";
        for (int k = 0; k < list_sizes[v]; ++k) {
            cout << " " << cursor[k] << " ";
            myfile << cursor[k] << " ";
        }
        cout << "]" << endl;
        myfile << endl;
        cursor += list_sizes[v];   // advance to the next vertex's slice
    }
    myfile.close();
}
// void print_debug(int* debug, int size) {
// cout << "debug: ";
// for (int i = 0; i < size; i++) {
// cout << debug[i] << " ";
// }
// cout << endl;
// }
// adj_lists is flattened to 1d array
// Connected components by iterative minimum-label propagation.
// adj_lists: flattened adjacency data; list_sizes[i]: length of vertex i's
// slice; size: vertex count. Iterates befriend_adjacents until a full pass
// makes no label smaller, then prints the labels via print_labels.
void cc_para(int* adj_lists, int size, int* list_sizes) {
    int* labels = (int*)malloc(size * sizeof(int));
    int* d_labels;
    int list_size_total = 0;
    for (int i = 0; i < size; i++) {
        list_size_total += list_sizes[i];
    }
    cudaMalloc((void**)&d_labels, size * sizeof(int));
    init_labels<<<1, size>>>(d_labels);
    cudaMemcpy(labels, d_labels, size * sizeof(int), cudaMemcpyDeviceToHost);
    int* d_adj_lists;
    cudaMalloc((void**)&d_adj_lists, list_size_total * sizeof(int));
    cudaMemcpy(d_adj_lists, adj_lists, list_size_total * sizeof(int), cudaMemcpyHostToDevice);
    int* d_sizes;
    cudaMalloc((void**)&d_sizes, size * sizeof(int));
    cudaMemcpy(d_sizes, list_sizes, size * sizeof(int), cudaMemcpyHostToDevice);
    int* changed = (int*)malloc(sizeof(int));
    int* d_changed;
    cudaMalloc((void**)&d_changed, sizeof(int));
    *changed = 1;
    // Repeat until a complete sweep leaves every label unchanged.
    while (*changed != 0) {
        *changed = 0;
        cudaMemcpy(d_changed, changed, sizeof(int), cudaMemcpyHostToDevice);
        befriend_adjacents<<<1, size>>>(d_adj_lists, d_sizes, d_labels, d_changed);
        cudaMemcpy(changed, d_changed, sizeof(int), cudaMemcpyDeviceToHost);
    }
    cudaMemcpy(labels, d_labels, size * sizeof(int), cudaMemcpyDeviceToHost);
    print_labels(labels, size);
    // BUG FIX: d_changed was freed twice and d_sizes was never freed.
    cudaFree(d_changed); cudaFree(d_labels); cudaFree(d_sizes); cudaFree(d_adj_lists);
    free(labels); free(changed);
}
// Read comma-separated integers from file_name into *arr, incrementing *len
// once per value (values are echoed to stdout as they are parsed).
// Returns 1 on success, 0 if the file could not be opened.
int populate_array(vector<int>* arr, int* len, string file_name) {
    ifstream infile(file_name.c_str());
    if (!infile.is_open()) {
        cout<<"File failed to open"<<endl;
        return 0;
    }
    string line;
    while (getline(infile, line))
    {
        istringstream ss(line);
        string token;
        // Split the line on commas; each token is one integer.
        while (getline(ss, token, ','))
        {
            ++(*len);
            cout << " " << atoi(token.c_str());
            arr->push_back(atoi(token.c_str()));
        }
        cout << endl;
    }
    infile.close();
    return 1;
}
// Entry point: run connected components either on the hard-coded 11-vertex
// graph (STD_TEST) or on a graph loaded from the rand_graph.* files.
int main() {
// int * adj_lists[3];
// int* list0;
// int list1[1] = {2};
// int list2[2] = {1};
// adj_lists[0] = list0;
// adj_lists[1] = list1;
// adj_lists[2] = list2;
/* Simple Test
int adj_lists[4] = {1, 2, 0, 0};
int size = 4;
int sizes[4] = {2, 1, 1, 0};
*/
if(STD_TEST){
// Flattened adjacency lists for the 11-vertex test graph; sizes[i] is the
// length of vertex i's slice inside adj_lists.
int adj_lists[20] = {1, 2, 0, 3, 0, 3, 1, 2, 5, 4, 7, 8, 6, 10, 6, 9, 10, 8, 7, 8};
int size = 11;
int sizes[11] = {2, 2, 2, 2, 1, 1, 2, 2, 3, 1, 2};
print_lists(adj_lists, size, sizes);
cc_para(adj_lists, size, sizes);
}
else{
vector<int> adj_lists;
vector<int> size_lists;
int adj_list_len = 0;
int size_list_len = 0;
populate_array(&adj_lists,&adj_list_len,"rand_graph.list_vec");
populate_array(&size_lists,&size_list_len,"rand_graph.size_vec");
// The vectors stay alive for the duration of the calls below, so the raw
// data() pointers remain valid.
int* adj_lists_data = adj_lists.data();
int* size_lists_data = size_lists.data();
print_lists(adj_lists_data, size_list_len, size_lists_data);
cc_para(adj_lists_data, size_list_len, size_lists_data);
}
}
|
20,525 | #include "Renderer.cuh"
#include "MathOps.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Placeholder device routines for a planned refactor: the lighting,
// refraction, reflection and intersection logic further below ("To be moved
// into cu... methods") is intended to migrate here. All are unused stubs.
__device__ void cuLight()
{
}
__device__ void cuRefraction()
{
}
__device__ void cuReflection()
{
}
__device__ void cuIntersection()
{
}
// Stub; always returns 0.
__device__ unsigned int cuCastRay()
{
return 0;
}
//To be moved into cu... methods ___
// Ray/sphere intersection: returns the parametric distance along
// rayDirection (assumed normalized) from rayOrigin to the sphere at
// `position` with `radius`, or a negative value when there is no hit
// in front of the ray.
__device__ float intersection(float3 rayOrigin, float3 rayDirection, float3 position, float radius)
{
    float3 dist = position - rayOrigin;
    float b = dot(rayDirection, dist);
    // Discriminant of |o + t*d - c|^2 = r^2.
    float d = b*b - dot(dist, dist) + radius * radius;
    if(d < 0 ) //If the object is behind the ray, return
    {
        return -1;
    }
    // FIX: use the single-precision sqrtf and compute it once
    // (was the double-precision sqrt, evaluated twice).
    float root = sqrtf(d);
    float t0 = b - root;
    float t1 = b + root;
    // Prefer the near hit; if it lies behind the origin, return the far one.
    if(t0 < 0)
    {
        return t1;
    }
    else
    {
        return t0;
    }
}
// Returns true when the sphere at `position` (radius `radius`) occludes the
// ray from rayOrigin toward the light; distanceToLight is the caller's
// light-distance measure.
// NOTE(review): dotDistance is the SQUARED origin->sphere distance and is
// compared against distanceToLight directly; the caller passes
// dot(contactPoint - lightPos), presumably also a squared length — confirm
// the one-argument dot() in MathOps.cuh returns |v|^2.
__device__ bool shadowed(float3 rayOrigin, float3 rayDirection, float distanceToLight,float3 position, float radius)
{
float3 distance = position - rayOrigin;
float dotDistance = dot(distance, distance);
if(dotDistance > distanceToLight)
{
// Potential occluder is farther away than the light itself.
return false;
}
float b = dot(rayDirection, distance);
float d = b*b - dotDistance + radius * radius;
if(d < 0 ) //If the object is behind the ray, return
{
return false;
}
float t = b -sqrtf(d);
if(t < 0)
{
// Near hit is behind the ray origin; try the far intersection.
t = b +sqrtf(d);
if(t < 0)
{
return false;
}
}
return true;
}
// Casts a ray from `start` along `direction` against the `scount` spheres in
// `slist` and returns shading info for the nearest hit: the lambert-shaded
// base color, the contact point and normal, and a fresnel-scaled reflection
// weight. `adx` animates the light position around the scene.
__device__ contactInfo castRay(float3 start, float3 direction, Sphere* slist, int scount, float adx)
{
contactInfo rval;
rval.reflect = false;
// Light orbits the scene as adx advances.
float3 lightPos = {0 +100*__cosf(adx), 100, 100*__sinf(adx)};
// float3 lightPos = {0, 100, -200};
////////////////
rval.reflectionWeight = 0.6f;
////////////////
// Find the nearest positive intersection across all spheres.
float d = -1;
int snum = -1;
for(int i = 0; i < scount; i++)
{
float td = intersection(start, direction, slist[i].position, slist[i].radius);
if((d < 0 || td < d) && td > 0)
{
d = td;
snum = i;
}
}
d-= 0.05f; //Accounts for floating point errors (prevents the contact point from being inside an object)
if(snum > -1)
{
float3 contactPoint = start +direction*d;
float3 cray = getRayDirection(contactPoint, lightPos);
float3 snorm = getRayDirection(slist[snum].position, contactPoint);
//////
rval.startPosition = contactPoint;
rval.normal = snorm;
rval.reflectionWeight = slist[snum].reflectionWeight;
if(!(rval.reflectionWeight == 0))
{
// Scale the base reflectivity by the fresnel term for this view angle.
rval.reflectionWeight = frensel(slist[snum].reflectionWeight, snorm, direction);
}
///////
float intensity = lambert(cray, snorm); //Compute lambert intensity
// Shadow test against every sphere; the loop stops at the FIRST occluder
// but keeps rewriting basicColor while unoccluded.
// NOTE(review): the one-argument dot() is assumed to yield a squared
// length consistent with shadowed()'s comparison — verify in MathOps.cuh.
for(int i = 0; i < scount; i++)
{
if(!shadowed(contactPoint, cray, dot(contactPoint - lightPos), slist[i].position, slist[i].radius)) //add ray length parameter, for shadows ray length is distance to light
{
if(intensity < 0.07)
{
intensity = 0.07;
}
rval.basicColor = slist[snum].color * intensity;;
rval.reflect = true;
}
else
{
// In shadow: ambient-only contribution.
rval.basicColor = slist[snum].color * 0.07f;
rval.reflect = true;
break;
}
}
}
else
{
rval.basicColor = rgb(0, 0, 0); //Background color //0x222222
//rval.basicColor = rgb(126,192,238);
rval.reflect = false;
}
return rval;
}
//To be moved into cu... methods ^^^
//ADD FRENSEL
// Traces a primary ray plus up to NUM-1 reflection bounces, then composites
// the recorded bounce colors back-to-front using each hit's reflection weight.
__device__ uchar4 cuTraceRay(float3 startPosition, float3 startDirection, Sphere* sslist, int scount,float adx)
{
float3 dir = startDirection;
contactInfo inft;
inft = castRay(startPosition, dir, sslist, scount, adx);
if(true)
{
// Maximum number of ray segments (primary ray + reflections).
#define NUM 5
uchar4 rcols[NUM];
float rw[NUM];
int i = 0;
// Forward pass: follow reflections, recording color and weight per bounce.
while(i < NUM)
{
rcols[i] = inft.basicColor;
rw[i] = inft.reflectionWeight;
if(inft.reflect)
{
dir = reflect(dir, inft.normal);
normalize(dir);
inft = castRay(inft.startPosition, dir, sslist, scount, adx);
}
else
{
break;
}
i++;
}
// Backward pass: blend each bounce into its parent by reflection weight.
while(i >0)
{
rcols[i-1] = rcols[i-1]*(1-rw[i-1]) + rcols[i] * rw[i-1];
i--;
}
return rcols[0];
}
else
{
// Unreachable (left over from an earlier no-reflection code path).
return inft.basicColor;
}
}
// Renders one pixel with aa x aa supersampling: traces one jittered ray per
// sub-sample and box-filters (averages) the resulting colors.
// NOTE(review): the even-aa branch sets offs = aa and is marked "finish" by
// the author — the sub-sample offsets look skewed for even aa; only odd aa
// appears correct. Confirm before relying on even sample counts.
__device__ uchar4 cuRenderPixelAA(int pixelX, int pixelY, int width, int height, int aa,float3 position, Sphere* sharedslist, int scount, float adx) //adx to be removed
{
uchar4 result;
float offs = 0;
if(aa % 2 == 0)
{
offs = aa;//finish
}
else
{
offs = (int)aa/2;
}
int r = 0;
int g = 0;
int b = 0;
for(int ix = 0; ix < aa; ix++)
{
for(int iy = 0; iy < aa; iy++)
{
// Sub-pixel offset, scaled into normalized screen space (y-aspect based).
float aix = ix - offs;
float aiy = iy - offs;
float normalizedX = (pixelX+aix/(1.5*offs) - 0.5*width)/(0.5*height);
float normalizedY = (pixelY+aiy/(1.5*offs) - 0.5*height)/(0.5*height);
float3 lensLocation = {normalizedX, normalizedY, 1.5};
float3 dir = getRayDirection(make_float3(0, 0, 0), lensLocation);
result = cuTraceRay(position, dir, sharedslist, scount, adx);
r += result.x;
g += result.y;
b += result.z;
}
}
// Average the aa*aa samples per channel.
r /= aa*aa;
g /= aa*aa;
b /= aa*aa;
result = rgb(r,g, b);
return result;
}
// Renders one pixel with a single primary ray through a pinhole lens placed
// at z = 1.5 in normalized screen coordinates (no anti-aliasing).
__device__ uchar4 cuRenderPixel(int pixelX, int pixelY, int width, int height, float3 position, Sphere* sharedslist, int scount, float adx) //adx to be removed
{
float normalizedX = (pixelX - 0.5*width)/(0.5*height);
float normalizedY = (pixelY - 0.5*height)/(0.5*height);
float3 lensLocation = {normalizedX, normalizedY, 1.5};
/*
int oldx = lensLocation.x;
int oldz = lensLocation.z;
int rrot = adx*10;
lensLocation.x = oldx * cosf(rrot) - oldz * sinf(rrot);
lensLocation.z = oldz * sinf(rrot) + oldx * cosf(rrot);
*/
uchar4 result;
float3 dir = getRayDirection(make_float3(0, 0, 0), lensLocation);
result = cuTraceRay(position, dir, sharedslist, scount, adx);
return result;
}
// Per-pixel render kernel: one thread per pixel. The sphere list is staged
// into dynamic shared memory (launch with sizeof(Sphere)*scount bytes).
// renderOffset shifts the surface write horizontally (used for the
// side-by-side stereo path in renderFrame).
__global__ void cuRender(cudaSurfaceObject_t out, int width, int height, int aa, float3 cameraPos, Sphere* slist, int scount, float adx, float ady, int renderOffset)
{
    extern __shared__ Sphere sharedslist[];
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Scene animation, performed by a single block.
    // NOTE(review): this mutates global slist while other blocks in the same
    // launch read it (a race), and blockIdx.x*blockIdx.y==1 selects only
    // block (1,1) — confirm that is the intent.
    if(blockIdx.x *blockIdx.y ==1)
    {
        slist[0].position.x = __sinf(adx) * 100;
        slist[0].position.z = __cosf(adx) * 100;
        //slist[2].position.x += 0.01;
        slist[6].position.z += __sinf(adx);
        //slist[6].radius += 0.5;
    }
    __syncthreads();
    // Stage the sphere list into shared memory.
    // BUG FIX: this copy and its barrier used to sit inside the x/y bounds
    // check, so edge blocks executed a divergent __syncthreads() (undefined
    // behavior) and could leave shared entries unwritten. All threads of the
    // block now participate before the bounds test.
    unsigned int tindex = threadIdx.x*blockDim.y +threadIdx.y;
    if(tindex < scount)
    {
        sharedslist[tindex].position = slist[tindex].position;
        sharedslist[tindex].radius = slist[tindex].radius;
        sharedslist[tindex].reflectionWeight = slist[tindex].reflectionWeight;
        sharedslist[tindex].color = slist[tindex].color;
    }
    __syncthreads();
    if(x < width && y < height)
    {
        uchar4 result;
        if(aa > 1)
        {
            result = cuRenderPixelAA(x, y, width, height, aa, cameraPos, sharedslist, scount, adx);
        }
        else
        {
            result = cuRenderPixel(x, y, width, height, cameraPos, sharedslist, scount, adx);
        }
        surf2Dwrite(result, out, renderOffset* sizeof(uchar4)+ x * sizeof(uchar4), y, cudaBoundaryModeClamp);
    }
}
//To be cleaned up
float adx = 0.00; //remove
float ady = 0.00; //remove
Sphere* slist;
Sphere* devslist;
int tcount = 7;
// Builds the hard-coded demo scene (tcount spheres) on the host and uploads
// a copy to device memory (devslist).
Renderer::Renderer(void)
{
slist = new Sphere[tcount];
// Sphere 0: large reflective orb, orbit-animated in cuRender.
slist[0].position = make_float3(200, 0, 0);
slist[0].radius = 30;
slist[0].reflectionWeight = 0.1f;
slist[0].color = rgb(0xFF);
slist[1].position = make_float3(0, -0, 0);
slist[1].radius = 15;
slist[1].reflectionWeight = 0.0f;
slist[1].color = rgb(0xFF, 0, 0);
slist[2].position = make_float3(10, -30, 0);
slist[2].radius = 10;
slist[2].reflectionWeight = 0.6f;
slist[2].color = rgb(0, 0xFF, 0);
slist[3].position = make_float3(0, -40, 0);
slist[3].radius = 5;
slist[3].reflectionWeight = 0.0f;
slist[3].color = rgb(0xFF);
// Sphere 4: huge sphere acting as the floor plane.
slist[4].position = make_float3(-0, -10000, 0);
slist[4].radius = 9960;
slist[4].reflectionWeight = 0.0f;
slist[4].color = rgb(0xFF);
// slist[5].position = make_float3(20, -30, 0);
// slist[5].radius = 8;
slist[5].position = make_float3(-0, 20, -210);
slist[5].radius = 8;
slist[5].reflectionWeight = 0.0f;
slist[5].color = rgb(0, 0, 0xFF);
// slist[6].position = make_float3(20, 500, 1000);
// slist[6].radius = 800;
// Sphere 6: distant overhead sphere, z-animated in cuRender.
slist[6].position = make_float3(0, 20000, -230); //20
slist[6].radius = 1000;
slist[6].reflectionWeight = 0.0f;
slist[6].color = rgb(0xFF);
cudaMalloc((void**)&devslist, tcount * sizeof(Sphere));
cudaMemcpy(devslist, slist, tcount * sizeof(Sphere), cudaMemcpyHostToDevice);
}
// Choose a 16x16 thread block and a grid just large enough to cover
// width x height pixels, and remember the render dimensions.
void Renderer::setResolution(int width, int height)
{
    blockSize = dim3(16,16); //16 * 16 threads per block
    int gx = (width + blockSize.x-1)/blockSize.x;   // ceil-div
    int gy = (height + blockSize.y-1)/blockSize.y;
    gridSize = dim3( gx, gy);
    renderWidth = width;
    renderHeight = height;
}
// Record whether rendering should use an orthographic projection.
void Renderer::setProjectionMode(bool orthographic)
{
    this->orthographic = orthographic;
}
int ad = 0;
int aa = 1;
// Renders one frame into the given surface. Advances the animation phase
// (adx), then — in the stereo path — renders two half-width views with the
// camera shifted by +/-0.5 on x, writing the right eye at renderWidth/2.
void Renderer::renderFrame(cudaSurfaceObject_t pixels)
{
adx += 0.0045f;
// ady += 0.003f;
float3 cameraPos = {0, 20, -200}; //put in scene
// Dynamic shared memory must hold the whole sphere list per block.
unsigned int smem = sizeof(Sphere)*tcount; //to be replaced
if(true) //3d
{
cameraPos.x -= 0.5f;
cuRender<<<gridSize, blockSize, smem >>>(pixels, renderWidth/2, renderHeight, aa, cameraPos, devslist, tcount, adx, ady, 0);
cameraPos.x += 1;
cuRender<<<gridSize, blockSize, smem >>>(pixels, renderWidth/2, renderHeight, aa, cameraPos, devslist, tcount, adx, ady, renderWidth/2);
}
else
{
// Mono path (currently unreachable behind if(true)).
cuRender<<<gridSize, blockSize, smem >>>(pixels, renderWidth, renderHeight, aa, cameraPos, devslist, tcount, adx, ady, 0);
}
}
// Releases the host-side scene array and its device mirror.
Renderer::~Renderer(void)
{
delete[] slist;
cudaFree(devslist);
}
|
20,526 | #include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <cmath>
#include <time.h>
#include <iostream>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR: file:%s line:%d message:%s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
// Orders doubles by absolute value; used by thrust::max_element to pick the
// pivot row with the largest magnitude.
typedef struct _comparator {
    __device__ __host__ bool operator()(double lhs, double rhs) {
        double lhsMag = std::fabs(lhs);
        double rhsMag = std::fabs(rhs);
        return lhsMag < rhsMag;
    }
} comparator;
// Flatten an (x, y) coordinate into a row-major index for a matrix whose
// rows are `width` elements long.
__device__ __host__ int pos2Dto1D(int x, int y, int width) {
    return y * width + x;
}
// Elimination step of LU decomposition: for pivot index i, subtract
// L(j,i)*U(i,k) from every element C(j,k) with j,k > i.
// 2-D grid-stride loop; elements are addressed via pos2Dto1D(x, y, size).
__global__ void subtractProductKernel(double *mtx, int size, int i) {
int idX = blockDim.x * blockIdx.x + threadIdx.x;
int idY = blockDim.y * blockIdx.y + threadIdx.y;
int offsetX = blockDim.x * gridDim.x;
int offsetY = blockDim.y * gridDim.y;
for (int j = idX + i + 1; j < size; j += offsetX) {
for (int k = idY + i + 1; k < size; k += offsetY) {
mtx[pos2Dto1D(j, k, size)] -= mtx[pos2Dto1D(j, i, size)] * mtx[pos2Dto1D(i, k, size)];
}
}
}
// Divide column `columnIdx` below the diagonal by the pivot element,
// producing the L multipliers in place. Grid-stride over rows
// columnIdx+1 .. size-1; the pivot itself is not modified by this kernel,
// so it is safe to read it once up front.
__global__ void divideKernel(double *mtx, int size, int columnIdx) {
    int stride = blockDim.x * gridDim.x;
    double pivot = mtx[pos2Dto1D(columnIdx, columnIdx, size)];
    for (int row = columnIdx + threadIdx.x + blockIdx.x * blockDim.x + 1;
         row < size; row += stride) {
        mtx[pos2Dto1D(row, columnIdx, size)] /= pivot;
    }
}
// Swap rows strIdx and swapIdx across all `size` columns (used to apply the
// pivot permutation). Grid-stride over columns.
__global__ void swapKernel(double *mtx, int size, int strIdx, int swapIdx) {
    int stride = blockDim.x * gridDim.x;
    for (int col = blockDim.x * blockIdx.x + threadIdx.x; col < size; col += stride) {
        int a = pos2Dto1D(strIdx, col, size);
        int b = pos2Dto1D(swapIdx, col, size);
        double tmp = mtx[a];
        mtx[a] = mtx[b];
        mtx[b] = tmp;
    }
}
// Debug helper: print a size x size matrix with printf, one "%lf "-formatted
// row per output line.
void printSquareMatrix(const double *mtx, const int &size) {
    for (int x = 0; x < size; ++x) {
        for (int y = 0; y < size; ++y)
            printf("%lf ", mtx[pos2Dto1D(x, y, size)]);
        printf("\n");
    }
}
// Debug helper: print `size` ints separated by spaces, newline-terminated.
void printVector(const int *vec, const int &size) {
    for (int i = 0; i < size; ++i)
        printf("%d ", vec[i]);
    printf("\n");
}
// Fill p with the identity permutation: row i initially maps to itself.
void initPermVec(int *p, const int &size) {
    for (int i = 0; i < size; ++i)
        p[i] = i;
}
// Reads a size x size matrix of doubles from stdin, x outer / y inner,
// matching the pos2Dto1D layout used by the kernels.
void readSquareMatrix(double *mtx, const int &size) {
for (int x = 0; x < size; ++x) {
for (int y = 0; y < size; ++y) {
scanf("%lf", &mtx[pos2Dto1D(x, y, size)]); // Check %lf -> %f
}
}
}
// Print the combined LU factors (fixed-point, 11 decimal places) followed by
// the pivot permutation vector on a single line.
void printAnswer(const double *mtx, const int *p, const int &size) {
    std::cout.precision(11);
    // Print C matrix
    for (int x = 0; x < size; ++x) {
        for (int y = 0; y < size; ++y)
            std::cout << std::fixed << mtx[pos2Dto1D(x, y, size)] << " ";
        std::cout << std::endl;
    }
    // Print permutation vector
    for (int i = 0; i < size; ++i)
        std::cout << p[i] << " ";
    std::cout << std::endl;
}
// LU decomposition with partial pivoting on the GPU. Reads the matrix size
// and entries from stdin, factors in place (L strictly below the diagonal,
// U on and above it), and prints the combined factors plus the pivot vector.
int main(int argc, const char **argv) {
// TODO Implement time calculation
comparator compare;
int size; // Size of initial matrix
scanf("%d", &size);
// Read initial matrix
double *mtx = (double *)malloc(sizeof(double) * size * size);
readSquareMatrix(mtx, size);
// printSquareMatrix(mtx, size); // DEBUG
// Initialize permutation vector
int *p = (int *)malloc(sizeof(int) * size);
initPermVec(p, size);
// printVector(p, size); // DEBUG
// Initialize CUDA matrix
double *cudaMtx;
CSC(cudaMalloc(&cudaMtx, sizeof(double) * size * size));
CSC(cudaMemcpy(cudaMtx, mtx, sizeof(double) * size * size, cudaMemcpyHostToDevice));
// Main algorithm
thrust::device_ptr<double> ptr = thrust::device_pointer_cast(cudaMtx);
for (int i = 0; i < size - 1; ++i) {
// Searching for max element (by magnitude) in rows i..size-1 of column i
thrust::device_ptr<double> maxElem = thrust::max_element(ptr + i + i * size,
ptr + i * size + size, compare);
int maxIdx = maxElem - ptr - i * size;
p[i] = maxIdx; // record the pivot row chosen at step i
// Swap i string and string with max element
if (maxIdx != i) {
swapKernel<<<32, 256>>>(cudaMtx, size, i, maxIdx);
}
// Divide column by max element
divideKernel<<<32, 256>>>(cudaMtx, size, i);
// Subtract the product of Cji and Cik from Cjk
subtractProductKernel<<<dim3(32, 32), dim3(32, 32)>>>(cudaMtx, size, i);
}
CSC(cudaMemcpy(mtx, cudaMtx, sizeof(double) * size * size, cudaMemcpyDeviceToHost));
// printSquareMatrix(mtx, size); // DEBUG
printAnswer(mtx, p, size);
CSC(cudaFree(cudaMtx));
free(mtx);
free(p);
return 0;
}
|
20,527 | #include <iostream>
#include <chrono>
#include <stdio.h>
#include <math.h>
#include <chrono>
// A unit-strength point source at (x, y, z); each contributes 1/r to the
// potential evaluated by potential_reduce.
struct Source{
double x;
double y;
double z;
};
// Since sm35 is the targeted platform, and doesn't have float64 atomicAdd implemented,
// We need a custom atomicAdd function
// Software double-precision atomicAdd built on atomicCAS — the canonical
// pattern from the CUDA C++ Programming Guide for pre-SM60 devices, which
// lack a native double atomicAdd. Returns the value at *address before the add.
__device__ double atomicAdd_sm35(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
// Retry until no other thread modified *address between the read and the CAS.
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// Accumulates sum_i 1/|sources[i] - query_point| into sum[0].
// partialSum[blockIdx.x] collects the block-local total; thread 0 of each
// block then folds it into the global sum. The caller must zero sum[0]
// before the launch (main does) and size partialSum to gridDim.x.
__global__ void
potential_reduce(
struct Source query_point,
struct Source *sources,
const int N,
double *partialSum,
double *sum
){
    if(threadIdx.x==0){
        partialSum[blockIdx.x]=0;
    }
    __syncthreads();
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<N){
        // Point-source contribution 1/r; plain products instead of pow()
        // are both cheaper and exact for squaring.
        double dx = sources[i].x - query_point.x;
        double dy = sources[i].y - query_point.y;
        double dz = sources[i].z - query_point.z;
        double r = sqrt(dx * dx + dy * dy + dz * dz);
        // Block Sum
        atomicAdd_sm35(&partialSum[blockIdx.x], 1.0 / r);
    }
    // BUG FIX: this barrier used to sit inside the i<N branch, which (a) is a
    // divergent __syncthreads() in the tail block — undefined behavior — and
    // (b) let thread 0 read partialSum before every thread had added to it.
    // It must be reached by all threads, after all contributions are in.
    __syncthreads();
    if(threadIdx.x==0){
        // Global Sum
        atomicAdd_sm35(&sum[0],partialSum[blockIdx.x]);
    }
}
// Builds a 10m x 2m x 2m box of 31200 surface point sources in unified
// memory, then evaluates the summed 1/r potential at 10 query points
// stepping along +x, printing each result. Wall-clock timed with chrono.
int main(int argc, char **argv)
{
auto start = std::chrono::system_clock::now();
int N = 31200;
struct Source *sources;
cudaMallocManaged(&sources,N * sizeof(struct Source));
// Create a 10m x 2m x 2m box with 31200 point source on the surface
// Top/bottom faces (y = -1 and y = +1): 200 x 38 cells each.
int count = 0;
for(int i=-100;i<100;i++){
for(int j=-19;j<19;j++){
double x=i*0.05+0.025;
double y=-1.0;
double z=j*0.05+0.025;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
y=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
// Side faces (z = -1 and z = +1): 200 x 40 cells each.
for(int i=-100;i<100;i++){
for(int j=-20;j<20;j++){
double x=i*0.05+0.025;
double y=j*0.05+0.025;
double z=-1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
z=1.0;
sources[count].x = x;
sources[count].y = y;
sources[count].z = z;
count++;
}
}
int blockSize = 256;
int numBlocks = (N+blockSize -1)/blockSize;
double *partialSum;
double *sum;
cudaMallocManaged(&partialSum,numBlocks*sizeof(double));
cudaMallocManaged(&sum,sizeof(double));
struct Source query_point;
query_point.x = -2.0;
query_point.y = 0;
query_point.z = 0;
// auto start = std::chrono::system_clock::now();
for(int i=0;i<10;i++){
// The kernel accumulates into sum[0]; reset it for each query point.
sum[0]=0;
potential_reduce<<<numBlocks,blockSize>>>(query_point,sources,N,partialSum,sum);
cudaDeviceSynchronize();
std::cout
<< "---" << std::endl
<< query_point.x << std::endl
<< query_point.y << std::endl
<< query_point.z << std::endl
<< "---" << std::endl
<< sum[0]
<< std::endl;
query_point.x+=0.5;
}
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "elapsed time: " << elapsed_seconds.count() << "s\n";
} |
20,528 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Enumerate the CUDA devices and query each one's properties.
// Returns 0 on success, 1 if any runtime call fails.
int main()
{
    cudaError_t err;
    // Device number
    int deviceCount = 0;
    err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess)
        return 1;
    /*
    Reference values for a CUDA 3.0 device:
    totalGlobalMem = 2GB
    sharedMemPerBlock = 49152 bytes
    regsPerBlock = 65536
    warpSize = 32
    maxThreadsPerBlock = 1024
    maxThreadsDim = {1024, 1024, 64}
    maxGridSize = {2147483647, 65535, 65535}
    totalConstMem = 65536
    */
    cudaDeviceProp props;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        err = cudaGetDeviceProperties(&props, dev);
        if (err != cudaSuccess)
            return 1;
    }
}
|
20,529 | #include "includes.h"
// Naive dense matrix multiply: c = a * b for square n x n int matrices.
// One thread computes one output element; launch with a 2-D grid/block.
__global__ void matrixMul(int *a, int *b, int *c, int n){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n) return;   // guard the grid tail
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
} |
20,530 |
#include <iostream>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cassert>
#include <vector>
#include <cstdio>
#define BLOCOS 2
#define THREADS 4
#define REPETICOES 4
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
using namespace std;
// Each thread allocates a 256-byte buffer on the device heap, prints its
// address, and frees it (exercises in-kernel new/delete).
__global__ void mallocTest(void)
{
    char* ptr = new char[256]; //(char*)malloc(123);
    // BUG FIX: the format string has three specifiers but four arguments were
    // passed, so %p printed threadIdx.x instead of the pointer value.
    printf("(%d, %d) Ponteiro: %p\n" , blockIdx.x, threadIdx.x, ptr);
    delete[] ptr;
}
// Thread 0 allocates a per-block array on the device heap, every thread
// writes and prints its own slot, and thread 0 frees it. On allocation
// failure all threads of the block observe the same NULL and return together,
// so the remaining barriers stay uniform.
__global__ void mallocTest1(void)
{
__shared__ int* data;
if (threadIdx.x == 0)
data = new int[blockDim.x];
__syncthreads();
if (data == NULL)
return;
data[threadIdx.x] = (threadIdx.x + 1) * 10;
__syncthreads();
printf("(%d, %d) Valor: %d\n", blockIdx.x, threadIdx.x, data[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0)
delete[] data;
}
// Queries the device-heap limit and the free/total device memory, prints
// them, then runs mallocTest1 on 2 blocks of 4 threads.
int main(int argc, char *argv[]){
size_t free = 0,
total = 0,
tamanhoHeap = 0;
cudaDeviceProp deviceProp;
// Optionally enlarge the device heap before any in-kernel allocation:
// CHECK_ERROR( cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024*1024*1024) );
CHECK_ERROR( cudaGetDeviceProperties(&deviceProp, 0) );
CHECK_ERROR( cudaDeviceGetLimit(&tamanhoHeap, cudaLimitMallocHeapSize) );
CHECK_ERROR(cudaMemGetInfo(&free, &total));
cout << "\nTrabalhando com memoria heap\n" << endl;
cout << "Tamanho do heap: " << (tamanhoHeap / 1024 / 1024) << " MB\n";
cout << "Memoria livre: " << (free / 1024 / 1024) << " MB\n";
cout << "Memoria total: " << (total / 1024 / 1024) << " MB\n";
mallocTest1<<<2, 4>>>();
// Synchronize both to wait for the kernel and to surface any launch error.
CHECK_ERROR(cudaDeviceSynchronize());
return EXIT_SUCCESS;
}
|
20,531 | /*
============================================================================
Filename : algorithm.c
Author : Your name goes here
SCIPER : Your SCIPER number
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU reference: `iterations` Jacobi-style sweeps of a 3x3 box blur over the
// interior of a length x length grid, re-pinning the four center cells to
// 1000 after every sweep (a fixed heat source).
// NOTE(review): input/output are swapped through LOCAL pointers only, so for
// an even iteration count the final data ends up in the caller's `input`
// buffer — confirm callers account for this.
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
// Clamp the four central cells back to the source temperature.
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
// Ping-pong the buffers for the next sweep.
temp = input;
input = output;
output = temp;
}
}
// GPU Optimized function
// GPU skeleton for array_process: the timing scaffolding (H2D copy, compute,
// D2H copy, each bracketed by CUDA events) is in place; the marked sections
// are left empty for the student implementation.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
cudaEventCreate(&cpy_H2D_start);
cudaEventCreate(&cpy_H2D_end);
cudaEventCreate(&cpy_D2H_start);
cudaEventCreate(&cpy_D2H_end);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_end);
/* Preprocessing goes here */
cudaEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
cudaEventRecord(cpy_H2D_end);
cudaEventSynchronize(cpy_H2D_end);
//Copy array from host to device
cudaEventRecord(comp_start);
/* GPU calculation goes here */
cudaEventRecord(comp_end);
cudaEventSynchronize(comp_end);
cudaEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
cudaEventRecord(cpy_D2H_end);
cudaEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
// cudaEventElapsedTime reports milliseconds; divide by 1000 for seconds.
float time;
cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
} |
20,532 | #include <stdio.h>
#include<sys/time.h>
#include <pthread.h>
#define MAX_INITIAL_WEIGHT 1000
#define MAX_INITIAL_RANGE 10000
#define MAX_INITIAL_VELOCITY 100
#define EPS 1e-9f
#define BLOCK_DIM 32
#define G 100
// time stamp function in seconds
// Wall-clock time stamp in seconds with microsecond resolution.
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + (double) tv.tv_usec / 1000000;
}
//initialize velocity and position data
// Fill the body state with uniform random values: A holds positions (x,y,z)
// and mass (w); V holds velocities. NOTE: rand() is never seeded, so every
// run produces the same initial conditions (deterministic, handy for
// comparing runs).
void initData(float4 *A, float3 *V, int noElems){
for (int i = 0; i < noElems; i++){
A[i].w = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_WEIGHT);
A[i].x = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_RANGE);
A[i].y = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_RANGE);
A[i].z = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_RANGE);
V[i].x = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_VELOCITY);
V[i].y = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_VELOCITY);
V[i].z = (float) rand() / (float) (RAND_MAX / MAX_INITIAL_VELOCITY);
}
}
// Accumulates into `ai` the gravitational acceleration exerted on body bi by
// body bj (bj.w is the mass). EPS softens the denominator so self-interaction
// and near-collisions stay finite. Pattern follows GPU Gems 3, chapter 31.
__device__ float3 bodyBodyInteraction(float4 bi, float4 bj, float3 ai)
{
float3 r;
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS;
float distSixth = distSqr * distSqr * distSqr;
float invDistCube = 1.0f/sqrtf(distSixth);
// s = G * m_j / dist^3; multiplying by r yields the acceleration vector.
float s = bj.w * invDistCube * G;
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// Accumulates onto `accel` the accelerations from one tile of bodies staged
// in shared memory.
// NOTE(review): this function declares its OWN __shared__ sharedPosition
// array — a separate allocation from the one d_acce fills — so the positions
// read here appear never to be written. In the GPU Gems 3 original both
// functions share a single extern __shared__ array; confirm and fix.
__device__ float3 tile_calculation(float4 myPosition, float3 accel, int sharedPositionLength)
{
int i;
__shared__ float4 sharedPosition[BLOCK_DIM];
for (i = 0; i < sharedPositionLength; i++) {
accel = bodyBodyInteraction(myPosition, sharedPosition[i], accel);
}
return accel;
}
// device-side acceleration compution
// Computes the acceleration on each body from all noElems bodies, tiling the
// position array through shared memory (one BLOCK_DIM-wide tile at a time).
// Must be called by EVERY thread of the block (it contains __syncthreads()).
// NOTE(review): tile_calculation reads its own, separate __shared__ array —
// see the note there; the staging below only fills this function's array.
__device__ void d_acce(float4 *X, float3 *A, int noElems)
{
    __shared__ float4 sharedPosition[BLOCK_DIM];
    int gtid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: out-of-range threads previously returned before the barriers,
    // making every __syncthreads() below divergent in the tail block
    // (undefined behavior). They now stay in the loop and simply skip the
    // per-body work.
    bool active = (gtid < noElems);
    float4 myPosition = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    if (active) myPosition = X[gtid];
    float3 acc = {0.0f, 0.0f, 0.0f};
    int i, tile;
    for (i = 0, tile = 0; i < noElems; i += blockDim.x, tile++) {
        int idx = tile * blockDim.x + threadIdx.x;
        if (idx < noElems) sharedPosition[threadIdx.x] = X[idx];
        // Clip the final (partial) tile to the number of valid bodies.
        int sharedPositionLength = noElems - tile * blockDim.x;
        if (sharedPositionLength > blockDim.x) sharedPositionLength = blockDim.x;
        __syncthreads();
        if (active) acc = tile_calculation(myPosition, acc, sharedPositionLength);
        __syncthreads();
    }
    // Save the result in global memory for the integration step.
    if (active) A[gtid] = acc;
}
// device-side preprocess the data
// Leapfrog priming step: computes the initial accelerations, then advances
// the velocities by half a step (the initial "kick") so d_inte can integrate.
// Note that d_acce is invoked unguarded (by all threads) before the
// per-body bounds check, unlike in d_inte.
__global__ void d_preprocess(float4* X, float3 *V, float3* A, float dt, int noElems){
d_acce(X, A, noElems);
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < noElems){
V[i].x += 0.5 * A[i].x * dt;
V[i].y += 0.5 * A[i].y * dt;
V[i].z += 0.5 * A[i].z * dt;
}
}
// device-side integration
// One leapfrog integration step: drift positions, recompute accelerations,
// then kick velocities.
__global__ void d_inte(float4* X, float3 *V, float3* A, float dt, int noElems){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Drift: advance positions with the current (half-step) velocities.
    if (i < noElems){
        X[i].x += V[i].x * dt;
        X[i].y += V[i].y * dt;
        X[i].z += V[i].z * dt;
    }
    // Make this block's position writes visible before forces are evaluated.
    __syncthreads();
    // BUG FIX: d_acce contains __syncthreads() and used to be called under
    // the i < noElems guard — a divergent barrier in the tail block. All
    // threads must call it.
    d_acce(X, A, noElems);
    // Kick: advance velocities with the freshly computed accelerations.
    if (i < noElems){
        V[i].x += A[i].x * dt;
        V[i].y += A[i].y * dt;
        V[i].z += A[i].z * dt;
    }
    // NOTE(review): across blocks, X is still read while other blocks update
    // it (there is no grid-wide sync); a two-kernel split would be needed for
    // an exact leapfrog step.
}
// device-side function
// Host driver: primes the integrator, then runs maxIteration leapfrog steps,
// copying the positions back after every step and appending them to
// "position2.txt". Prints the total elapsed wall-clock seconds at the end.
void d_func(float4* d_X, float3 *d_V, float3* d_A, float4 *h_X, float dt, int noElems, int maxIteration){
double timeStampA = getTimeStamp() ;
d_preprocess<<<(noElems + BLOCK_DIM - 1) / BLOCK_DIM, BLOCK_DIM>>>(d_X, d_V, d_A, dt, noElems);
int i = 0;
FILE *f = fopen("position2.txt", "w");
while (i < maxIteration){
i++;
d_inte<<<(noElems + BLOCK_DIM - 1) / BLOCK_DIM, BLOCK_DIM>>>(d_X, d_V, d_A, dt, noElems);
cudaDeviceSynchronize();
// Blocking copy of the updated positions for logging.
cudaMemcpy( h_X, d_X, noElems * sizeof(float4), cudaMemcpyDeviceToHost );
//FILE *f = fopen("position2.txt", "w");
for (int j = 0; j < noElems; j++)
fprintf(f, "%.6f %.6f %.6f\n", h_X[j].x, h_X[j].y, h_X[j].z);
//fclose(f);
}
fclose(f);
double timeStampB = getTimeStamp() ;
printf("%.6f\n", timeStampB - timeStampA);
}
// Usage: prog noElems [dt] [maxIteration]. Allocates pinned host buffers,
// fills them with random body state, uploads to the device, and runs the
// n-body simulation via d_func.
int main( int argc, char *argv[] ) {
// get program arguments
if( argc < 2) {
printf( "Error: wrong number of args\n" ) ;
exit(0) ;
}
int noElems = atoi(argv[1]);
float dt = 0.01;
if (argc > 2) dt = atof(argv[2]);
int maxIteration = 10;
if (argc > 3) maxIteration = atoi(argv[3]);
// alloc memory host-side (pinned, for faster async-capable transfers)
float4 *h_X;
cudaError_t status = cudaMallocHost((void**)&h_X, noElems * sizeof(float4));
if (status != cudaSuccess){
printf("Error: allocating pinned host memory\n");
exit(0);
}
float3 *h_V;
status = cudaMallocHost((void**)&h_V, noElems * sizeof(float3));
if (status != cudaSuccess){
printf("Error: allocating pinned host memory\n");
exit(0);
}
// init matrices with random data
initData(h_X, h_V, noElems) ;
// alloc memory dev-side
float4 *d_X;
float3 *d_A, *d_V;
cudaMalloc( (void **) &d_X, noElems * sizeof(float4) ) ;
cudaMalloc( (void **) &d_A, noElems * sizeof(float3) ) ;
cudaMalloc( (void **) &d_V, noElems * sizeof(float3) ) ;
//transfer data to dev
cudaMemcpy( d_X, h_X, noElems * sizeof(float4), cudaMemcpyHostToDevice );
cudaMemcpy( d_V, h_V, noElems * sizeof(float3), cudaMemcpyHostToDevice );
d_func(d_X, d_V, d_A, h_X, dt, noElems, maxIteration);
// free GPU resources (cudaDeviceReset also releases the pinned host buffers)
cudaFree( d_X );
cudaFree( d_A );
cudaFree( d_V );
cudaDeviceReset() ;
}
|
20,533 | #include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Cuda error checking
////////////////////////////////////////////////////////////////////////////////
// Report a failed CUDA runtime call with a readable message.
// Note: execution continues after the report (no exit).
void SAFE_CALL(cudaError_t err){
    if (err == cudaSuccess) return;
    printf("Error: %s \n", cudaGetErrorString(err));
}
// Check the most recent kernel launch. Launch-configuration errors surface
// through cudaGetLastError(); asynchronous execution errors surface when
// cudaDeviceSynchronize() drains the device.
void KERNEL_ERROR_CHECK(){
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
        printf("\tSync kernel error: %s \n", cudaGetErrorString(errSync));
    if (errAsync != cudaSuccess)
        printf("\tAsync kernel error: %s \n", cudaGetErrorString(errAsync));
}
// Same as KERNEL_ERROR_CHECK() but prefixes any report with a caller-supplied
// message to identify which launch failed.
void KERNEL_ERROR_CHECK(char const *message){
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess){
        printf("%s\n", message);
        printf("\tSync kernel error: %s \n", cudaGetErrorString(errSync));
    }
    if (errAsync != cudaSuccess){
        printf("%s\n", message);
        printf("\tAsync kernel error: %s \n", cudaGetErrorString(errAsync));
    }
}
|
20,534 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
// Allocates a small device buffer, zeroes it with cudaMemset, copies it back,
// and prints the 16 integers (all zeros).
int main(void) {
    // FIX: the count was misleadingly named num_bits — it is a count of ints —
    // and the loop bound duplicated the magic constant 16.
    const int num_elements = 16;
    const int num_bytes = num_elements * sizeof(int);
    int* device_array = 0;
    int* host_array = 0;
    host_array = (int*) malloc(num_bytes);
    cudaMalloc((void**)&device_array, num_bytes);
    // cudaMemset is byte-wise; a 0 fill is exact for ints.
    cudaMemset(device_array, 0, num_bytes);
    cudaMemcpy(host_array, device_array, num_bytes,
    cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_elements; i++) {
        std::cout<<host_array[i]<<std::endl;
    }
    free(host_array);
    cudaFree(device_array);
    return 0;
}
|
20,535 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#define OUTPUT_FILE_NAME_A "q2a.txt"
#define OUTPUT_FILE_NAME_B "q2b.txt"
#define OUTPUT_FILE_NAME_C "q2c.txt"
#define NUM_THREADS_A 32
#define NUM_BLOCKS_A 2
#define NUM_THREADS_B 32
#define NUM_BLOCKS_B 2
// int* fileToArray(char file1[], int* n){
// FILE* fptr = fopen(file1, "r");
// char* str = (char*) malloc(sizeof(char)*2048);
// int token;
// fscanf(fptr, "%d,", n);
// int* array;
// //int* array = malloc(sizeof(int)*(*n));
// cudaMallocManaged(&array, sizeof(int)*(*n));
// for(int i = 0; i < *n; i++){
// fscanf(fptr, "%d,", &token);
// array[i] = token;
// }
// fclose(fptr);
// return array;
// }
// Reads a comma-separated list of ints from `file1` into CUDA unified
// memory. Two passes: count the tokens, then rewind and fill the array.
// On success *n holds the element count and the caller owns the returned
// buffer (release with cudaFree). Returns NULL (and *n == 0) when the
// file cannot be opened.
// Fixes: removed a 2 KB scratch buffer that was malloc'd and leaked on
// every call, and added a NULL check on fopen.
int* fileToArray(char file1[], int* n){
    FILE* fptr = fopen(file1, "r");
    if (fptr == NULL) {
        printf("failed to open file %s\n", file1);
        *n = 0;
        return NULL;
    }
    int token;
    int count = 0;
    // Pass 1: count values so the allocation is sized exactly.
    while (fscanf(fptr, "%d, ", &token) != EOF) {
        count++;
    }
    *n = count;
    int* array;
    cudaMallocManaged(&array, sizeof(int)*(*n));
    // Pass 2: re-read from the start and store each value.
    rewind(fptr);
    for(int i = 0; i < *n; i++){
        fscanf(fptr, "%d, ", &token);
        array[i] = token;
    }
    fclose(fptr);
    return array;
}
// Histogram of hundreds-digits (buckets 0..9) built per block in shared
// memory, then flushed once per block into the global `result` array.
// Assumes input values lie in [0, 999] — larger values index out of
// bounds (TODO confirm against the input file).
// BUG FIX: the shared histogram was never zero-initialized, so the
// atomicAdds accumulated onto garbage.
__global__
void sharedBucket(int* array, int* result, int n) {
    __shared__ int local_array[10];
    // Zero the buckets before any thread accumulates into them.
    if (threadIdx.x < 10) {
        local_array[threadIdx.x] = 0;
    }
    __syncthreads();
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Grid-stride loop: covers all n elements for any launch size.
    for (int i = index; i < n; i += stride) {
        int numHundreds = array[i] / 100;
        atomicAdd((local_array+numHundreds), 1);
    }
    __syncthreads();
    // Thread (0,0,0) of each block merges its histogram into the global one.
    if ((threadIdx.x | threadIdx.y | threadIdx.z) == 0) {
        for (int i = 0; i < 10; i++) {
            atomicAdd((result+i), local_array[i]);
        }
    }
}
// Launches the shared-memory histogram kernel and writes the 10 bucket
// counts to OUTPUT_FILE_NAME_B as a comma-separated line.
// Fixes: the managed `result` buffer was leaked on every call, and on a
// failed fopen the code still wrote through the NULL FILE*.
void computeSharedBucket(int* array, int n) {
    int* result;
    cudaMallocManaged(&result, sizeof(int)*(10));
    for (int i = 0; i < 10; i++) {
        result[i] = 0;
    }
    sharedBucket<<<NUM_BLOCKS_A, NUM_THREADS_A>>>(array, result, n);
    // Managed memory is read on the host below; the kernel must be done.
    cudaDeviceSynchronize();
    FILE *output = fopen(OUTPUT_FILE_NAME_B, "w");
    if (output == NULL) {
        printf("failed to open file %s\n", OUTPUT_FILE_NAME_B);
    } else {
        fprintf(output, "%d", result[0]);
        for(int i = 1; i < 10 ; i++) {
            fprintf(output, ", %d", result[i]);
        }
        fclose(output);
    }
    cudaFree(result);
}
// Global-memory histogram: each value lands in bucket value/100 (0..9).
// Grid-stride loop, so any launch configuration covers all n elements.
__global__
void bucket(int* array, int* result, int n) {
    const int start = blockDim.x * blockIdx.x + threadIdx.x;
    const int step = gridDim.x * blockDim.x;
    for (int i = start; i < n; i += step) {
        atomicAdd(&result[array[i] / 100], 1);
    }
}
// In-place inclusive prefix sum over the first 10 elements of `array`
// (Hillis–Steele). Must be launched as a single block with >= 10 threads;
// `n` is unused but kept for interface compatibility.
// BUG FIX: each step read array[index-d] while thread index-d was writing
// it — a data race. Each step now reads into a register, barriers, then
// writes. Also removed an unused extern __shared__ declaration.
__global__
void prescan(int* array, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    for (int d = 1; d < 10; d *= 2) {
        int addend = 0;
        if (index >= d && index < 10) {
            addend = array[index - d];
        }
        // Barriers are outside the divergent branch so every thread reaches them.
        __syncthreads();
        if (index >= d && index < 10) {
            array[index] += addend;
        }
        __syncthreads();
    }
}
// Runs the global-memory histogram, writes the raw counts to q2a.txt,
// then prefix-sums the counts in place and writes the scan to q2c.txt.
// Fixes: the host read managed memory after the prescan launch without
// synchronizing (a race, and a fault on pre-Pascal GPUs); the managed
// buffer was leaked; a failed fopen was still written through.
void computeBucket(int* array, int n) {
    int* result;
    cudaMallocManaged(&result, sizeof(int)*(10));
    for (int i = 0; i < 10; i++) {
        result[i] = 0;
    }
    bucket<<<NUM_BLOCKS_B, NUM_THREADS_B>>>(array, result, n);
    cudaDeviceSynchronize();
    FILE *output = fopen(OUTPUT_FILE_NAME_A, "w");
    if (output == NULL) {
        printf("failed to open file %s\n", OUTPUT_FILE_NAME_A);
    } else {
        fprintf(output, "%d", result[0]);
        for(int i = 1; i < 10 ; i++) {
            fprintf(output, ", %d", result[i]);
        }
        fclose(output);
    }
    prescan<<<1, 10>>>(result, 10);
    // BUG FIX: must finish before the host reads the scanned values.
    cudaDeviceSynchronize();
    output = fopen(OUTPUT_FILE_NAME_C, "w");
    if (output == NULL) {
        printf("failed to open file %s\n", OUTPUT_FILE_NAME_C);
    } else {
        fprintf(output, "%d", result[0]);
        for(int i = 1; i < 10 ; i++) {
            fprintf(output, ", %d", result[i]);
        }
        fclose(output);
    }
    cudaFree(result);
}
// Entry point: loads the comma-separated input values from inp.txt and
// produces all three output files (plain histogram, shared-memory
// histogram, prefix-summed histogram).
int main(int argc, char* argv[]){
    int n = 0;
    int* array = fileToArray("inp.txt", &n);
    computeBucket(array, n);
    computeSharedBucket(array, n);
    cudaFree(array);
    return 0;
}
|
20,536 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (file header says "Do not
// modify"): evaluates a fixed chain of float expressions over the scalar
// arguments and prints the final value of `comp`. The math exists only to
// exercise the compiler/FPU; intermediate constants deliberately span
// denormal-to-overflow magnitudes.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
comp = (+1.3269E34f - -1.9901E36f / (-0.0f + -1.1501E36f / var_1 * -1.9201E12f));
comp = -1.7101E-36f - (var_2 + var_3);
if (comp < var_4 / acosf(var_5 + (var_6 + log10f(sinf(+1.7934E35f / (-1.4872E34f - (var_7 * +1.0960E36f))))))) {
float tmp_1 = var_8 / +1.8982E-42f + (var_9 / var_10);
float tmp_2 = tanhf(+1.2637E-42f * (var_11 * var_12 * cosf(var_13 / var_14)));
comp += tmp_2 - tmp_1 * var_15 * var_16 / (+0.0f - sinf(+1.3280E34f / (var_17 + (-1.0570E-42f / (var_18 - powf((-1.6889E-36f / var_19), -1.2768E-43f - (-1.5121E2f + (var_20 - +1.6499E-37f - (var_21 * var_22)))))))));
}
printf("%.17g\n", comp);
}
// Allocates a 10-element host float array with every slot set to v.
// Caller owns the returned buffer (release with free()).
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int i = 0;
    while (i < 10) {
        buf[i++] = v;
    }
    return buf;
}
// Auto-generated driver: parses 23 floats from the command line and runs
// the stress kernel once on a single thread.
// NOTE(review): argc is never validated — fewer than 23 arguments is
// undefined behavior (auto-generated harness always supplies them).
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
// Wait so the kernel's printf output is flushed before exit.
cudaDeviceSynchronize();
return 0;
}
|
20,537 | // REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_20 --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=OK
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_20 --cuda-path=%S/Inputs/CUDA_80/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=OK
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA_80/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=OK
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA-unknown/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=UNKNOWN_VERSION
// Make sure that we don't warn about CUDA version during C++ compilation.
// RUN: %clang --target=x86_64-linux -v -### -x c++ --cuda-gpu-arch=sm_60 \
// RUN: --cuda-path=%S/Inputs/CUDA-unknown/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=UNKNOWN_VERSION_CXX
// The installation at Inputs/CUDA is CUDA 7.0, which doesn't support sm_60.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=ERR_SM60
// This should only complain about sm_60, not sm_35.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-gpu-arch=sm_35 \
// RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=ERR_SM60 --check-prefix=OK_SM35
// We should get two errors here, one for sm_60 and one for sm_61.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-gpu-arch=sm_61 \
// RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=ERR_SM60 --check-prefix=ERR_SM61
// We should still get an error if we pass -nocudainc, because this compilation
// would invoke ptxas, and we do a version check on that, too.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 -nocudainc --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=ERR_SM60
// If with -nocudainc and -E, we don't touch the CUDA install, so we
// shouldn't get an error.
// RUN: %clang --target=x86_64-linux -v -### -E --cuda-device-only --cuda-gpu-arch=sm_60 -nocudainc \
// RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=OK
// --no-cuda-version-check should suppress all of these errors.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 \
// RUN: --no-cuda-version-check %s | \
// RUN: FileCheck %s --check-prefix=OK
// We need to make sure the version check is done only for the device toolchain,
// therefore we should not get an error in host-only mode. We use the -S here
// to avoid the error being produced in case by the assembler tool, which does
// the same check.
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-host-only --cuda-path=%S/Inputs/CUDA/usr/local/cuda -S 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=OK
// RUN: %clang --target=x86_64-linux -v -### --cuda-gpu-arch=sm_60 --cuda-device-only --cuda-path=%S/Inputs/CUDA/usr/local/cuda -S 2>&1 %s | \
// RUN: FileCheck %s --check-prefix=ERR_SM60
// OK-NOT: error: GPU arch
// OK_SM35-NOT: error: GPU arch sm_35
// We should only get one error per architecture.
// ERR_SM60: error: GPU arch sm_60 {{.*}}
// ERR_SM60-NOT: error: GPU arch sm_60
// ERR_SM61: error: GPU arch sm_61 {{.*}}
// ERR_SM61-NOT: error: GPU arch sm_61
// UNKNOWN_VERSION: Unknown CUDA version 999.999. Assuming the latest supported version
// UNKNOWN_VERSION_CXX-NOT: Unknown CUDA version
|
20,538 | #include <algorithm>
#include <random>
#include <iostream>
#include <iomanip>
#include <functional>
constexpr int BLOCK_SIZE = 32;
// Naive dense matrix multiply: C = A * B for square size x size matrices
// in row-major layout, one thread per output element. There is no bounds
// check, so the launch grid must tile the matrix exactly (size divisible
// by the block dimensions).
__global__ void matrix_mult(float* C, float* A, float* B, int size) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    for (int k = 0; k < size; ++k) {
        acc += A[row * size + k] * B[k * size + col];
    }
    C[row * size + col] = acc;
}
// Owning wrapper around a square, row-major float buffer.
// BUG FIX: the struct owns `data` via new[]/delete[] but was implicitly
// copyable — any copy produced a double free on destruction (rule of
// three). Copying is now disabled; callers already pass by reference.
struct Matrix {
    Matrix(int size) :
            size { size }, data { new float[size * size] } {
    }
    Matrix(const Matrix&) = delete;
    Matrix& operator=(const Matrix&) = delete;
    ~Matrix() {
        delete[] data;
    }
    const int size;   // side length of the square matrix
    float* data;      // size*size elements, row-major
    // Mutable reference to element (i, j).
    float& get(int i, int j) {
        return data[i * size + j];
    }
};
// Fills `a` with uniform random floats in [0, 1), seeded from the
// platform's non-deterministic random device on every call.
void fill(Matrix& a) {
    std::random_device seed_source;
    std::mt19937 engine { seed_source() };
    std::uniform_real_distribution<float> dist { 0.0f, 1.0f };
    const int count = a.size * a.size;
    for (float* p = a.data; p != a.data + count; ++p) {
        *p = dist(engine);
    }
}
// Element-wise comparison with absolute tolerance 1e-4; matrices of
// different sizes are never equal.
bool is_equal(Matrix& a, Matrix& b) {
    constexpr auto epsilon = 0.0001;
    if (a.size != b.size)
        return false;
    const int total = a.size * a.size;
    for (int i = 0; i < total; ++i) {
        if (abs(a.data[i] - b.data[i]) > epsilon)
            return false;
    }
    return true;
}
// Verifies c == a*b by recomputing every element on the CPU and comparing
// with absolute tolerance 1e-4.
// BUG FIX: the original test was abs(sum - c.get(i, j) > epsilon) — the
// comparison happened first and abs() was applied to a bool, so nearly
// any mismatch was accepted. The tolerance check is now done correctly
// (written without abs() to avoid the int-abs overload trap on floats).
bool is_correct(Matrix& c, Matrix& a, Matrix& b) {
    const auto epsilon = 0.0001;
    for (int i = 0; i < c.size; ++i) {
        for (int j = 0; j < c.size; ++j) {
            float sum = 0.0f;
            for (int k = 0; k < c.size; ++k) {
                sum += a.get(i, k) * b.get(k, j);
            }
            const float diff = sum - c.get(i, j);
            if (diff > epsilon || -diff > epsilon)
                return false;
        }
    }
    return true;
}
// Benchmark driver: multiplies two random 1024x1024 matrices on the GPU,
// times the whole alloc+copy+kernel+copy-back window with CUDA events,
// and validates the result against a CPU recomputation.
int main(int argc, char **argv) {
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
// NOTE(review): `error` from cudaGetDevice is overwritten by the next
// call, and the empty `if (error != cudaSuccess)` body below swallows
// any failure entirely.
error = cudaGetDevice(&devID);
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited) {
return 1;
}
if (error != cudaSuccess) {
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
{
constexpr auto matrix_size = 1024;
// constexpr auto matrix_size = 2048;
// constexpr auto matrix_size = 4096;
// constexpr auto matrix_size = 8192;
Matrix h_A { matrix_size }, h_B { matrix_size }, h_C { matrix_size };
fill(h_A);
fill(h_B);
float* d_A, *d_B, *d_C;
// Timed region starts here: includes device allocation and transfers,
// not just the kernel.
cudaEventRecord(start);
const int size_in_bytes = matrix_size * matrix_size * sizeof(float);
//make space for device matrix representation
cudaMalloc((void**) &d_A, size_in_bytes);
cudaMalloc((void**) &d_B, size_in_bytes);
cudaMalloc((void**) &d_C, size_in_bytes);
//copy input matrix to device
cudaMemcpy(d_A, h_A.data, size_in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B.data, size_in_bytes, cudaMemcpyHostToDevice);
// matrix_size is a multiple of BLOCK_SIZE, so the grid tiles exactly
// (the kernel has no bounds check).
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 blocksPerGrid(matrix_size / BLOCK_SIZE, matrix_size / BLOCK_SIZE,
1);
// NOTE(review): the launch is never checked with cudaGetLastError().
matrix_mult<<<blocksPerGrid, threadsPerBlock>>>(d_C, d_A, d_B,
matrix_size);
//copy output matrix to host
// Blocking copy — also synchronizes with the kernel above.
cudaMemcpy(h_C.data, d_C, size_in_bytes, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Matrixmultiplikation (" << milliseconds << " ms)"
<< std::endl;
std::cout << "is_correct:" << std::boolalpha
<< is_correct(h_C, h_A, h_B) << std::endl;
}
return 0;
}
|
20,539 | #include <vector>
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#define BLOCKSIZE_x 32
#define BLOCKSIZE_y 32
using namespace std;
// Euclidean distance between the integer points (Ax, Ay) and (Bx, By),
// computed in double precision.
__device__ double eucludianDist(int Ax, int Ay, int Bx, int By) {
    const double dx2 = pow((Ax - Bx), 2);
    const double dy2 = pow((Ay - By), 2);
    return sqrt(dx2 + dy2);
}
// For every pixel (x, y) of a pitched int image, stores the index of the
// nearest seed point (Px[i], Py[i]) — a brute-force Voronoi labelling.
// BUG FIX: the original issued __syncthreads() inside the divergent
// `if ((x < cols) && (y < rows))` branch; threads outside the image skip
// the barrier, which is undefined behavior. The kernel uses no shared
// memory at all, so the barriers are simply removed.
__global__ void voronoiKernel(int* vec, int cols, int rows, int pitch, int* Px, int* Py, int numSeeds) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows) return;
    // Row base honours the pitch returned by cudaMallocPitch.
    int* row_a = (int*)((char*)vec + y * pitch) + x;
    double best = eucludianDist(x, y, Px[0], Py[0]);
    *row_a = 0;
    // Seed 0 already seeded `best`; scan the remaining seeds.
    for (int i = 1; i < numSeeds; i++) {
        double d = eucludianDist(x, y, Px[i], Py[i]);
        if (d < best) {
            best = d;
            *row_a = i;
        }
    }
}
// Host driver: uploads the seed coordinates, runs the Voronoi kernel over
// a pitched cols x rows image, and copies the labels back into h_vec.
// Timing is taken with CUDA events around the upload/kernel/download.
void voronoi_cuda(int* h_vec, std::vector<std::pair<unsigned int, unsigned int>> &P, int cols = 0, int rows = 0) {
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
int* vec_dev;
int* Px_dev;
int* Py_dev;
size_t pitch;
int nSeeds = P.size();
int ln = nSeeds * sizeof(int) ;
int* Px = (int*) malloc(ln);
int* Py = (int*) malloc(ln);
int i = 0;
// could not figure an easy way to copy vector of pair to the gpu
// (so the pairs are split into two plain int arrays first)
for(auto& p : P){
Px[i] = p.first;
Py[i] = p.second;
i++;
}
std::cout << "computing veronoi->Parallel..." << std::endl;
// Pitched allocation keeps each image row properly aligned.
cudaMallocPitch((void**)&vec_dev, &pitch, sizeof(int)*cols, rows);
cudaEventRecord(start); //start counting event on GPU
cudaMalloc((void**)&Px_dev, ln);
cudaMalloc((void**)&Py_dev, ln);
cudaMemcpy(Px_dev, Px, ln, cudaMemcpyHostToDevice);
cudaMemcpy(Py_dev, Py, ln, cudaMemcpyHostToDevice);
dim3 blockDim(BLOCKSIZE_y, BLOCKSIZE_x);
// Ceil-divide so partial tiles at the right/bottom edges are covered.
dim3 gridDim(ceil((float)cols /blockDim.x) , ceil((float)rows /blockDim.y));
// NOTE(review): `pitch` (size_t) is narrowed to the kernel's int
// parameter, and the launch is never checked with cudaGetLastError().
voronoiKernel<<<gridDim, blockDim>>>(vec_dev, cols, rows, pitch, Px_dev, Py_dev, nSeeds);
// Blocking 2D copy on the default stream — also waits for the kernel.
cudaMemcpy2D(h_vec, sizeof(int)*cols, vec_dev, pitch, sizeof(int)*cols, rows, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaEventRecord(end);
cudaEventSynchronize(end);
std::cout << "done..." << std::endl;
float ms{0};
cudaEventElapsedTime(&ms, start, end);
std::cout << "computation time(ms): " << ms << std::endl;
cudaFree(vec_dev);
cudaFree(Px_dev);
cudaFree(Py_dev);
free(Px);
free(Py);
}
|
20,540 | #include "includes.h"
// Partial reduction of `firingRate` over neurons: each (blockIdx.x,
// blockIdx.z) pair sums its slice of one batch item's firing rates in
// shared memory and writes one partial total per block into
// totalFiringRatePartial. Requires `blockDim.x * sizeof(unsigned int)`
// bytes of dynamic shared memory.
// NOTE(review): the pairwise halving below presumably assumes blockDim.x
// is a power of two — odd remainders would be dropped; confirm at the
// launch site.
__global__ void cudaUpdateFiringRate_kernel(unsigned int * firingRate, unsigned int * totalFiringRatePartial, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ)
{
const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY;
const unsigned int batchInputOffset = blockIdx.z * inputSize;
const unsigned int blockOffset = blockIdx.x * blockDim.x;
const unsigned int partialIdx = threadIdx.x + blockOffset;
extern __shared__ unsigned int partialSum[];
// Perform first level of reduction during initialization
// This is more efficient since we need all threads to load data
// but the partial sum will see only half of the threads active
//partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset] +
// firingRate[partialIdx + blockDim.x + batchInputOffset];
// Zero-fill so out-of-range threads contribute nothing to the sum.
partialSum[threadIdx.x] = 0;
if (partialIdx < inputSize){
partialSum[threadIdx.x] = firingRate[partialIdx + batchInputOffset];
}
__syncthreads();
// Reduction over neurons
for (int offset = blockDim.x/2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset){
partialSum[threadIdx.x] += partialSum[threadIdx.x + offset];
}
// Barrier is outside the divergent if — every thread reaches it.
__syncthreads();
}
// One partial result per (block, batch-item) pair.
if (threadIdx.x == 0) {
totalFiringRatePartial[blockIdx.x+gridDim.x*blockIdx.z] = partialSum[0];
}
}
20,541 | #include <stdio.h>
#include <cuda.h>
// Element-wise matrix addition c = a + b over an N x N int matrix, one
// thread per element; threads mapped outside the matrix do nothing.
__global__ void matrixAddKernel(int *a,int *b, int *c, int N)
{
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    if (x >= N || y >= N) return;
    const int idx = y * N + x;
    c[idx] = a[idx] + b[idx];
}
// CPU reference implementation: c = a + b for an N x N row-major int
// matrix (used to validate the GPU kernel).
void matrixAdd(int *a, int *b, int *c, int N)
{
    for (int row = 0; row < N; row++)
    {
        const int base = row * N;
        for (int col = 0; col < N; col++)
        {
            c[base + col] = a[base + col] + b[base + col];
        }
    }
}
// Benchmark driver: adds two 4096x4096 int matrices on the GPU and on the
// CPU, times both with CUDA events, and cross-checks the results.
int main(int argc, char *argv[])
{
//matrix size in each dimension
int N = 4096;
//grid and block sizes
//dim3 grid(1, 1, 1);
// N is a multiple of 32, so the grid tiles the matrix exactly.
dim3 grid(N/32, N/32, 1);
dim3 block(32, 32, 1);
/* Block has 1024 threads, so use this many and grid as many multiprocessors
(blocks) as you need. Exception is if you aren't fully utilising all blocks
then reduce number of threads and distribute among all blocks*/
//host memory pointers
int *a_h;
int *b_h;
int *c_h;
int *d_h;
//device memory pointers
int *a_d;
int *b_d;
int *c_d;
//number of bytes in arrays
int size;
//variable used for storing keyboard input
//char key;
//CUDA events to measure time
cudaEvent_t start;
cudaEvent_t stop;
float elapsedTime;
//print out summary
printf("Number of threads: %i (%ix%i)\n", block.x*block.y,
block.x, block.y);
printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x,
grid.y);
//number of bytes in each array
size = N * N * sizeof(int);
//allocate memory on host, this time we are using dynamic
//allocation
// NOTE(review): none of the allocations below are checked for failure.
a_h = (int*) malloc(size);
b_h = (int*) malloc(size);
c_h = (int*) malloc(size);
d_h = (int*) malloc(size);
//load arrays with some numbers
for(int i=0; i<N; i++)
{
for(int j=0; j<N; j++)
{
a_h[i * N + j] = i;
b_h[i * N + j] = i;
}
}
//GPU computation//////////////////////////////////
//allocate device memory
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&b_d, size);
cudaMalloc((void**)&c_d, size);
//copy the host arrays to device
// NOTE(review): copying c_h (pure output buffer) to the device is a
// wasted transfer.
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
//start timer
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//launch kernel
// NOTE(review): the launch is never checked with cudaGetLastError().
matrixAddKernel<<<grid, block>>>(a_d, b_d, c_d, N);
//stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
//print out execution time
printf("Time to calculate results on GPU: %f ms.\n", elapsedTime);
//copy the results to host
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
//grid and block sizes
//CPU computation//////////////////////////////////
//start timer
// NOTE(review): CUDA events measure the GPU timeline; bracketing a CPU
// loop with them yields approximate wall-clock time at best.
cudaEventRecord(start, 0);
//do the calculation on host
matrixAdd(a_h, b_h, d_h, N);
//stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop );
//print out execution time
printf("Time to calculate results on CPU: %f ms.\n", elapsedTime);
//check if the CPU and GPU results match
for(int i=0; i<N*N; i++)
{
if (c_h[i] != d_h[i])
{
printf("Error: CPU and GPU result [%d] do not match\n", i);
//break;
}
}
//clean up
free(a_h);
free(b_h);
free(c_h);
free(d_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
20,542 | #include "includes.h"
// Shared-memory access-pattern microbenchmark: each thread performs one
// global read into a shifted shared slot, then one strided shared read
// back out to global memory. B is unused.
// NOTE(review): the slot read (threadIdx.x*8 % 512) is generally written
// by a *different* thread with no __syncthreads() in between — presumably
// intentional for this benchmark, but it is a data race if the values
// themselves matter.
__global__ void shared1R8C1W1G1RG(float *A, float *B, float *C, const int N)
{
// compiler is clever and reuses the value of i, but still does 1 write
// and 2 reads on the other shared-memory positions (translated comment)
__shared__ float Smem[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
Smem[(threadIdx.x+1)%512] = A[i];
C[i] = Smem[(threadIdx.x*8)%512];
}
/*if ( blockIdx.x == 2 && threadIdx.x < 32 ) {
printf("th %d smem %d\n",threadIdx.x,(threadIdx.x*8)%512);
}*/
}
20,543 | #include "includes.h"
#define BLOCK_SIZE 32
#define N 3200
// c = a * b for n x n row-major matrices, computed in
// BLOCK_SIZE x BLOCK_SIZE thread tiles (no shared-memory tiling, no
// bounds check — n must be a multiple of BLOCK_SIZE).
__global__ void matMult(float* a, float* b, int n, float* c)
{
    const int row = BLOCK_SIZE * blockIdx.y + threadIdx.y;
    const int col = BLOCK_SIZE * blockIdx.x + threadIdx.x;
    float acc = 0.0f;
    for (int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
20,544 | const int NUM_DIMS = 4;
// Sums the *numVals consecutive values of type T belonging to this
// block's key and writes the total into newVals[blockIdx.x].
// Intended to run with one block per key.
template <typename T>
__device__ void reduce(const int * const numVals, const void * const oldVals, void * const newVals)
{
    const int n = *numVals;
    const T *src = reinterpret_cast<const T *>(oldVals) + n * blockIdx.x;
    T acc = static_cast<T>(0);
    for (int i = 0; i < n; ++i) {
        acc += src[i];
    }
    reinterpret_cast<T *>(newVals)[blockIdx.x] = acc;
}
// k-means reduce step, one block per key: copies the key through and sums
// that key's values. Keys at positions that are multiples of
// (NUM_DIMS + 1) are summed as ints, the rest as floats — presumably the
// int slot is a point count and the float slots are coordinate sums;
// TODO confirm against the mapper that produces oldKeys/oldVals.
__global__ void kmeansReducerKernel(const int numKeys,
const int * const numVals,
const int * const oldKeys,
int * const newKeys,
const void * const oldVals,
void * const newVals)
{
newKeys[blockIdx.x] = oldKeys[blockIdx.x];
if (blockIdx.x % (NUM_DIMS + 1) == 0) reduce<int >(numVals, oldVals, newVals);
else reduce<float>(numVals, oldVals, newVals);
}
// Host wrapper: launches the k-means reducer on `stream`.
// BUG FIX: kmeansReducerKernel indexes exclusively by blockIdx.x and
// never reads threadIdx, so the work must be distributed as one *block*
// per key. The original <<<1, numKeys>>> launch made numKeys threads all
// redundantly process key 0 and left keys 1..numKeys-1 untouched.
void kmeansReducerExecute(const int numKeys,
                          const int * const numVals,
                          const int * const oldKeys,
                          int * const newKeys,
                          const void * const oldVals,
                          void * const newVals,
                          cudaStream_t & stream)
{
    kmeansReducerKernel<<<numKeys, 1, 0, stream>>>(numKeys, numVals, oldKeys, newKeys, oldVals, newVals);
}
|
20,545 | #include "includes.h"
#define num_thread 256
#define num_block 256
// Pixel-wise weighted blending over a width x height image pair, one
// output value a[kkk] and weight wei[kkk] per pixel, processed in a
// grid-stride loop of num_thread*num_block threads.
// NOTE(review): the structure (w x w search window, spectral-similarity
// threshold from local std-dev / class_num, inverse distance x residual
// weighting of LM/MM differences) looks like STARFM-style spatiotemporal
// image fusion — confirm against the paper/serial code before relying on
// the labels used here.
__global__ void blending_pairs(float *a,float *b,float *c,float *d,float *wei,int width,int height,int w,float A,float error_lm,float error_mm,int class_num)
{
const int tid=threadIdx.x;
const int bid=blockIdx.x;
const int Idx=num_thread*bid+tid;
float r_LM,r_MM, r_center_LM,r_center_MM;
int row,column;
int i,j;
float sum1,sum2;
float st=0.0;
int judge;
float dis;
//float wei;
float weih,result;
int kk=0;
int rmin,rmax,smin,smax;
// Grid-stride loop over all pixels.
for(int kkk=Idx;kkk<width*height;kkk=kkk+num_thread*num_block)
{
result=0;
judge=0;
wei[kkk]=0;
kk=0;
sum1=0,sum2=0;
row=kkk/width;
column=kkk%width;
//if(row==1)
// wei=0;
// Center-pixel residuals, padded by the sensor error terms.
r_center_LM =d[kkk]-b[kkk]+error_lm;
r_center_MM=d[kkk]-c[kkk]+1.412*error_mm;
// Clamp the w x w search window to the image borders.
if(column-w/2<=0)
rmin=0;
else
rmin = column-w/2;
if(column+w/2>=width-1)
rmax = width-1;
else
rmax = column+w/2;
if(row-w/2<=0)
smin=0;
else
smin = row-w/2;
if(row+w/2>=height-1)
smax = height-1;
else
smax = row+w/2;
// First window pass: mean / mean-of-squares of b for the similarity
// threshold st below.
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
sum1+=b[i*width+j]*b[i*width+j];
sum2+=b[i*width+j];
}
}
//if(column==30&&row==30)
// result=0;
// NOTE(review): divides by w*w even though border windows hold fewer
// than w*w pixels — confirm whether that bias is intended.
st=sqrt(sum1/(w*w)-(sum2/(w*w))*(sum2/(w*w)))/ class_num;
// Second window pass: accumulate weights from spectrally similar
// pixels whose residuals bracket the center pixel's.
for(i=smin;i<=smax;i++)
{
for(j=rmin;j<=rmax;j++)
{
if(fabs(b[kkk]-b[i*width+j])<st)
{
r_LM=d[i*width+j]-b[i*width+j];
r_MM=d[i*width+j]-c[i*width+j];
if((r_center_LM>0&&r_LM<r_center_LM)||(r_center_LM<0&&r_LM>r_center_LM))
{
if((r_center_MM>0&&r_MM<r_center_MM)||(r_center_MM<0&&r_MM>r_center_MM))
{
// Epsilon keeps the inverse-residual weight finite.
r_LM=fabs(r_LM)+0.0001;
r_MM=fabs(r_MM)+0.0001;
if(kkk==i*width+j)
judge=1;
dis=float((row-i)*(row-i)+(column-j)*(column-j));
dis=sqrt(dis)/A+1.0;
weih=1.0/(dis* r_LM*r_MM);
wei[kkk]+=weih;
result+=weih*(c[i*width+j]+b[i*width+j]-d[i*width+j]);
kk++;
}
}
}
}
}
// No candidate found: fall back to a heavily penalized direct estimate.
if(kk==0)
{
a[kkk]=abs(b[kkk]+c[kkk]-d[kkk])*1000;
wei[kkk]=1000;
}
else
{
// The center pixel itself never qualified — add its own contribution.
if(judge==0)
{
dis=1.0;
r_LM=fabs(d[kkk]-b[kkk])+0.0001;
r_MM=fabs(d[kkk]-c[kkk])+0.0001;
weih=1.0/(dis* r_LM*r_MM);
result+=weih*(b[kkk]+c[kkk]-d[kkk]);
wei[kkk]+=weih;
}
a[kkk]=result;
//if(a[kkk]<0)
// a[kkk]=(b[kkk]+c[kkk]-d[kkk]);
}
}
}
20,546 | /************************************************
* MATRIX TRANSPOSE CHECK between parallel
* and sequential programs.
*
* Usage:
* Compile using nvcc -lcudart transpose.cu -o transpose
* Run using ./mat <size of the matrix>
*
* Example:
* ./mat 153
* The above will check whether for a random matrix, A = transpose(A)
*
************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#ifndef __CUDA_SAFE_CALL
cudaError_t __cuda_error;
#define __CUDA_SAFE_CALL(call) do { __cuda_error = call; if(__cuda_error != cudaSuccess) { fprintf(stderr,"CUDA Error: %s,%s, line %d\n",cudaGetErrorString(__cuda_error), __FILE__, __LINE__ ); return -1;} } while(0)
#endif
// Fills A with `size` pseudo-random non-negative doubles (rand()/1e5),
// reseeding from the wall clock so every run differs.
void generate_random_vector( double *A, int size )
{
    srand( time(NULL) );
    for( int i = 0; i < size; i++ ) {
        A[i] = ((double)rand()) / 100000;
    }
}
// One 4-to-1 reduction step: folds A[id + k*threads] for k = 1..3 into
// B[id], skipping indices beyond `size`.
__device__ inline void addfour( volatile double *A, int id, int threads, int size, volatile double *B )
{
    for( int k = 1; k <= 3; k++ ){
        const int idx = id + k * threads;
        if( idx < size )
            B[id] += A[idx];
    }
}
// One 4-to-1 max step: raises B[id] to A[id + k*threads] for k = 1..3,
// skipping indices beyond `size`.
__device__ inline void maxfour( volatile double *A, int id, int threads, int size, volatile double *B )
{
    for( int k = 1; k <= 3; k++ ){
        const int idx = id + k * threads;
        if( idx < size && B[id] < A[idx] )
            B[id] = A[idx];
    }
}
__device__ double tmp;
// Load combinator for do4096: accumulates A[offset] into shared_mem[tid].
__device__ inline void copyadd( double *A, volatile double *shared_mem, int tid, int offset )
{
    shared_mem[tid] = shared_mem[tid] + A[ offset ];
}
// Load combinator for do4096: accumulates A[offset]^2 into shared_mem[tid].
__device__ inline void copyaddsquared( double *A, volatile double *shared_mem, int tid, int offset )
{
    const double v = A[ offset ];
    shared_mem[tid] += v * v;
}
// Load combinator for do4096: raises shared_mem[tid] to A[offset].
// BUG FIX: the scratch value used to live in the file-scope __device__
// variable `tmp`, which every thread in every block shares — concurrent
// threads clobbered each other's reads, corrupting the max. A local
// variable lives in a register and is private per thread.
__device__ inline void copymax( double *A, volatile double *shared_mem, int tid, int offset )
{
    const double v = A[ offset ];
    if( shared_mem[tid] < v )
        shared_mem[tid] = v;
}
// Per-block 4096-to-1 reduction skeleton: block blockIdx.x loads (via
// `copyfunc`) its 4096-element chunk of A into 256 shared slots, then
// folds them 4-to-1 per round (via `accumulate`) and writes the block's
// result to B[blockIdx.x]. Parameterized so sum / sum-of-squares / max
// all share this driver.
// NOTE(review): after the final round only tid 0 holds the true result,
// yet *all* threads execute `B[id] = nums[0]` with no barrier after the
// last accumulate — other threads may store a stale nums[0]; confirm
// whether the final write should be guarded by tid == 0 after a
// __syncthreads().
__device__ void do4096( double *A, double *B, int size, void (*copyfunc)( double*,volatile double*,int,int), void (*accumulate)(volatile double*, int, int, int, volatile double*) )
{
__shared__ double nums[256];
int id = blockIdx.x;
int tid = threadIdx.x;
int offset = 4096*id;
int alive = blockDim.x;
// Last block may own fewer than 4096 elements.
int this_block = ( size - offset >= 4096 )? 4096: size - offset;
int intra_offset = 0;
// Strided load phase: each thread folds ~16 inputs into its slot.
nums[tid] = 0;
while( intra_offset < this_block ){
if( intra_offset + tid < this_block )
copyfunc( A, (volatile double *)&nums, tid, offset + intra_offset + tid );
intra_offset += alive;
}
__syncthreads();
// Tree phase: quarter the live threads every round (4-to-1 folds).
alive = alive >> 2;
this_block = ( this_block >= blockDim.x )? blockDim.x: this_block;
while( 1 ){
if( tid < alive ){
accumulate( (double *)&nums, tid, alive, this_block, (double *)&nums );
//printf(" id=%d total alive=%d tid=%d, %5.2f\n",id,alive,tid,nums[tid]);
}
if( alive == 1 )
break;
this_block = ( this_block >= alive )? alive: this_block;
alive = alive>>2;
__syncthreads();
}
B[id] = nums[0];
}
// Sum pass: each block folds up to 4096 elements of A into B[blockIdx.x].
__global__ void add4096( double *A, double *B, int size )
{
do4096( A, B, size, copyadd, addfour );
}
// Sum-of-squares pass (first level only — later levels use add4096 so
// the partial sums are not squared twice).
__global__ void addsquared4096( double *A, double *B, int size )
{
do4096( A, B, size, copyaddsquared, addfour );
}
// Max pass: each block writes the maximum of its chunk to B[blockIdx.x].
__global__ void max4096( double *A, double *B, int size )
{
do4096( A, B, size, copymax, maxfour );
}
// Generic host-side GPU reduction of the `size` doubles in A: the first
// pass runs `reduce_fn1` (4096 elements per block), subsequent passes run
// `reduce_fn2` until one value remains, which is returned.
// BUG FIXES: (1) after the loop `ga` aliases `gb`, so the original freed
// the same pointer twice and leaked the input allocation — the original
// pointer is now remembered and freed exactly once; (2) for size == 1 the
// loop never runs and `gb` was read uninitialized — the result is now
// copied from the buffer that actually holds it.
double reduce( double *A, int size, void (*reduce_fn1)(double*, double*, int), void (*reduce_fn2)(double*, double*, int) )
{
    double *ga,*gb;
    int vector_size = sizeof(double) * size;
    int num_blocks = ( ((size - 1) / 4096) + 1 );
    int out_vector = sizeof(double)* num_blocks;
    double ans;
    void (*reduce)(double*, double*, int) = reduce_fn1;
    __CUDA_SAFE_CALL( cudaMalloc( &ga, vector_size ) );
    __CUDA_SAFE_CALL( cudaMalloc( &gb, out_vector ) );
    __CUDA_SAFE_CALL( cudaMemcpy( ga, A, vector_size, cudaMemcpyHostToDevice ) );
    double *input_buffer = ga;   // original allocation, freed below
    while( size > 1 ){
        reduce<<<num_blocks,256>>> (ga, gb, size);
        size = num_blocks;
        num_blocks = ( ((size - 1) / 4096) + 1 );
        ga = gb;                 // later passes reduce gb in place
        reduce = reduce_fn2;
    }
    // `ga` points at whichever buffer holds the final value (gb after at
    // least one pass, the input buffer when size was already 1).
    __CUDA_SAFE_CALL( cudaMemcpy( &ans, ga, sizeof(double) , cudaMemcpyDeviceToHost ) );
    cudaFree( input_buffer );
    cudaFree( gb );
    return ans;
}
// Parallel sum of the `size` doubles in A (GPU).
double padd( double *A, int size )
{
return reduce( A, size, add4096, add4096 );
}
// Parallel sum of squares: first pass squares while loading, later
// passes plain-sum the partials.
double psquareadd( double *A, int size )
{
return reduce( A, size, addsquared4096, add4096 );
}
// Parallel maximum of the `size` doubles in A (GPU).
double pmax( double *A, int size )
{
return reduce( A, size, max4096, max4096 );
}
// Parallel mean: GPU sum divided by the element count.
double pmean( double *A, int size )
{
    return padd( A, size ) / size;
}
// Parallel population standard deviation: sqrt(E[x^2] - E[x]^2).
double pstd( double *A, int size )
{
    const double mean = pmean( A, size );
    const double meansq = psquareadd( A, size ) / size;
    return sqrt( meansq - mean * mean );
}
// Sequential (CPU reference) sum of the `size` doubles in A.
double sadd( double* A, int size )
{
    double total = 0;
    for( int i = 0; i < size; i++ )
        total += A[i];
    return total;
}
// Sequential (CPU reference) sum of squares of the `size` doubles in A.
double ssquaredadd( double *A, int size )
{
    double total = 0;
    for( int i = 0; i < size; i++ )
        total += A[i] * A[i];
    return total;
}
// Sequential (CPU reference) maximum of A[0..size-1].
// BUG FIX: the running maximum was seeded with -1, so arrays whose values
// are all below -1 incorrectly reported -1. It now starts from the first
// element; the historical -1 is kept only for the degenerate empty case.
double smax( double *A, int size )
{
    if( size <= 0 )
        return -1;
    double best = A[0];
    for( int i = 1; i < size; i++ ){
        if( A[i] > best )
            best = A[i];
    }
    return best;
}
// Sequential (CPU reference) mean: sum of A divided by the element count.
double smean( double *A, int size )
{
    double total = 0;
    for( int i = 0; i < size; i++ )
        total += A[i];
    return total / size;
}
// Sequential (CPU reference) population standard deviation:
// sqrt(E[x^2] - mean^2), with the two sums computed in one pass.
double sstd( double *A, int size )
{
    double total = 0, sqtotal = 0;
    for( int i = 0; i < size; i++ ){
        total += A[i];
        sqtotal += A[i] * A[i];
    }
    const double mean = total / size;
    return sqrt( sqtotal / size - mean * mean );
}
// Driver: builds a random vector of the size given on the command line,
// computes mean / std / max on the GPU and on the CPU, prints both sets
// of results and a speedup table.
int main( int argc, char* argv[] )
{
/* Matrix container pointers */
double *A;
int size; /* Number of elements */
int vector_size; /* Physical size of the elements in the memory */
cudaEvent_t start,stop;
bool do_print=false; /* Debug flag to print matrices in case of small matrices */
float pms = 0,sms=0; /* Parallel and sequential times */
double mean,std,max;
if( argc != 2 ){
fprintf(stderr, "Atleast one argument required. Usage: %s <Side of the matrix>",argv[0]);
return -1;
}
/* Get size of the matrix from command line */
size = atoi( argv[1] );
vector_size = sizeof(double)* size;
if( size <= 32 ) do_print= true;
A = (double *) malloc( vector_size );
//B = (double *) malloc( out_vector );
generate_random_vector( A, size );
if( do_print ){
for( int i=0; i<size; i++ )
printf("%5.2f ",A[i]);
}
/* Timers to time the parallel process */
// NOTE(review): the device ordinal 2 is hard-coded; machines with fewer
// GPUs will fail here (and __CUDA_SAFE_CALL returns -1 on failure).
__CUDA_SAFE_CALL( cudaSetDevice(2) );
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*********************
* Start of GPU run
*******************/
// NOTE(review): `stop` is recorded immediately after `start`, *before*
// the reductions run — pms therefore does not measure them; the stop
// record presumably belongs after the pmax() call. Flagged, not changed.
cudaEventRecord(start);
cudaEventRecord(stop);
mean = 0;
std = 0;
max = 0;
mean = pmean( A, size );
std = pstd( A, size );
max = pmax( A, size );
cudaEventSynchronize(stop);
/*****************
* End of GPU code
****************/
cudaEventElapsedTime( &pms, start, stop );
printf("Mean is %lf\n",mean);
printf("Std is %lf\n",std);
printf("Max is %lf\n",max);
/*********************
* Sequential Stuff
********************/
struct timespec seq_start,seq_end;
/* clock_gettime gets the process specific time spent, as opposed to the system time expended
*/
clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_start );
mean = smean( A, size );
std = sstd( A, size );
max = smax( A, size );
clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &seq_end );
/*************************
* End of Sequential Stuff
************************/
printf("Mean is %lf\n",mean);
printf("Std is %lf\n",std);
printf("Max is %lf\n",max);
/* Getting time in milliseconds for comparability */
sms = ( (float)seq_end.tv_sec - seq_start.tv_sec )*1000 + ( (float)seq_end.tv_nsec - seq_start.tv_nsec ) / 1000000;
printf("%12s %12s %12s %12s\n","N","Parallel","Sequential","Speedup");
printf("%12d % 12f % 12f % 12f\n",size,pms,sms,sms/pms);
/*
printf("<html>\n\t<body>\n\t\t<table>\n");
printf("<tr>\n");
printf("\t<td> %12d </td>\n\t<td>% 12f</td>\n\t<td>% 12f</td>\n\t<td>% 12f</td>\n",size,pms,sms,sms/pms);
printf("</tr>\n");
printf("</table>\n</body>\n</html>\n");
*/
free(A);
}
|
20,547 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
#include <stdio.h>
#include "parann.cuh";
//Sigmoid function
// Logistic sigmoid 1/(1+e^-x), computed in double and narrowed to float.
__device__ float sigmoid(float x) {
return 1.0 / (1.0 + exp(-x));
}
//Derivative of sigmoid function
// Derivative of the sigmoid expressed in terms of its *output* value:
// s'(x) = x * (1 - x). Expects x to already be a sigmoid activation.
__device__ float d_sigmoid(float x) {
return x * (1 - x);
}
// Error-delta helper: sigmoid(value) scaled by the accumulated error sum.
// NOTE(review): textbook backprop uses d_sigmoid here, not sigmoid —
// confirm against the serial implementation whether this is intentional.
__device__ float delta(float value, float errsum) {
return sigmoid(value) * errsum;
}
// Placeholder monolithic training kernel — body intentionally empty;
// training is currently driven by the per-phase kernels below
// (forwardLayer / initBP / BP_o2h).
__global__ void train(
float *input, float *hidden, float *output, // node values
unsigned int inputSize, unsigned int hiddenSize, unsigned int outputSize, // node counts
float *weights_i2h, float *weights_h2o, // weight infos
float *trainingInput, float *trainingOutput,
unsigned int epochCount) {
}
// Forward pass for one layer: block blockIdx.x computes target node
// blockIdx.x; each thread contributes source node threadIdx.x's weighted
// activation to a shared accumulator, and thread 0 applies the sigmoid.
// Launch shape is therefore <targetCount blocks, sourceCount threads>.
// NOTE(review): the result is atomicAdd-ed into targetLayer, so the
// target buffer must be zeroed before the launch — confirm at call site.
// NOTE(review): atomicAdd on floats is order-dependent, so sums may vary
// run to run in the low bits.
__global__ void forwardLayer(
float *sourceLayer, float *targetLayer, // node values
float *interWeights, // weight infos
unsigned int iteration) { //set iteration for other than real inputlayer
// current sourceNodeID : threadIdx.x
// current targetNodeID : blockIdx.x
__shared__ float sum;
if(threadIdx.x == 0) {//initialize sum
sum = 0;
}
__syncthreads();
// Weight matrix is laid out target-major: row blockIdx.x, col threadIdx.x.
int weightID = blockDim.x * blockIdx.x + threadIdx.x;
float value = sourceLayer[iteration * blockDim.x + threadIdx.x] * interWeights[weightID];
atomicAdd(&sum, value);
// Barrier guarantees every contribution landed before thread 0 reads sum.
__syncthreads();
if(threadIdx.x == 0) {
atomicAdd(&targetLayer[blockIdx.x], sigmoid(sum));
}
//&targetLayer[blockIdx.x]
//input[from] = trainingInput
}
// Seed back-propagation: for each output node, compute its delta from
// (truth - activation) and store it in the SECOND half of outputLayer
// (the buffer is allocated 2x outputCount in trainWithGPU; indices
// [blockDim.x, 2*blockDim.x) hold the deltas).
// Launch layout: a single block with outputCount threads (see the
// initBP<<<1, outputCount>>> launch); `iteration` selects the
// training-sample row in truthLayer.
// NOTE(review): weights_h2o is accepted but never used here.
__global__ void initBP(
float *outputLayer, float *truthLayer,
float *weights_h2o, unsigned int iteration
) {
outputLayer[blockDim.x + threadIdx.x] =
delta(
outputLayer[threadIdx.x],
truthLayer[blockDim.x * iteration + threadIdx.x] - outputLayer[threadIdx.x]
);
}
// Back-propagate output-layer deltas to the hidden layer and update the
// hidden-to-output weights with a momentum term (ALPHA) and LEARNING_RATE.
// Launch layout: one block per hidden node (blockIdx.x), one thread per
// output node (threadIdx.x), so blockDim.x = outputCount and
// gridDim.x = hiddenCount. interWeights is allocated 2x: the first
// gridDim.x*blockDim.x floats are the live weights; the second half
// stores each weight's previous delta for the momentum update.
__global__ void BP_o2h(
float *hiddenLayer, float *outputLayer, // node values
float *interWeights, // weight infos
unsigned int iteration) { //mode: 0 for output, not zero for others
// current hidden node : blockIdx.x
// current output node : threadIdx.x
// outputcount : blockDim.x
// hiddenCount : gridDim.x
// Block-wide accumulator: sum over output nodes of weight * delta.
__shared__ float sum;
if(threadIdx.x == 0) {
sum = 0;
}
__syncthreads();
unsigned int w_coord = gridDim.x * threadIdx.x + blockIdx.x;
float weight = interWeights[w_coord];
// NOTE(review): initBP stores the output deltas at
// blockDim.x + threadIdx.x (second half of the buffer), but this reads
// blockIdx.x + threadIdx.x -- the two indexings disagree; confirm which
// is correct. Also: this local `delta` shadows the __device__ delta()
// helper defined above.
float delta = outputLayer[blockIdx.x + threadIdx.x];
atomicAdd(&sum, weight * delta);
__syncthreads();
if(threadIdx.x == 0) {
// Store this hidden node's accumulated error in the second half of
// hiddenLayer (also allocated 2x in trainWithGPU).
hiddenLayer[gridDim.x + blockIdx.x] = sum;
}
__syncthreads();
// Momentum-smoothed weight update; lastDelta lives in the second half
// of interWeights at the offset parallel to w_coord.
float *lastDelta = &interWeights[gridDim.x * blockDim.x + w_coord];
float newDelta = *lastDelta * ALPHA + (delta * sum) * (1- ALPHA);
interWeights[w_coord] += LEARNING_RATE * newDelta;
*lastDelta = newDelta;
/*
Reference CPU version this was ported from:
lastDelta = &nn2->weight_h2o[h][nn2->outputCount + o];
newDelta = *lastDelta * ALPHA + (deltaOutputs[o] * nn2->hidden[h])*(1 - ALPHA);
nn2->weight_h2o[h][o] += LEARNING_RATE * newDelta;
*lastDelta = newDelta;
*/
}
// Forward declarations -- definitions follow main() below.
void setupNN2(NN2* nn2);
void randomizeWeights(NN2* nn2);
int trainWithGPU(NN2* nn2, float *trainingInput, float *trainingOutput, int epoch);
bool cudaCheck(cudaError_t, char*);
void printWeights(float* weightArray, int width, int height);
// Entry point: builds a small training set, sets up the network,
// trains it on the GPU, and dumps layer values before/after training.
int main() {
    cout << "SETUP PHASE" << LINE;
    cout << "Setting up training set for " << TRAIN_SIZE << " elements....";
    srand(time(NULL));
    // Training rows hold INPUT_COUNT features plus a bias slot, matching
    // the allocation and the TRAIN_SIZE * inputCount device copy in
    // trainWithGPU. The old row stride of INPUT_COUNT made consecutive
    // rows overlap, so each row's bias was clobbered by the next row.
    float inputArray[TRAIN_SIZE * (INPUT_COUNT + 1)];
    float outputArray[TRAIN_SIZE * (OUTPUT_COUNT)];
    for (int t = 0; t < TRAIN_SIZE; t++) {
        int row = t * (INPUT_COUNT + 1);
        inputArray[row + 0] = 1; //rand() % 2;
        inputArray[row + 1] = 0; //rand() % 2;
        inputArray[row + 2] = 1; // bias
        outputArray[t] = (int)inputArray[row + 0] TEST_OPERATOR (int)inputArray[row + 1];
    }
    cout << "OK\n" << "Setting up neural network [" << INPUT_COUNT << "i, " << HIDDEN_COUNT << "h]....";
    NN2 nn2;
    setupNN2(&nn2);
    printWeights(nn2.weight_h2o, nn2.hiddenCount, nn2.outputCount);
    cout << "\nHidden layer: ";
    for (int i = 0; i < nn2.hiddenCount; i++)
        printf("%f\t", nn2.hidden[i]);
    cout << "OK\n";
    // cudaCheck returns true on success, so AND the results together:
    // ok stays true only if every CUDA call succeeded. (The old code
    // OR-ed them and then reported "error" on the success path.)
    bool ok = true;
    cout << "Initializing device..";
    ok &= cudaCheck(cudaSetDevice(0), "");
    cout << LINE << "TRAINING PHASE" << LINE;
    cout << "Training started [" << MAX_EPOCH << " epoch]...";
    trainWithGPU(&nn2, inputArray, outputArray, MAX_EPOCH);
    cout << "\nHidden layer: ";
    for (int i = 0; i < nn2.hiddenCount; i++)
        printf("%f\t", nn2.hidden[i]);
    cout << "\nOut layer: "; // was "\Out ..." -- invalid escape sequence
    for (int i = 0; i < nn2.outputCount; i++)
        printf("%f\t", nn2.output[i]);
    ok &= cudaCheck(cudaDeviceReset(), "Device reset");
    if (!ok) {
        cout << "\nCompleted with error";
    }
    getchar(); // keep the console window open
    return 0;
}
// Copy the network and the training set to the device, run the
// forward/backward kernels, and copy the trained hidden/output layers
// back into nn2. Returns 0 if every CUDA call succeeded, 1 otherwise.
// NOTE(review): the training loop is hard-coded to 20 iterations; the
// `epoch` parameter is currently not wired in -- TODO confirm intent.
int trainWithGPU(NN2* nn2, float *trainingInput, float *trainingOutput, int epoch) {
    float *d_trainingInput, *d_trainingOutput;
    float *d_inputArray, *d_hiddenArray, *d_outputArray;
    float *d_weight_i2h, *d_weight_h2o;
    // cudaCheck returns true on success, so accumulate with &= only.
    // (The old code mixed &= and |=, which could mask failures.)
    bool ok = true;
    cout << "OK\n" << "Allocating memory on GPU..";
    ok &= cudaCheck(
        cudaMalloc((void**)&d_inputArray, nn2->inputCount * sizeof(float)),
        "Memory allocate error: input");
    ok &= cudaCheck( // x2: second half holds per-node deltas
        cudaMalloc((void**)&d_hiddenArray, nn2->hiddenCount * sizeof(float) * 2),
        "Memory allocate error: hidden");
    ok &= cudaCheck( // x2: second half holds per-node deltas
        cudaMalloc((void**)&d_outputArray, nn2->outputCount * sizeof(float) * 2),
        "Memory allocate error: output");
    ok &= cudaCheck( // x2: second half holds previous weight deltas (momentum)
        cudaMalloc((void**)&d_weight_i2h, 2 * nn2->inputCount * nn2->hiddenCount * sizeof(float)),
        "Memory allocate error: i2h weights");
    ok &= cudaCheck(
        cudaMalloc((void**)&d_weight_h2o, 2 * nn2->hiddenCount * nn2->outputCount * sizeof(float)),
        "Memory allocate error: h2o weights");
    ok &= cudaCheck(
        cudaMalloc((void**)&d_trainingInput, TRAIN_SIZE * nn2->inputCount * sizeof(float)),
        "Memory allocate error: trainingInput");
    ok &= cudaCheck(
        cudaMalloc((void**)&d_trainingOutput, TRAIN_SIZE * nn2->outputCount * sizeof(float)),
        "Memory allocate error: trainingOutput");
    // Copy the network state and training data host -> device.
    cout << "OK\nTransferring structure..";
    ok &= cudaCheck(
        cudaMemcpy(d_inputArray, nn2->input, nn2->inputCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: input");
    ok &= cudaCheck(
        cudaMemcpy(d_hiddenArray, nn2->hidden, nn2->hiddenCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: hidden");
    ok &= cudaCheck(
        cudaMemcpy(d_outputArray, nn2->output, nn2->outputCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: output");
    ok &= cudaCheck(
        cudaMemcpy(d_weight_i2h, nn2->weight_i2h, 2 * nn2->inputCount * nn2->hiddenCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: i2h weights");
    ok &= cudaCheck(
        cudaMemcpy(d_weight_h2o, nn2->weight_h2o, 2 * nn2->hiddenCount * nn2->outputCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: h2o weights");
    cout << "OK\n" << "Transferring training data..";
    ok &= cudaCheck(
        cudaMemcpy(d_trainingInput, trainingInput, TRAIN_SIZE * nn2->inputCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: trainingInput");
    ok &= cudaCheck(
        cudaMemcpy(d_trainingOutput, trainingOutput, TRAIN_SIZE * nn2->outputCount * sizeof(float), cudaMemcpyHostToDevice),
        "Memory copy error: trainingOutput");
    cout << "OK\n";
    for (int it = 0; it < 20; it++) {
        // Forward pass: input -> hidden (one block per hidden node, one
        // thread per input node), then hidden -> output.
        forwardLayer<<<nn2->hiddenCount, nn2->inputCount>>>(
            d_trainingInput, d_hiddenArray, d_weight_i2h, it);
        ok &= cudaCheck(cudaGetLastError(), "Kernel execution error");
        ok &= cudaCheck(cudaDeviceSynchronize(), "Device synchronize error");
        forwardLayer<<<nn2->outputCount, nn2->hiddenCount>>>(
            d_hiddenArray,  // source layer
            d_outputArray,  // target layer
            d_weight_h2o,   // interlayer weights
            0);             // iteration is always zero for hidden layers
        ok &= cudaCheck(cudaGetLastError(), "Kernel execution error");
        ok &= cudaCheck(cudaDeviceSynchronize(), "Device synchronize error");
        // Backward pass: seed output deltas, then propagate them back
        // through h2o and i2h weights.
        initBP<<<1, nn2->outputCount>>>(d_outputArray, d_trainingOutput, d_weight_h2o, it);
        ok &= cudaCheck(cudaGetLastError(), "Kernel execution error");
        ok &= cudaCheck(cudaDeviceSynchronize(), "Device synchronize error");
        BP_o2h<<<nn2->hiddenCount, nn2->outputCount>>>(d_hiddenArray, d_outputArray, d_weight_h2o, it);
        ok &= cudaCheck(cudaGetLastError(), "Kernel execution error");
        ok &= cudaCheck(cudaDeviceSynchronize(), "Device synchronize error");
        BP_o2h<<<nn2->inputCount, nn2->hiddenCount>>>(d_inputArray, d_hiddenArray, d_weight_i2h, it);
        ok &= cudaCheck(cudaGetLastError(), "Kernel execution error");
        ok &= cudaCheck(cudaDeviceSynchronize(), "Device synchronize error");
    }
    // Copy the trained layer values back to the host-side network.
    ok &= cudaCheck(
        cudaMemcpy(nn2->hidden, d_hiddenArray, nn2->hiddenCount * sizeof(float), cudaMemcpyDeviceToHost),
        "Memory copy error: hidden to host");
    ok &= cudaCheck(
        cudaMemcpy(nn2->output, d_outputArray, nn2->outputCount * sizeof(float), cudaMemcpyDeviceToHost),
        "Memory copy error: output to host");
    cudaFree(d_inputArray);
    cudaFree(d_hiddenArray);
    cudaFree(d_outputArray);
    cudaFree(d_weight_i2h);
    cudaFree(d_weight_h2o);
    cudaFree(d_trainingInput);
    cudaFree(d_trainingOutput);
    return ok ? 0 : 1; // old code always returned 0 and ignored errStat
}
// Dump a width x height weight matrix (row-major) to stdout, one row
// per line, tab-separated.
void printWeights(float* weightArray, int width, int height) {
    for (int row = 0; row < height; ++row) {
        const float* rowStart = weightArray + row * width;
        for (int col = 0; col < width; ++col)
            printf("%f\t", rowStart[col]);
        cout << "\n";
    }
}
// Size the network, zero-allocate its node and weight arrays, and seed
// the weights with random values.
void setupNN2(NN2* nn2) {
    nn2->inputCount  = INPUT_COUNT + 1;   // +1 bias node
    nn2->hiddenCount = HIDDEN_COUNT + 1;  // +1 bias node
    nn2->outputCount = OUTPUT_COUNT;
    // Node value arrays (calloc -> all start at zero).
    nn2->input  = (float*)calloc(nn2->inputCount, sizeof(float));
    nn2->hidden = (float*)calloc(nn2->hiddenCount, sizeof(float));
    nn2->output = (float*)calloc(nn2->outputCount, sizeof(float));
    // Weight arrays are allocated 2x: BP_o2h uses the second half to
    // store each weight's previous delta for the momentum term.
    nn2->weight_i2h = (float*)calloc(2 * nn2->hiddenCount * nn2->inputCount, sizeof(float));
    nn2->weight_h2o = (float*)calloc(2 * nn2->outputCount * nn2->hiddenCount, sizeof(float));
    cout << "OK\n" << "Randomizing weights..";
    randomizeWeights(nn2);
}
// Fill both weight matrices with uniform random values in [-2, 2).
// Loop nesting matches the layout comments below so the rand() call
// order is deterministic for a given seed.
void randomizeWeights(NN2* nn2) {
    // weight_i2h is laid out hidden-major: row h, column i.
    // (Second-layer/momentum slots start at inputCount * hiddenCount.)
    for (int i = 0; i < nn2->inputCount; ++i)
        for (int h = 0; h < nn2->hiddenCount; ++h)
            nn2->weight_i2h[nn2->inputCount * h + i] = (RANDOM_FLOAT * 4.0) - 2;
    // weight_h2o is laid out output-major: row o, column h.
    for (int h = 0; h < nn2->hiddenCount; ++h)
        for (int o = 0; o < nn2->outputCount; ++o)
            nn2->weight_h2o[nn2->hiddenCount * o + h] = (RANDOM_FLOAT * 4.0) - 2;
}
// Report a CUDA failure on stderr and return whether the call succeeded
// (true = success). Callers AND the results into a running status flag.
bool cudaCheck(cudaError_t cudaStatus, char* errorStr) {
    if (cudaStatus != cudaSuccess) {
        // Include the runtime's own description and a newline; the bare
        // label alone made failures hard to diagnose.
        fprintf(stderr, "CUDA ERROR: %s (%s)\n", errorStr, cudaGetErrorString(cudaStatus));
        return false;
    }
    return true;
}
20,548 | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <stdlib.h>
#include <time.h>
using namespace std;
// Pseudo-random integer drawn uniformly from [0, max], returned as a
// float (rand()-based, so quality/distribution is only approximate).
float randomNumber(int max)
{
    int r = rand() % (max + 1);
    return (float)r;
}
// 3-D vector with single-precision components.
struct vect
{
float x;
float y;
float z;
};
// Work item for the dot kernel: two input vectors plus the resulting
// dot product (written on the device).
// NOTE: array-of-structures layout; a structure-of-arrays layout would
// coalesce better, kept as-is here for simplicity.
struct vectProd
{
vect v1;
vect v2;
float dot;
};
// Compute pointer[i].dot = pointer[i].v1 . pointer[i].v2, one element
// per thread. Launch layout: 1-D grid of 1-D blocks. There is NO bounds
// check, so gridDim.x * blockDim.x must exactly cover the array length
// (all launch configs in main() cover 1M elements).
__global__ void dot(vectProd* pointer)
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    vectProd* p = &pointer[gid];
    p->dot = p->v1.x * p->v2.x
           + p->v1.y * p->v2.y
           + p->v1.z * p->v2.z;
}
// Launch the dot kernel with the given block/grid sizes, time it with
// CUDA events, and append the configuration and elapsed time (ms) to
// the log stream f.
// Fixes: the stop event is now recorded immediately after the launch so
// the event pair brackets only the kernel (the old code synchronized on
// the host BETWEEN the launch and the stop record, folding host sync
// overhead into the measurement), and the deprecated
// cudaThreadSynchronize() is replaced by cudaEventSynchronize(stop).
void openDotKernelAndTime(int blocksize, int gridsize, vectProd* devicePointer, std::ofstream &f )
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 dimBlock1(blocksize);
    dim3 dimGrid1(gridsize);
    cudaEventRecord(start, 0);
    dot<<<dimGrid1, dimBlock1>>>(devicePointer);
    cudaEventRecord(stop, 0);
    // Blocks until the kernel (and the stop event) has completed; also
    // surfaces any asynchronous kernel error into the log.
    f << "ErrorCode " << cudaEventSynchronize(stop) << std::endl;
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    f << "Blöcke: " << gridsize << ", Threads: " << blocksize << std::endl;
    f << ", Timing:" << fixed << elapsedTime << std::endl;
}
// Entry point: builds 1M random vector pairs in pinned host memory,
// uploads them, times the dot kernel under several block/grid
// configurations, and logs everything to timing.txt.
int main(int argc, char** argv)
{
    srand((unsigned) time(NULL));
    std::ofstream f("timing.txt");
    f << "Timing for different Grid / Block size:" << std::endl << std::endl;
    size_t Elements = 1024 * 1024;
    vectProd* hostPointer = NULL;
    size_t sizeInBytes = Elements * sizeof(*hostPointer);
    // Pinned allocation only -- the old code also malloc()ed the same
    // buffer first and leaked it when cudaHostAlloc overwrote the pointer.
    cudaHostAlloc(&hostPointer, sizeInBytes, cudaHostAllocDefault);
    // Zero the full buffer (old code zeroed only `Elements` BYTES,
    // i.e. 1/sizeof(vectProd) of it).
    memset(hostPointer, 0, sizeInBytes);
    for (size_t l = 0; l < Elements; l++)
    {
        hostPointer[l].v1.x = randomNumber(10);
        hostPointer[l].v1.y = randomNumber(10);
        hostPointer[l].v1.z = randomNumber(10);
        hostPointer[l].v2.x = randomNumber(10);
        hostPointer[l].v2.y = randomNumber(10);
        hostPointer[l].v2.z = randomNumber(10);
    }
    vectProd* devicePointer;
    f << "ErrorCode " << cudaMalloc(&devicePointer, sizeInBytes) << std::endl;
    f << "ErrorCode " << cudaMemcpy(devicePointer, hostPointer, sizeInBytes, cudaMemcpyHostToDevice) << std::endl;
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    f << "ErrorCode " << cudaDeviceSynchronize() << std::endl;
    // Device limits: max 1024 threads/block, max 65535 blocks in grid.x
    // (cc 2.x). Each configuration covers all 1M elements exactly.
    int blocksize;
    int gridsize;
    blocksize = 32;
    gridsize = 32768;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    blocksize = 64;
    gridsize = 16384;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    blocksize = 128;
    gridsize = 8192;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    blocksize = 256;
    gridsize = 4096;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    blocksize = 512;
    gridsize = 2048;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    blocksize = 1024;
    gridsize = 1024;
    openDotKernelAndTime(blocksize, gridsize, devicePointer, f);
    f << "ErrorCode " << cudaMemcpy(hostPointer, devicePointer, sizeInBytes, cudaMemcpyDeviceToHost) << std::endl;
    f << "ErrorCode " << cudaFree(devicePointer) << std::endl;
    f << "ErrorCode " << cudaFreeHost(hostPointer) << std::endl;
    return 0;
}
|
20,549 | #include <cuda_runtime.h>
#include <stdio.h>
// Abort with file/line and the CUDA error description if `call` fails.
// Fixes vs. original: __LINE__ is printed with %d (was %s -- undefined
// behavior), the error code is printed as an int with the runtime's
// cudaGetErrorString as the reason (the enum was passed to %s -- UB),
// and the trailing backslash after the closing brace is removed (it
// spliced the following source line into the macro definition).
#define CHECK(call)\
{\
    const cudaError_t error = call;\
    if (error != cudaSuccess)\
    {\
        printf("Error %s:%d\n", __FILE__, __LINE__);\
        printf("code: %d, reason: %s\n", (int)error, cudaGetErrorString(error));\
        exit(-10 * error);\
    }\
}
// Fill inp[0..n-1] with the ascending sequence 0, 1, ..., n-1.
void init_data(int *inp, int n)
{
    for (int idx = 0; idx < n; ++idx) {
        inp[idx] = idx;
    }
}
// Print an x-by-y integer matrix stored row-major (y rows of x values),
// each entry in a 3-character field.
void print_matrix(int *mat, const int x, const int y)
{
    printf("\nMatrix: (%d,%d)\n", x, y);
    for (int row = 0; row < y; row++)
    {
        for (int col = 0; col < x; col++)
            printf("%3d", mat[row * x + col]);
        printf("\n");
    }
    printf("\n");
}
// Debug kernel: each thread prints its thread/block coordinates, its
// 2-D matrix coordinate (i, j), the flattened row-major index into the
// x-wide matrix, and the value at that index in `a`.
// No bounds check: the grid must not overrun the matrix (the launch in
// main() covers 8x6 exactly with 4x2 blocks).
// NOTE(review): parameter `y` is unused, and `idx` is unsigned but
// printed with %2d -- harmless at these sizes, %2u would match the type.
__global__ void print_thread_idx(int *a, const int x, const int y)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = j * x + i;
printf("thread_id (%d, %d) block_id (%d,%d) coord (%d, %d) "
"global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x,
blockIdx.y, i, j, idx, a[idx]);
}
// Entry point: initializes an 8x6 int matrix on the host, prints it,
// then launches a 2-D grid where every thread reports its coordinates
// and the matrix element it maps to.
int main()
{
    int dev = 0;
    cudaDeviceProp devp;
    CHECK(cudaGetDeviceProperties(&devp, dev));
    printf("Device %d: %s\n", dev, devp.name);
    CHECK(cudaSetDevice(dev));
    // Matrix dimensions; the element count derives from them instead of
    // a separately maintained literal (was a hard-coded 48).
    int x = 8, y = 6;
    int n = x * y;
    // Elements are int, so size the buffer with sizeof(int)
    // (was sizeof(float) -- same size on common ABIs, but wrong type).
    int nbytes = n * sizeof(int);
    int *ha = (int *)malloc(nbytes);
    init_data(ha, n);
    print_matrix(ha, x, y);
    int *da;
    CHECK(cudaMalloc((void **)&da, nbytes));
    CHECK(cudaMemcpy(da, ha, nbytes, cudaMemcpyHostToDevice));
    dim3 block(4, 2);
    dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y);
    print_thread_idx<<<grid, block>>>(da, x, y);
    CHECK(cudaGetLastError());       // surface launch-config errors
    CHECK(cudaDeviceSynchronize());  // flush device printf, catch async errors
    CHECK(cudaFree(da));
    free(ha);
    CHECK(cudaDeviceReset());
    getchar(); // keep the console window open
    return 0;
}
|
20,550 | #include <iostream>
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuComplex.h"
#include <complex>
using namespace std;
#define N 8192
// Plain (real, imag) single-precision complex pair. Not referenced by
// any code visible in this file -- std::complex<float> on the host and
// cuFloatComplex on the device are used instead.
struct complexF{
float real;
float imag;
};
// Element-wise complex addition: c[i] = a[i] + b[i] for all i < N.
// Launch layout: 1-D grid of 1-D blocks; the bounds guard makes any
// grid that covers at least N threads safe.
__global__ void addNums(cuFloatComplex *a, cuFloatComplex *b, cuFloatComplex *c) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = cuCaddf(a[gid], b[gid]);
}
// Entry point: prints the capabilities of every CUDA device, then adds
// two N-element complex vectors on device 0 and spot-checks the results.
int main() {
    int count = 0;
    cudaGetDeviceCount(&count);
    cout << "\n\nThe number of devices supported are " << count << endl;
    cudaDeviceProp devProp;
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&devProp, i);
        cout << "Device ID: " << devProp.name << endl;
        cout << "Total global memory: " << devProp.totalGlobalMem << endl;
        cout << "Memory pitch: " << devProp.memPitch << endl;
        cout << "Total constant memory: " << devProp.totalConstMem << endl;
        cout << "Number of Processor(s): " << devProp.multiProcessorCount << endl;
        cout << "Number of thread(s) per processor: " << devProp.maxThreadsPerMultiProcessor << endl;
        cout << "Number of thread(s) per block: " << devProp.maxThreadsPerBlock << endl;
    }
    // devProp is only populated when a device exists; the old code went
    // on to divide by an uninitialized maxThreadsPerBlock otherwise.
    if (count == 0) {
        cout << "No CUDA device found" << endl;
        return 1;
    }
    std::complex<float> a[N], b[N], ans[N];
    cuFloatComplex *dev_a, *dev_b, *c;
    cudaMalloc((void**)&dev_a, N * sizeof(*dev_a));
    cudaMalloc((void**)&dev_b, N * sizeof(*dev_b));
    cudaMalloc((void**)&c, N * sizeof(*c));
    for (int i = 0; i < N; i++) {
        a[i] = std::complex<float>(i, i * 3);
        b[i] = std::complex<float>(i, i * 3);
    }
    cudaMemcpy(dev_a, a, N * sizeof(*dev_a), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(*dev_b), cudaMemcpyHostToDevice);
    // Ceil-divide so a maxThreadsPerBlock that does not divide N still
    // covers every element (the old integer division dropped the tail);
    // the kernel bounds-checks against N.
    int threads = devProp.maxThreadsPerBlock;
    int blocks = (N + threads - 1) / threads;
    addNums<<<blocks, threads>>>(dev_a, dev_b, c);
    cudaMemcpy(ans, c, N * sizeof(*c), cudaMemcpyDeviceToHost);
    cout << "\nThe answer is ";
    for (int i = 0; i < N; i = i + 100) {
        cout << "\n" << a[i].real() << "+" << b[i].real() << "=" << ans[i].real() << ", ";
        cout << "\n" << a[i].imag() << "+" << b[i].imag() << "=" << ans[i].imag() << ", ";
    }
    // Release device memory (old code leaked all three buffers).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(c);
    return 0;
}
20,551 | float h_A[]= {
0.7414255370004672, 0.7334934052927213, 0.5741340217761788, 0.6971148962014382, 0.8059969085163883, 0.7909542016925057, 0.6352556255067705, 0.9601936377550603, 0.9856604348821778, 0.8144888851871118, 0.8733320072857484, 0.8134113562732539, 0.5511309378200807, 0.8170992303736243, 0.5453870685558884, 0.6579281019395088, 0.5203914998533102, 0.7349487872054095, 0.8289485447822368, 0.8727816194445815, 0.8981249290238147, 0.7233611090750618, 0.8758991662209357, 0.7881515610906715, 0.7968187628023315, 0.7889175641557058, 0.7907687279488071, 0.6046744057481692, 0.5896539999748804, 0.6509619550279295, 0.9664161724620148, 0.8089174424424909, 0.9554413032918455, 0.7282174220317517, 0.7688449572334937, 0.9366523669734068, 0.5054165138030289, 0.9954668017644817, 0.5146559551266419, 0.5864165254505793, 0.7569373811415835, 0.5062879673681695, 0.5999664380487743, 0.8449344520875781, 0.6457539926447191, 0.5908632201832031, 0.7546502433710878, 0.6791486736168044, 0.7916069420461984, 0.8510533427364844, 0.7354806189311972, 0.6299843881739713, 0.993924430765126, 0.8659781995359805, 0.9867947573383717, 0.811369940748608, 0.9415046358230116, 0.5664896795416756, 0.9291684388796703, 0.5225590366272483, 0.998950188187705, 0.6959300855010748, 0.7588410006580038, 0.6384289600448203, 0.9438753840103321, 0.5138842690473577, 0.9952653269195217, 0.5248565910843535, 0.729058892157991, 0.7955706493900783, 0.9039033334104662, 0.7213855756949028, 0.7192788480405297, 0.8079815307040392, 0.5657249326951922, 0.9998082284211949, 0.7786818633969258, 0.9628913699697924, 0.9348324154279497, 0.6437850546290342, 0.8701847679952244, 0.7102449648337322, 0.7256304443206115, 0.6942696164456985, 0.6179995386805142, 0.7458789303258446, 0.728858012476941, 0.5162382980236097, 0.9100818767516001, 0.8813429331865417, 0.5268071897289176, 0.9414023254030885, 0.7578149790144268, 0.8433468236378363, 0.5673696613417233, 0.9783528988854404, 0.5067654796020825, 0.5367493769209426, 0.569098848727061, 0.6191239925481502, 
0.7095803902090648, 0.8114968581129124, 0.7836871549181083, 0.8440572246043034, 0.8252412977712501, 0.5875457254837015, 0.5756273369920719, 0.9972393716382522, 0.7439343573698511, 0.6155694489809418, 0.8723937596984802, 0.6372637482945366, 0.696985823172421, 0.89754507348163, 0.7997851248020849, 0.8455814520903235, 0.5268122203506551, 0.5370702937004335, 0.5047297711483345, 0.9401057151938692, 0.9775223085379907, 0.8247093041367117, 0.8112992330207991, 0.5669495703229657, 0.8577257794645259, 0.6662142132426738, 0.5840239619113619, 0.627755121058982, 0.7307779261375049, 0.8275116382103531, 0.8276883454089698, 0.527075604022259, 0.7068019590840522, 0.6568994761923941, 0.8089382621109004, 0.6326667591199504, 0.5942629156827866, 0.7298856951341196, 0.8331529142658517, 0.8338981136992187, 0.9223097279425845, 0.9985737371272403, 0.9565932333182643, 0.7626512289678209, 0.5562890875322604, 0.5037331025754948, 0.8372861110770062, 0.678431770620008, 0.8427243345180733, 0.7389645116360914, 0.7619589716395103, 0.6493051039787039, 0.5552413705155582, 0.5995687080830273, 0.607117077557918, 0.7863747993069025, 0.7034874987881112, 0.5923680361440788, 0.5294047684573777, 0.5160994041851446, 0.6702164690484307, 0.8685289882403138, 0.9721990799717257, 0.6371108068504996, 0.8012286185896742, 0.6791188744873617, 0.7113204100468145, 0.9862104554897794, 0.7532568980133838, 0.8253805962638492, 0.8958379000523948, 0.7786405426678944, 0.6961353051450968, 0.5406191520697947, 0.7297120795617421, 0.5207094200874078, 0.8760234691419548, 0.8819546645228371, 0.6054453111175744, 0.5560046646109089, 0.5698555589044729, 0.9704382952376778, 0.9294793165181185, 0.6780419741238813, 0.8306931659699456, 0.5699601126027495, 0.7114248629527911, 0.5984369777098831, 0.9671698265870597, 0.7316743255668686, 0.7642012828330206, 0.6003854026077518, 0.6435995998457045, 0.9508286293301917, 0.7580587250592663, 0.5847662964173782, 0.803575141787074, 0.5425024242974572, 0.765695432325375, 0.5146950942995794, 
0.9371551725494003, 0.6281364365749169, 0.679693569111336, 0.730068040774422, 0.7172498938999048, 0.8872814400200573, 0.7868540368650703, 0.7051598516218557, 0.842500584245647, 0.5491169435425985, 0.8317097842261538, 0.8585544060084052, 0.6692585697688083, 0.7896435258846988, 0.6059282897877578, 0.630985940937435, 0.675036039356301, 0.9313318569779444, 0.9106175943239492, 0.7271107973328215, 0.6180633858919496, 0.5553698196089234, 0.8817641569120876, 0.6852084980112056, 0.9811354112038919, 0.790627919841882, 0.7986706702384592, 0.5100636709473363, 0.5482734361492173, 0.9929080621648587, 0.644010072857016, 0.9333275532600374, 0.8353299772925409, 0.8505783028430345, 0.8048429772905994, 0.6308671413911815, 0.536659855502623, 0.9732220834463918, 0.6487887167956303, 0.9646372942126593, 0.8699966017523164, 0.6890500514523075, 0.6903170132421996, 0.688892044688305, 0.6393497732837155, 0.5032792722544144, 0.7802023783255427, 0.9606120848587862, 0.8374168032458764, 0.6104798114329305, 0.9548447916014253, 0.9932612568785062, 0.621443915861188, 0.8207353590635325, 0.5662331225014281, 0.9132637933281966, 0.7138259300262331, 0.5580438518160056, 0.9669685233683343, 0.8651158023837195, 0.5690770590256028, 0.7977395733085089, 0.5221410127091186, 0.817267609126616, 0.8657054282584854, 0.8633579836636069, 0.7317167379821132, 0.5270212966770231, 0.8088074603178421, 0.980962980106014, 0.9072529841954531, 0.6384736798331742, 0.859933147132966, 0.6962032187238691, 0.7549513030510651, 0.9961063126628894, 0.8585280722813888, 0.5163448307962029, 0.8948378684022031, 0.9480252926441339, 0.6206224408452802, 0.9930527863247305, 0.8464641571072404, 0.6437594757501629, 0.714288572316478, 0.7206965987915717, 0.5190303927291109, 0.9895927851651392, 0.8503698617251507, 0.9819838374227433, 0.8707371540437039, 0.8830679029122975, 0.7945778916456818, 0.9220541157843125, 0.6056687734106688, 0.8794507417337789, 0.6502088999880111, 0.7474502889126301, 0.9525700178493366, 0.7679307430851257, 
0.6685608638755864, 0.6794387418110637, 0.5801328307379192, 0.7417487500853157, 0.7998816956165442, 0.6599100507160505, 0.9191057818009978, 0.6005489417576764, 0.7066634919134158, 0.972404445440016, 0.5929051889932073, 0.5758217573322133, 0.7177382147427583, 0.9661737055170516, 0.7314058468869252, 0.515356817180262, 0.6040926482854643, 0.6130721418623314, 0.6915312105972098, 0.7493869726728617, 0.5388811080375955, 0.7285544015496921, 0.6383824989322044, 0.5451597288512007, 0.7725484731241771, 0.6764785067348205, 0.8822369117869651, 0.8305709314999419, 0.5793517447306955, 0.5639508926391337, 0.7066345441219299, 0.8047013554789635, 0.5273604087788304, 0.6009282160557272, 0.7032313644434914, 0.9449653420663435, 0.7363519484418386, 0.5138862589065936, 0.6761794016119602, 0.8004389874488319, 0.6498957865552566, 0.6106418811072971, 0.5607530352998223, 0.6203599833106412, 0.6609989248167167, 0.6532825141164358, 0.9502032921900602, 0.9497623964118905, 0.6541986835069278, 0.6466221398335469, 0.8190132677907289, 0.6228433355779563, 0.6891142730464511, 0.6250926003374855, 0.6810528065816563, 0.9767897838947366, 0.8142900282852175, 0.6465776608521787, 0.8336721038682531, 0.8427789960828974, 0.9872892516979311, 0.5185553972512764, 0.9911353898482631, 0.9942541752904186, 0.7737047349507398, 0.6389165753660582, 0.8744941163078968, 0.557834763966033, 0.8700403911390336, 0.7067709726103352, 0.5256360317080855, 0.9806636187583491, 0.8135323736431876, 0.8362343636154494, 0.9454529069104974, 0.5389660215106502, 0.953533022010121, 0.8217505102075129, 0.8672374933840218, 0.5913341356319458, 0.7529206526214103, 0.5134370072468444, 0.7417454139061908, 0.7828268196856101, 0.5861883334553573, 0.6883944749831524, 0.5759605540008672, 0.6353350204518453, 0.834071829175117, 0.788307303031922, 0.6027067036410014, 0.9640049325566735, 0.8933888346564207, 0.6861234032226318, 0.5630618174876142, 0.6638639641437221, 0.9643069464326052, 0.7672704083583475, 0.7799076582187456, 0.7469544361199731, 
0.9897331432969187, 0.9148407804714896, 0.8557885158950868, 0.9509729571114047, 0.5054460110191151, 0.9736898029075924, 0.5603746656848909, 0.8525543160105886, 0.7366942509529958, 0.6001954744817632, 0.9789746384281108, 0.6147607089414273, 0.5489018069786188, 0.7679963946765433, 0.5349592251138963, 0.8828125936669884, 0.9127460696516998, 0.5969089986545026, 0.7364558998862065, 0.7818768625181136, 0.5355798380512353, 0.9909051630196608, 0.6093543906245399, 0.6383805514004204, 0.5391328323887571, 0.8472579340301962, 0.8017136631283874, 0.6473314079719594, 0.8439397140434304, 0.7619251163864974, 0.9232221712842352, 0.560345900123155, 0.6955995713670421, 0.9239277740776859, 0.6734917519956031, 0.9287302416089498, 0.6410234631037431, 0.5735708280146149, 0.7236857322317983, 0.5190924907579109, 0.8787077625123592, 0.8946467843464834, 0.7623385802110635, 0.6184546401245229, 0.9341175154126706, 0.7680200116893561, 0.9186079419883468, 0.5582703836570675, 0.9863539870253077, 0.7586412041759159, 0.8388117015642135, 0.5649512959827268, 0.7466285477550068, 0.6284457864034292, 0.5961473173901246, 0.5769279355632588, 0.5226154297443147, 0.8267795564925231, 0.5010275687239147, 0.7224006657919162, 0.7224206154823831, 0.683674763913419, 0.9650213910856558, 0.7031648941006267, 0.9444627678501711, 0.9136923059656876, 0.5180705233017129, 0.5605227519456233, 0.5365470368570775, 0.5161913145174238, 0.9765278090348395, 0.5984739301851469, 0.944261797377743, 0.5994858030747228, 0.7480190325572571, 0.5095576652585024, 0.6809501933958331, 0.9784396500678894, 0.996535657254878, 0.6534517051235509, 0.6765829808636249, 0.9839134250751203, 0.7988658577677347, 0.5125505932568486, 0.8978990003538941, 0.6073664088679769, 0.6289913012172992, 0.7718798312703317, 0.5353976232621032, 0.7933274270839743, 0.7532847135722931, 0.8668471478375879, 0.9122623843541426, 0.6652365589906414, 0.9079868568625565, 0.7682719088314693, 0.6515999786647115, 0.9509714049213401, 0.7993833852578853, 0.9244185500532256, 
0.5195972807043752, 0.9158179455720349, 0.5333295606928006, 0.7835166291942235, 0.7427354710523075, 0.7350758446018226, 0.7312116092430554, 0.5579740239631359, 0.801611397135607, 0.87665871711832, 0.5529982726736942, 0.84166927960982, 0.9979192995257459, 0.9934440149294692, 0.5509317462319674, 0.5327616819264409, 0.948966195143136, 0.920843960912448, 0.9072211662892102, 0.9071932558264257, 0.6576238339980853, 0.6646881802392547, 0.5949746438913668, 0.9562281698035553, 0.6302709067069734, 0.5079463601151992, 0.5991540516273105, 0.9369992000003351, 0.9269768903623818, 0.6302260783911255, 0.6894884950771683, 0.7190753658634246, 0.7944899338654505, 0.5470130970877152, 0.535054817875972, 0.7126325235969432, 0.5895249215621496, 0.5694736661281159, 0.9727878746703005, 0.789838291864734, 0.7262043387399473, 0.6005460058497076, 0.5313564133737885, 0.6400177457687424, 0.5396008924065798, 0.9483092314745267, 0.5022397210711607, 0.6335103278276811, 0.6070499565586647, 0.5148437597141942, 0.6198265792688253, 0.9782898814461238, 0.8478107610274296, 0.504411882841227, 0.5759877550933947, 0.5066266165218858, 0.8214159724305371, 0.5996222765076713, 0.8930581042747732, 0.6058495622128364, 0.9754215498270731, 0.9301990358955415, 0.7856750346259068, 0.7623499029220693, 0.6933998992488057, 0.5907344473033989, 0.5164709643513172, 0.6472439028952743, 0.6801852642983387, 0.7104014283543683, 0.8015744266966831, 0.763271614786097, 0.5609423908887861, 0.8428262645096336, 0.8269181627170468, 0.6380827215162923, 0.8548787448788193, 0.6957091731997986, 0.9669984581635831, 0.6175116408749077, 0.592140064838832, 0.5520421648145044, 0.6252131043645378, 0.6083132315806559, 0.7860481982971486, 0.5314467767129825, 0.9169321887467748, 0.8965257761528251, 0.8858893422874996, 0.5397345148721437, 0.9539553409797763, 0.8380809537780358, 0.68766332113808, 0.5406916755550835, 0.9823208646793965, 0.7895532077217049, 0.8103960090863405, 0.8191413128502908, 0.8776679589907914, 0.6218191344278065, 
0.6457876502461952, 0.8868507128363246, 0.9421663955817103, 0.7680227485979023, 0.9083447698447549, 0.8777297138477569, 0.829186525807774, 0.9116989477454862, 0.5266946669396912, 0.7162915918787562, 0.5688638084372406, 0.947798295538163, 0.9672892848038548, 0.8138433450132253, 0.6639176998002424, 0.5220502470034625, 0.6587218696698739, 0.6741643599306845, 0.569454520786931, 0.5188126636622514, 0.5377515528529344, 0.9845076742587502, 0.6881305442809742, 0.8088969908483452, 0.7781047651186273, 0.9786541967096796, 0.8441745008984816, 0.8395154905038754, 0.8063407622991081, 0.5146739241296239, 0.5818996015873126, 0.9110288674076074, 0.7946498799412365, 0.5570099561177959, 0.8512378574908651, 0.8663225327438417, 0.87756778822023, 0.7828865703337088, 0.5577316750746645, 0.8623631396194423, 0.8416314999963787, 0.9965837907595516, 0.716335145108796, 0.7780888919166922, 0.7613463467135982, 0.69292641765149, 0.7595195858210828, 0.8365388361631692, 0.869364918737521, 0.8328237519320123, 0.7989560393075481, 0.7564952596313359, 0.6103096581754677, 0.8679994775170212, 0.7601921429340106, 0.7389955214581825, 0.7429014721603788, 0.9022677805297633, 0.7862617341844915, 0.6250118150953754, 0.8814044010441493, 0.8784880943549735, 0.7576692992640041, 0.5388391525174847, 0.8760926193084954, 0.8296977862516157, 0.9034925542398946, 0.85504454644508, 0.5715494679333966, 0.8999922303983943, 0.6612723629314556, 0.5063169255033578, 0.986653332654482, 0.8431866097910112, 0.9160175187336798, 0.8680675815877464, 0.6126282923580018, 0.7233717333189856, 0.8205933109379389, 0.8339393888009126, 0.9914600674727366, 0.9934282450346024, 0.8429985325324868, 0.6818879338718851, 0.8192142229335182, 0.5745321411265067, 0.6531931671477378, 0.5550402211526639, 0.6998535071596101, 0.9339262234876113, 0.7395072108402304, 0.7040968538628916, 0.5211509327728076, 0.5389919592009746, 0.8674994998972704, 0.7932222010423341, 0.7824607681067093, 0.7143094347435976, 0.581316330599708, 0.8340150369306405, 
0.957368789892352, 0.5245858692747757, 0.9256840813496341, 0.5609146903149247, 0.5881346276093432, 0.7400961350588822, 0.5075221278818083, 0.6951491411011129, 0.7887019294274573, 0.734200523408483, 0.6934476712119272, 0.5728382305095916, 0.6659276189326704, 0.8484816615245747, 0.5434490857941616, 0.6012436991608272, 0.6133834617036096, 0.6940172413735454, 0.9934636230607248, 0.8054365539556754, 0.67154162436768, 0.5720080068148203, 0.7491145994772476, 0.509175265604096, 0.9872635562353302, 0.6528560488269297, 0.5674844078123504, 0.6897994200991255, 0.6548735337792064, 0.6289419494153332, 0.6438304948866238, 0.7738159600173783, 0.6906372628676787, 0.9974069959466865, 0.7875077369152362, 0.7563864663350612, 0.6794624885008363, 0.6644649843876609, 0.8932651809328664, 0.9639881793214227, 0.8376584071162252, 0.7457872506062881, 0.5702808148521411, 0.5221936968028433, 0.9599576194270668, 0.5544647888678002, 0.7903548788798085, 0.9382961973688708, 0.9149659077745596, 0.8291147296497298, 0.7642682096632247, 0.9017798870609866, 0.9240210312025974, 0.5328919112825647, 0.9165182597645669, 0.8482971901118332, 0.9591240314163743, 0.9872804763515448, 0.8683105404838181, 0.9336291920407631, 0.7799024478981704, 0.8476684180580156, 0.7977154774588355, 0.9960113340701071, 0.8715146775869047, 0.7239083935020674, 0.7205533874970909, 0.9071791612971929, 0.8426299261441219, 0.9573068698979523, 0.8771137449580584, 0.7026304839997248, 0.7944290101276874, 0.7851637581818154, 0.886207564968648, 0.9092600264126055, 0.5678915406562759, 0.6168054068227508, 0.9503080882000048, 0.7760060798483905, 0.8030967969270079, 0.9321037251838291, 0.6178463454399257, 0.9058329565384852, 0.738827282568971, 0.9681224318860251, 0.8924489344416177, 0.5725816599561637, 0.5843249761199485, 0.9411117169044951, 0.7086384471082918, 0.7307730185605186, 0.700113126874708, 0.8830097851289346, 0.678277544276942, 0.6864434186176791, 0.5937455940826749, 0.6542560789924221, 0.9829470346109761, 0.8080704184271337, 
0.5854643181529785, 0.7257770018968204, 0.995433847763001, 0.8981055411652159, 0.5728198676211042, 0.7212234571103271, 0.7963456132896569, 0.85385887259728, 0.6611973040533112, 0.5464690129894412, 0.6011570101708315, 0.6757174069574752, 0.9242721203810185, 0.9870654265259594, 0.8187008264110027, 0.5501410575198012, 0.8078552533733079, 0.8089506407747084, 0.8965165461490094, 0.9730818511705392, 0.8116075393748616, 0.5079508038092524, 0.9487348792866928, 0.8886607762689188, 0.9024940140191464, 0.5828010412303783, 0.7831761485862951, 0.5054224711020865, 0.811885602117107, 0.8029384715592863, 0.6724669930315552, 0.9919075999713814, 0.7654756942587516, 0.8066106459256788, 0.6275987121587842, 0.6248088202899327, 0.7200037514219515, 0.8576231665689937, 0.9256892884960601, 0.6319782846549418, 0.7378201653989442, 0.5368714746744578, 0.8899911506693208, 0.6066036857942021, 0.9759728172325481, 0.8961265037206887, 0.97992326406494, 0.8933053220388776, 0.5071299891486263, 0.5262103015763222, 0.6724549931664039, 0.7985850916271886, 0.541337995155929, 0.6275915190124471, 0.704581107964257, 0.9273219805350867, 0.9966951086792977, 0.9100917151433605, 0.9437519225765809, 0.5331168306583662, 0.8645826190673551, 0.6383801979873803, 0.6100460192893473, 0.7261575720117907, 0.5112509806927974, 0.5034931349329503, 0.6394678761566622, 0.9312209977976477, 0.6866160897432012, 0.9110387881469619, 0.9027834980274885, 0.5479140492151908, 0.6780616241396324, 0.9141432358603199, 0.6067224836629181, 0.8893192099236686, 0.6299417637782767, 0.8834438601519952, 0.7816162587440286, 0.8542125368699862, 0.6499474244794095, 0.8257461780949638, 0.6418955649246496, 0.9299330703959697, 0.5596958797579438, 0.5504020849017699, 0.6759030940776996, 0.8349632885532496, 0.6022611330689509, 0.8008939913443722, 0.7200013159891037, 0.7840493642097095, 0.974906922575688, 0.5147319946541036, 0.9308062116392308, 0.763372688620955, 0.5767791941873204, 0.8161186698894214, 0.9831671114080467, 0.7859484615877091, 
0.8132895351892144, 0.5940993077930619, 0.6713552410291328, 0.9688053418799343, 0.9729855384926888, 0.5951254527784612, 0.5472837153464635, 0.9533455084776469, 0.9037855694789302, 0.5711527883105305, 0.9134456596892073, 0.7358001220699846, 0.6069271046359966, 0.6400095330245174, 0.5653839110652679, 0.9753105694425712, 0.9711076176462826, 0.7847146041202566, 0.6823441914182196, 0.7717013924050574, 0.9097388864765966, 0.706272190629069, 0.5987339426077971, 0.5535793190519358, 0.9938777960445476, 0.741198263749492, 0.5057199543084596, 0.5113759210718248, 0.6265118160334846, 0.8804019463895033, 0.6950355806261028, 0.7596829570775516, 0.6351281688885154, 0.5418620313996496, 0.8304148692670761, 0.6250859390382602, 0.9245704220573674, 0.7502258087382534, 0.8702817801230778, 0.6680644727878464, 0.56188900317211, 0.527230731335752, 0.779033525418231, 0.6906464807088027, 0.8831369255518215, 0.7299705869751859, 0.6717571934498453, 0.8807385335412121, 0.5298376713506463, 0.8662939317298424, 0.7273111714598062, 0.8030255598509539, 0.7110664146937609, 0.6612646555131885, 0.6675729105769743, 0.861657157556341, 0.7710109405127008, 0.6525201440629154, 0.6443437882556622, 0.6038087873707567, 0.8246166546961031, 0.5375919487358454, 0.5762283772140898, 0.850226793674322, 0.5526739146017149, 0.5782560664138432, 0.5750125353012387, 0.9190941518912494, 0.8119352540576519, 0.7832925150428426, 0.5719175497508602, 0.8356597408883881, 0.781472366081488, 0.5738725823553531, 0.9095634622535786, 0.6122276741134385, 0.8566322039443829, 0.8700604585504143, 0.7856158243915461, 0.7659848537005316, 0.6679258507553769, 0.5843240482907293, 0.9127133832011921, 0.604662061782677, 0.7341705701802065, 0.5195950034516221, 0.8211578979695382, 0.7284560444187185, 0.9206600426150747, 0.5577733138549812, 0.8152276992689301, 0.915722553188115, 0.665717271541761, 0.8698154671433029, 0.8417203741165817, 0.6789439698049999, 0.8443574161994283, 0.6855136454930766, 0.8699364637616008, 0.6915057315334368, 
0.8039891812035904, 0.8128060060934224, 0.9666081672132424, 0.8359355919615303, 0.5906046046890076, 0.5179249589344304, 0.895977679503928, 0.9424224215705529, 0.6509367193450974, 0.6228608182174644, 0.6448537036850228, 0.7627121300064809, 0.5358609749802059, 0.5769602945759028, 0.8214607451480981, 0.5153617621181222, 0.7451376066659832, 0.597557873601615, 0.9875466354304177, 0.5783596180311478, 0.6276176749333009, 0.9694165198169393, 0.5658549912559658, 0.7473575987291827, 0.7074754568082584, 0.666257931741562, 0.635672776861699, 0.857905685882763, 0.8590081369296843, 0.8706800137700019, 0.846571742225497, 0.5090027654499124, 0.9755032355904376, 0.9377767235293966, 0.5606533887639483, 0.9410185595573042, 0.9650461726390633, 0.7654608793173531, 0.7694843789910538, 0.9726864094754188, 0.7644282998900953, 0.809825803931064, 0.7321745513458433, 0.7965075934372812, 0.7219838971626042, 0.6968619002618264, 0.5147805683910418, 0.6602812662153448, 0.6008690111904544, 0.5250544107203646, 0.8583813997976433, 0.8921515696024913, 0.8185157861109659, 0.867277514996478, 0.6582802859031469, 0.7716238994662181, 0.8946574979414453, 0.7223947212140704, 0.9507459477813467, 0.5358982814012989, 0.5081820950834746, 0.8496745604670642, 0.7988710300011315, 0.9090353405526539, 0.6213746886585829, 0.522608631683239, 0.5785039702294257, 0.9228959154418763, 0.7133363381260058, 0.8924640475884744, 0.6426538836473508, 0.9591591459043238, 0.5347729707671245, 0.8976344232130264, 0.5931164454580344, 0.8743756654126813, 0.689413161912184, 0.7607853555368466, 0.7706752656793243, 0.6559969408571119, 0.6439330205829205, 0.5141879466904693, 0.9814840497507851, 0.9148296292838902, 0.5943604964627073, 0.835177615640287, 0.6297050724292756, 0.971442566988681, 0.8190873877188967, 0.745523878984754, 0.542332163658489, 0.653005221622899, 0.8971073509068564, 0.773265606576313, 0.5063652981285414, 0.7390110470592164, 0.5701063519345033, 0.5222271473221444, 0.8660219624367593, 0.540895769532902, 
0.6642565342632993, 0.5085314418209135, 0.9139544107407773, 0.5187276352172407, 0.7938150287169736, 0.8697486018740642, 0.8635431969700325, 0.7685817720505934, 0.7899172751706272, 0.5980248062514253, 0.6460340650024897, 0.5422536318461408, 0.5319166021602613, 0.8495533888749547, 0.7799457686119049, 0.6649210476154506, 0.5923508370991182, 0.9254193174930644, 0.9153352947980141, 0.8247867014998076, 0.5663210321721235, 0.5359777042852576, 0.9742552274081039, 0.8670902386070665, 0.8430712989246065, 0.5544033000296904, 0.6297392511348624, 0.5538251725276204, 0.6566617998886262, 0.6980120490680873, 0.5863366476964453, 0.7271190826591456, 0.6021731713226276, 0.9785500745920936, 0.6588410919799215, 0.9971573336473452, 0.8919617374068769, 0.9131551226474298, 0.9510035836804014, 0.6091755800231337, 0.7272966869448932, 0.8316371150386002, 0.6374296934239214, 0.6083444749964219, 0.8240414390451979, 0.7150959790919658, 0.7373388351118708, 0.9037913369311804, 0.9759179366357309, 0.8566777120289788, 0.7484104447714124, 0.7152921409484541, 0.5117335692406648, 0.9864074896043853, 0.5144628020234234, 0.5514517095465556, 0.5661917411812787, 0.6170490458881703, 0.955474064944017, 0.8798153169296419, 0.589145052367881, 0.8499308290166867, 0.8707487171297255, 0.5374487772694824, 0.7494225144022797, 0.6862324323640268, 0.7109073803209787, 0.9300340066437187, 0.6753064584981205, 0.7247122172561422, 0.7464386534566181, 0.5463728841369601, 0.9747726977897184, 0.9889756837913475, 0.8620278723834047, 0.6883370919111087, 0.8768740049884052, 0.603979269227578, 0.5845678571226853, 0.9128220242489591, 0.533540202642502, 0.8126014635764313, 0.6817681719567112, 0.919793602794049, 0.5708506286602353, 0.9459413191397319, 0.833737994559415, 0.7809248899422301, 0.6054793971933315, 0.6763920725541881, 0.9754702567825767, 0.9688489850073226, 0.9954656028420383, 0.9242294497392203, 0.6499450782631327, 0.8674150398357365, 0.6477826776989449, 0.6919346046607171, 0.9697736637792981, 0.9451313239250166, 
0.6791867321862074, 0.5334545214915352, 0.9844350520577558, 0.836394265246845, 0.598426629315315, 0.7437494342754036, 0.7844340518895133, 0.9043802300358261, 0.7783224616209501, 0.8415240386153235, 0.6891141529789175, 0.8792558429357518, 0.7393309007230378, 0.6971972498349492, 0.6744445814720434, 0.8997650422026467, 0.505897762868605, 0.8181066675185842, 0.5755765475213747, 0.7918722408716217, 0.8754747735289721, 0.73483031489397, 0.9464201658133027, 0.8480103662391176, 0.5897827058036889, 0.8405378731422481, 0.9635925029487349, 0.7710617366679879, 0.7265684983534848, 0.9846027883045444, 0.9787723400808523, 0.7572288455225425, 0.5315419785255133, 0.7493163623891468, 0.824620045047674, 0.6282456780664313, 0.9667525978926351, 0.8753846181385461, 0.7347527060885222, 0.6867996548102695, 0.5405407485163618, 0.9202400235604841, 0.8667407312877873, 0.7558582470978661, 0.7709080243548634, 0.8184935851398518, 0.9841619023427292, 0.5998362231762507, 0.8085655666271472, 0.8789272034952442, 0.7534501712249669, 0.5785892800849219, 0.6436105605094262, 0.7174168030247021, 0.7533202600767724, 0.7482111651626511, 0.7385366572067491, 0.6382401083553543, 0.7724457082108187, 0.8433254800460377, 0.9698390234934716, 0.6981731311111959, 0.7388149110827698, 0.6438266736467536, 0.9331450610249078, 0.9348778200733682, 0.6318390817022033, 0.6658733322410216, 0.9839394766428917, 0.785338251763005, 0.5523004083265957, 0.95417172886817, 0.6949551866981855, 0.9007136593451852, 0.5576473766197485, 0.6169481990519559, 0.5273761015818847, 0.9728274571898228, 0.9756791375996084, 0.5036756023170796, 0.7250975503054097, 0.5051298869450689, 0.9025679850099175, 0.7475085617866246, 0.5387548781561831, 0.7484309490756089, 0.8530905548679206, 0.9287302927429313, 0.9420359915849004, 0.510191551343636, 0.5939037747939273, 0.7288286627747973, 0.5545670778037923, 0.7084728165997288, 0.728174496347945, 0.6930090532152652, 0.5964026863079916, 0.8965500117840002, 0.8495749123292815, 0.5926386409344816, 
0.6398886941048092, 0.6390136573804392, 0.9172969127292089, 0.7002009211809201, 0.9174658060417238, 0.5436221594439817, 0.5795426983557197, 0.74122678385823, 0.7225983500970966, 0.9631051227840481, 0.5267934956923559, 0.9630769510977621, 0.6412424668280311, 0.8460882225371925, 0.5605686407381802, 0.5826291740506441, 0.7269147346792755, 0.5801402672912468, 0.9621261003774937, 0.5006020970296613, 0.70427484128643, 0.9536964709441277, 0.6024963573429241, 0.795131756227959, 0.8318902675168203, 0.8544724101974308, 0.8978110661725229, 0.9192611384055064, 0.9173137654697701, 0.8165377052281371, 0.6099691099254654, 0.934794113789144, 0.7365402372815815, 0.8587469904434251, 0.7857363105153736, 0.8788816982001522, 0.8014525023619326, 0.5289173226741349, 0.9423230957377359, 0.5223701464234815, 0.7329244367118506, 0.7853928034745273, 0.7086088323918973, 0.6268946562057616, 0.5503176655898976, 0.6908457688512512, 0.7568381614415018, 0.8804212629175179, 0.5885841878174733, 0.89615481814631, 0.8783842417814856, 0.8313670288692484, 0.910030110149373, 0.8985595831095522, 0.7895079137109868, 0.7923092019309399, 0.737833113978092, 0.7702514206490017, 0.7302645448399161, 0.5638935537910854, 0.7322086932752478, 0.8449245682367686, 0.9834921951549633, 0.5096008542984527, 0.647717465730945, 0.572886887764468, 0.7266177382094765, 0.9096537548450947, 0.6140869400648681, 0.7338327155154483, 0.7490005015616312, 0.5034958105333063, 0.6197814185515396, 0.8468612067784903, 0.5575203186374169, 0.654717811077437, 0.6174953523393706, 0.6855704895309642, 0.565815924933833, 0.5859428530330535, 0.5721085130074375, 0.7347335284747016, 0.5833952384531538, 0.9032087141757981, 0.9051805624930457, 0.561487261107932, 0.929603420976102, 0.8145545639127296, 0.9885876825917481, 0.8741112544083314, 0.726137862487879, 0.578643921792762, 0.7323844476280466, 0.9284144194452495, 0.7541630957036072, 0.6716140848342474, 0.9805867297141211, 0.6521459389980496, 0.5094492546567387, 0.9284578365733784, 
0.7579592296215022, 0.8796310562844911, 0.8685736083974467, 0.7880575529893161, 0.8537092081595261, 0.9100084118838747, 0.6893452708315866, 0.7984018416063987, 0.7629299058550661, 0.5129617107969918, 0.5946681580882605, 0.9486874591506012, 0.6702199630041334, 0.7606450873199637, 0.8278202084346127, 0.7543167513288747, 0.6449520357350658, 0.637623951425464, 0.9147994031780841, 0.5219307282538995, 0.7232191722440442, 0.6909974126397269, 0.6769632118518822, 0.892949799691122, 0.70973445336419, 0.7098121037431445, 0.7319463764126083, 0.9689488535748115, 0.9804370790643759, 0.520844161992708, 0.5340634934813715, 0.9426447251532117, 0.6300376600149598, 0.9236767944905895, 0.6586128106904079, 0.6872589178373352, 0.5925306469303953, 0.5637415232859622, 0.5671874270404587, 0.8657357694456382, 0.5641626769395125, 0.8310004720727182, 0.7562214687663005, 0.9574601129291254, 0.6819560005244605, 0.5514192175795084, 0.8401184019484766, 0.7374147536039097, 0.6912965759306678, 0.9217583379465251, 0.7807181347246803, 0.574975594813843, 0.5892671356896018, 0.6973622196644347, 0.6191301357602808, 0.5846660167629718, 0.7271439775323003, 0.8668865293360263, 0.7043360353755977, 0.9048344657423153, 0.5167745296046858, 0.9491839304474714, 0.9990115969510713, 0.6027011432951048, 0.71078778591866, 0.7060445414257073, 0.7765744318032908, 0.8718419333887626, 0.965732057140064, 0.5125521557583805, 0.9531959706488147, 0.8336896696055679, 0.873827845336143, 0.5069235586901133, 0.8003850167356714, 0.7603443310985146, 0.9480103207361954, 0.7829367849572089, 0.5619708443502689, 0.9000569674847774, 0.8867631785597985, 0.5277596242601021, 0.841490729562162, 0.6561915817001549, 0.9553198547840103, 0.7000734500941426, 0.6761787006663074, 0.6631540471020054, 0.6608240036039574, 0.9028629513253299, 0.5013888953216463, 0.8405836079469025, 0.9627277243627153, 0.584659945825809, 0.7283292252587119, 0.9702688461187601, 0.5186329291682157, 0.6975506077936215, 0.9115049148889631, 0.696287581749597, 
0.9353616059108396, 0.817018763704283, 0.9807231761689871, 0.8563707796945441, 0.86030732369015, 0.5837111980301282, 0.6220535156806819, 0.8357624148866427, 0.6702686114205462, 0.8656985071653176, 0.676297822145431, 0.5915379859782108, 0.6702647277632577, 0.8389342633148293, 0.7944215511700716, 0.7889279069845818, 0.6231000094600184, 0.9216115225222271, 0.645951822313032, 0.7968345789388269, 0.862799572927436, 0.7820574813261018, 0.9128198078421903, 0.8349329999531687, 0.5718888656371717, 0.5101787078030839, 0.6230246272502824, 0.5046840402854138, 0.7924704921519097, 0.6788664526787489, 0.5107232601440257, 0.7306736282227049, 0.9737421386181528, 0.753276820273615, 0.9008009359953728, 0.5374040813682603, 0.89592795556305, 0.673877859770456, 0.6753004069925124, 0.5102111560368523, 0.696261201932914, 0.9598061038086022, 0.8010875825305568, 0.6365538353920988, 0.9707693109518856, 0.6407158767909866, 0.654493698975348, 0.9966044889940846, 0.8617705093321277, 0.5798365963300713, 0.5823802055212521, 0.6155749346581465, 0.761138276183753, 0.5221897029389151, 0.5051869478277876, 0.9748544093117548, 0.631338115681529, 0.575066415725278, 0.9560108829789378, 0.6006260431686088, 0.7690093244032981, 0.9851409265062524, 0.9913325169214379, 0.6783808705551537, 0.5712151369004506, 0.7038693426211118, 0.5606504760313653, 0.9974520381525602, 0.699965285152041, 0.8817567324863815, 0.663943037519305, 0.8723405613573442, 0.9781363289829594, 0.9108643820331666, 0.6956682268651293, 0.6698610116442503, 0.6375135883887237, 0.8740815985385746, 0.6484056265341576, 0.6629115342873195, 0.9190126644063423, 0.9936976793061323, 0.8030059439223938, 0.5634766706892727, 0.8852919606481258, 0.9949788538540274, 0.9649993225029809, 0.9631224504275885, 0.8652016282617996, 0.6795175227791357, 0.9608567349951523, 0.6079057504461929, 0.8852494668090145, 0.9353930736607534, 0.6772031811124876, 0.9620015411977383, 0.5447487158750424, 0.6413208869270557, 0.608042280373973, 0.9086343551415107, 
0.5826649513190756, 0.6759700970019351, 0.8720044413403418, 0.770827592925082, 0.7913553753615227, 0.5024113683675273, 0.5507612246817001, 0.6396290964245779, 0.7365753238968078, 0.5067171023871764, 0.6039479657229097, 0.8488896018272716, 0.7147318640950358, 0.9154538245043153, 0.6653789120371335, 0.8076579411899166, 0.9216829667228897, 0.571227888474877, 0.6787312590461589, 0.8100189674376965, 0.7822973911771087, 0.5862377862094844, 0.9433631104023823, 0.83856526241283, 0.5135403605815072, 0.510107149412081, 0.903169166504294, 0.726661570885673, 0.7013769965042889, 0.8038425532186428, 0.8271696818886406, 0.5980729581898278, 0.567136987907555, 0.6197302464959837, 0.9701835462092996, 0.9006853130527233, 0.983605194204046, 0.8065628494082601, 0.8481337619061612, 0.7054418001668935, 0.6949384365052753, 0.9463722815228344, 0.8730178695661247, 0.8056262118775557, 0.8818400976711118, 0.8512761881167863, 0.5842986195156538, 0.651200079264771, 0.5240941721332908, 0.850343669720768, 0.8628648579826912, 0.6639229181409874, 0.5172788910359478, 0.6753165818907468, 0.9370163165088727, 0.6723497069859516, 0.9303982717333614, 0.6165137844364695, 0.5864770190337847, 0.9211674714182332, 0.6012597902894672, 0.5460341109612825, 0.5673871313311417, 0.6474931021814141, 0.8051074985178823, 0.798177161477497, 0.7037040339230562, 0.7580849754643955, 0.6698612420958638, 0.8465284806058903, 0.6112870991988077, 0.7855183820010425, 0.5841994415718679, 0.5519859545220562, 0.9881442116472232, 0.9107590598859876, 0.6404437987758571, 0.7777295508413958, 0.6488628263941748, 0.8729567585126161, 0.7526487488698472, 0.6836009700541674, 0.9587183625689644, 0.7532451999788521, 0.9484672487297781, 0.6801267941701665, 0.7604017771696008, 0.5744384780337131, 0.5601143931644298, 0.8001636866385844, 0.9510853981827905, 0.7421339274050702, 0.687241709595036, 0.5902537479479847, 0.8843953439791525, 0.8513704212815352, 0.7731296780272617, 0.9094784545380057, 0.7476290549797182, 0.6370030595487841, 
0.9013288781558846, 0.7366773312170964, 0.6520851817914688, 0.6995166644446424, 0.6456212922785245, 0.7934307347962695, 0.8619627847703598, 0.848379895418232, 0.901479479412969, 0.9719290460405137, 0.85665984413498, 0.763570064682439, 0.6961449880091644, 0.7125826009966773, 0.8995854809238797, 0.7156712026233569, 0.9505890807991371, 0.9324604553328883, 0.8134811489364788, 0.7116640027528001, 0.6201040566601479, 0.9663174829052779, 0.7534985490792794, 0.6807832313069436, 0.6185653146856084, 0.906927033331843, 0.5286287026632697, 0.8212839391177101, 0.6885918644301234, 0.6330369219455727, 0.6797882304952743, 0.9102151939118448, 0.5659025083108753, 0.5173033422002465, 0.5690681256868169, 0.5800702759014724, 0.7579470219590702, 0.7418150726984705, 0.6675830315464255, 0.8773589889275527, 0.7233582814725946, 0.8456350686726037, 0.7386166959423539, 0.6556837744830966, 0.7260564920879191, 0.5131425330003572, 0.7704862749560464, 0.8937551766902606, 0.9827534659991081, 0.7294382286235317, 0.9037677749439095, 0.939949134699289, 0.6069683575196386, 0.8911204611035897, 0.9602619029597471, 0.7192677551028139, 0.8008269351410493, 0.6993941271489156, 0.7628109111143984, 0.8649848168098354, 0.5063610818109702, 0.708578637263158, 0.7361546647619204, 0.6979541606673236, 0.6762542877477863, 0.9683189770627125, 0.6286972413730212, 0.7714695660564779, 0.6734913993351623, 0.6522544005286428, 0.739677511923553, 0.8210188341811553, 0.7792612864063904, 0.642642772192628, 0.8290585625828725, 0.7407091252246454, 0.6401660466425377, 0.6247676366730495, 0.8870906660789539, 0.9362414517194839, 0.9461719649071698, 0.767382506869069, 0.5838735153393957, 0.71036444435835, 0.7006686177448456, 0.583513380563716, 0.8760257967885136, 0.7206705836545153, 0.5758759634772458, 0.7005240882208341, 0.7293604582311022, 0.5362944225679045, 0.9654665804618625, 0.7135322298776243, 0.9829469929419498, 0.7210985299708843, 0.5927270230328042, 0.7411707327401751, 0.9337700917089102, 0.5026914697612364, 
0.5262931842109473, 0.653359873521193, 0.8482856655965367, 0.6828454886426019, 0.5003146596797765, 0.8819390338762102, 0.501495244768816, 0.9971609797660665, 0.5487161603289352, 0.7612587785545022, 0.6028503280241668, 0.8733877842191085, 0.7204053815226106, 0.8640787369202323, 0.8955294168649692, 0.6194126980089828, 0.688683121687716, 0.8177437006521227, 0.7045808420948798, 0.8478060490975279, 0.9095367206563699, 0.9629277810409992, 0.8497176529109365, 0.9579286705825774, 0.6491685809254097, 0.5250503111992796, 0.8887802053241138, 0.7922235277617025, 0.9289730847614721, 0.5640598389481755, 0.568294331299396, 0.6839819254008556, 0.929843065352606, 0.6519661874150952, 0.9661788410396626, 0.9056962005833699, 0.7735887369416468, 0.5954019970105731, 0.9756944616372951, 0.6338052388538895, 0.6434805372559099, 0.6322148516762456, 0.9130117509800199, 0.9194362466792061, 0.7214470134523321, 0.5413657745736584, 0.6133093573686038, 0.695226816206693, 0.9729353968357065, 0.8251523137243186, 0.7460757316533826, 0.9592975503869321, 0.9856302315915824, 0.5204161581248246, 0.5439847353233347, 0.705163865654104, 0.8721333355544201, 0.5324614055168237, 0.7594004246042299, 0.8725297989984542, 0.6700486329117998, 0.9412232773312925, 0.9102818922104887, 0.5183113102052737, 0.9090530121232483, 0.8764706939925255, 0.8982790963612877, 0.9761109878020617, 0.8689755570003384, 0.5018718453559264, 0.9122728454511921, 0.9586838368228845, 0.7895451979963086, 0.9280515789029821, 0.9230543203378515, 0.5477546103441902, 0.7708091726776916, 0.9282753499025119, 0.596163729123586, 0.8741085922010026, 0.919652262127846, 0.8588013701355794, 0.6396139774310884, 0.9859409907435458, 0.7583820203117911, 0.9027191947655862, 0.9483030745344272, 0.6633130569700831, 0.9106019424087215, 0.9209170576958148, 0.985903645589381, 0.522076142010399, 0.5196454007201039, 0.5616091721196556, 0.5340940921242394, 0.7922607064946678, 0.7647028034322874, 0.6734690068239746, 0.8005677769600017, 0.7889260762598405, 
0.5598314266408964, 0.5352706389476235, 0.9635575520991997, 0.8113974418009874, 0.9834048051981212, 0.8739739617810405, 0.8655389220819757, 0.6865494697185262, 0.5934427993529754, 0.7473439030529412, 0.8429557824914232, 0.8629479747510307, 0.8061036579394065, 0.7966475184956658, 0.7876414946892828, 0.5532655505370725, 0.7393864302618325, 0.7123594273485551, 0.8241009607787784, 0.6199687151161509, 0.8769776233260098, 0.6746660386725918, 0.570923829489367, 0.9841989757905065, 0.5795800991688451, 0.7664272782161741, 0.9856992808050888, 0.5522043160923498, 0.9531785777176873, 0.6137745199074072, 0.5758424567611374, 0.7641842032914854, 0.5899768459049188, 0.9387607602708772, 0.62578316139673, 0.7943959735382738, 0.7984024990578216, 0.9321530516875968, 0.7012623698525011, 0.6102143897683049, 0.5370346328729126, 0.85857274684631, 0.8707250892017433, 0.8845802100006384, 0.5393409278303807, 0.5770738802387553, 0.9502978353226434, 0.6041448975581315, 0.5064035956672797, 0.5655080997417115, 0.7730630091973987, 0.8501376054265102, 0.6859691486452436, 0.7133027394928654, 0.8788019985316159, 0.8243431092161426, 0.6217308955878903, 0.689109264047258, 0.6427400953463405, 0.8660887638680936, 0.9604438297877738, 0.9526368962654084, 0.845014590782541, 0.6055217807295729, 0.5012053235010748, 0.5756748208010163, 0.8539730235763701, 0.5038818418764555, 0.8012350661906287, 0.7072845054252089, 0.8465484261429151, 0.5798136971408073, 0.9174576529763665, 0.9305863921969625, 0.5123927853573131, 0.7915261521447099, 0.7223788381673948, 0.7387165183205893, 0.6907735941216101, 0.5087362679512287, 0.7398612993657238, 0.7494088424195615, 0.6322050445898317, 0.9700636039694157, 0.915607428909981, 0.5207661335325724, 0.6314102760421119, 0.5538800559760185, 0.5553672013980866, 0.5509282426304107, 0.662240720912374, 0.8239028557160648, 0.9080062525146233, 0.7021018438699564, 0.6035738745021824, 0.7330390961439102, 0.7180421631812377, 0.5078685502238717, 0.5363321495830304, 0.581599720088666, 
0.7648604946199753, 0.5084390185418979, 0.6681485494730605, 0.868028039696218, 0.7844245647952794, 0.8830879287872123, 0.7842411130192468, 0.7963566631194967, 0.6812430185678615, 0.6602121913217734, 0.8723682431572857, 0.6479020239386221, 0.5299619311204198, 0.5351278856890529, 0.8282968146740384, 0.6997349290971655, 0.7465194813929539, 0.920504705941539, 0.9272822735873365, 0.7979277262514588, 0.9264504026866397, 0.5880146863702334, 0.9338517091264091, 0.8479826119085712, 0.6503591808702306, 0.8273104421937347, 0.6360550813341364, 0.8420420622601209, 0.8263911240589827, 0.8412713003941805, 0.889191250178964, 0.7540147928681511, 0.7047540605812306, 0.7451467572959019, 0.6069593206185219, 0.6575089624107209, 0.6104816818064418, 0.9563133475043297, 0.999056364544358, 0.5775809107945955, 0.9789293056089385, 0.9731269224125352, 0.7781232531153248, 0.5711342981409904, 0.990013331400083, 0.7840217021739404, 0.8599017911520961, 0.5399263403469683, 0.7278573354288989, 0.8923823824287219, 0.9651107216485361, 0.8852331008242156, 0.5243431797587921, 0.7254561142508804, 0.8466823256520923, 0.5808636675081446, 0.9099473532577074, 0.5782114044992775, 0.7486684835737674, 0.7854002352024407, 0.9735057129445241, 0.9987345509088297, 0.5397908933211554, 0.7810855247147422, 0.8509472330680088, 0.6314010650333499, 0.5850587320867295, 0.8341099288026438, 0.5591304690298826, 0.7327352815097878, 0.6713708076432201, 0.6778386723699947, 0.9957669188000176, 0.7097444771095225, 0.7187105325386454, 0.633448996966249, 0.9053970461343908, 0.8929016147003068, 0.9257171723901703, 0.975694755833121, 0.9809163325838923, 0.5458972513361714, 0.7625987321591096, 0.6096307599521862, 0.5081164276972564, 0.8775404126721836, 0.7365750128945099, 0.6941460245816549, 0.6737050275068646, 0.5227249787838999, 0.8223431714318309, 0.8982770796882145, 0.5945200015460139, 0.9394584718445738, 0.740588621653011, 0.6427607962000051, 0.6729662802389308, 0.6868064090608468, 0.7999967870552924, 0.7019490390104575, 
0.9692139145314755, 0.8850719059184525, 0.8237867535794845, 0.6242936697209092, 0.9808674482379096, 0.6823187979151712, 0.7253213107163492, 0.5405857814998347, 0.6565091541534964, 0.8094028621249262, 0.9654421345941202, 0.7649155765730358, 0.5952596118870459, 0.647371605856712, 0.740716629473376, 0.7032881373934865, 0.9691340630659282, 0.5376248484566788, 0.77871804807567, 0.7054168461647332, 0.8569643007000391, 0.8495850953431628, 0.7555149790580262, 0.8971406636796458, 0.8115770547017928, 0.9012456815744727, 0.6923214186719102, 0.675713555987399, 0.6407276184674935, 0.7461961260290275, 0.9054390716312174, 0.5450226770695983, 0.973861107396778, 0.7749716108792883, 0.8826335666075423, 0.8565367386495837, 0.60070594028924, 0.880668515186376, 0.7802433625407936, 0.5791996220882097, 0.9662090921590065, 0.6912621892302917, 0.6564097666775015, 0.5655704591768473, 0.9495376759238985, 0.7248282581650364, 0.8876963983398422, 0.5946404463334112, 0.735129330657059, 0.5507235789765279, 0.7541606298125578, 0.5947860578684769, 0.9899934490377121, 0.6236084442537055, 0.8866660882577782, 0.5984686670321413, 0.7306440565699441, 0.5823721805956318, 0.6075240071351812, 0.7392114771782079, 0.8429330768692637, 0.5056156206264681, 0.8803199497972396, 0.7711420345683797, 0.7513010667967355, 0.7588798521229778, 0.6991257578962606, 0.601385899470975, 0.8120248464914849, 0.5705469934472122, 0.5748968840644721, 0.5976028843401318, 0.953790814552224, 0.9408189891866542, 0.8310195135935178, 0.6800551483345636, 0.850907899190768, 0.5786932148417334, 0.8093336945145919, 0.766104634810403, 0.8603910216526165, 0.7719329610470927, 0.5838946281685341, 0.9695094105421834, 0.9562573253276563, 0.8876127695256604, 0.7243137284918473, 0.9748063745777122, 0.8905250134395114, 0.8228334369285155, 0.5944512011490015, 0.9357238336647108, 0.8773217230644981, 0.6593921172407069, 0.9254416652093398, 0.9870618099782313, 0.8140917892844048, 0.6414547696451223, 0.8196921535048244, 0.5898945063194905, 
0.9212722535805511, 0.5643781667952271, 0.9323517253117389, 0.9973977976562907, 0.5373204171044224, 0.9982803918151186, 0.8860751717159829, 0.8453471632608033, 0.8372576438984614, 0.7573835192294827, 0.8982160096958804, 0.83845032041917, 0.9803756658749135, 0.787769707336163, 0.5789749576640153, 0.9700063521252044, 0.6628428316720627, 0.6876728804249752, 0.5969534496754507, 0.7044872344326867, 0.9963784275905244, 0.5524978185841652, 0.8930853777436364, 0.6617025155742822, 0.6975893870963664, 0.59963362509096, 0.8003080837200856, 0.9433591758243477, 0.6383880327550501, 0.7841660665271286, 0.7223745986304593, 0.8069463042191156, 0.9056683237959076, 0.6521035889023541, 0.7123224456318864, 0.9660495473784843, 0.8326835246750703, 0.5961056985929367, 0.6296795395162721, 0.7083898592508852, 0.6178964018590789, 0.729990359462743, 0.6141786464937465, 0.6919710886300469, 0.7703224271660547, 0.5734959881587924, 0.7450164471351537, 0.759630333287888, 0.843389682103193, 0.5751190147815493, 0.6642537529674795, 0.8495749012942102, 0.8219830391531768, 0.8175053585512184, 0.6303821629049535, 0.9552144284137705, 0.9511320903741263, 0.5626947639070135, 0.8959358852968187, 0.6403219075874167, 0.5709845083073808, 0.8510446003472976, 0.6718479934450693, 0.7855704068002787, 0.6779342102527963, 0.927935501971196, 0.8183392421430078, 0.9078446809226736, 0.5706503799375624, 0.5180387042296033, 0.7574261687331202, 0.9244355816212948, 0.58574833559162, 0.7920742240139448, 0.6263600788358095, 0.644464828459036, 0.5135643456190717, 0.6603142261768984, 0.8597207998234593, 0.9507093425735786, 0.79408630782423, 0.7027645146546684, 0.678283510273427, 0.8798777512130199, 0.5157424995199328, 0.9218022134468887, 0.6080707765670423, 0.6461212999653316, 0.5814718308393995, 0.6112632946173975, 0.995453779735245, 0.8432299072642822, 0.6317508641876001, 0.5543859339984653, 0.8511344825043586, 0.5605205912915616, 0.8407726556124459, 0.6633373114602215, 0.8170872496649971, 0.7283287501383698, 
0.5367873896016392, 0.6190519878374388, 0.8395161909770841, 0.9675058621904125, 0.7858850963690617, 0.7813975314082833, 0.6572371546245102, 0.9572445004923983, 0.7427325151556512, 0.8752390440693394, 0.5240759539424145, 0.7642775217595235, 0.9022502771788863, 0.7341917005284244, 0.6898398006376882, 0.7001538594150838, 0.937470245313919, 0.8567119781845389, 0.9790562790123911, 0.8841069156778196, 0.9638590402050016, 0.6980436792255182, 0.6395106654753114, 0.6514656407786305, 0.811253447648217, 0.7437977588126004, 0.504574584576733, 0.9133066606611497, 0.9015541847622839, 0.8699416527875428, 0.8811389414816005, 0.8999342526672265, 0.8546885348924862, 0.9652974476388518, 0.6883030536605299, 0.7227814266513495, 0.6877447218328829, 0.6191961241540596, 0.7471672129199621, 0.7604014755327781, 0.8986540563292322, 0.9786965728968755, 0.6196643635891089, 0.9030038447792745, 0.8493457414995386, 0.5587823066814194, 0.9297237073999054, 0.6512130454533938, 0.6060945824562785, 0.9078411922791976, 0.632320123709047, 0.6897560709706396, 0.7297760689869721, 0.6923691468055531, 0.7339517157743067, 0.9535385152836564, 0.9673226738493295, 0.725304966645056, 0.6248608188939849, 0.8359890355586682, 0.8317365513641328, 0.8442726953992954, 0.9560389418012586, 0.5387926675537398, 0.582058054819222, 0.7928558020776815, 0.5183478845478694, 0.7330887687307996, 0.8230351973451834, 0.5599092575192961, 0.7196912903598611, 0.674386882092117, 0.9778992394125287, 0.7780194603880193, 0.8443222458610575, 0.5035398121299559, 0.8846044923358358, 0.9186924767651639, 0.6823893789677092, 0.5377555273648944, 0.738661958602966, 0.5545492240377718, 0.5424612378412315, 0.6194901305054223, 0.9138771694461163, 0.9210472426041987, 0.5896926045711711, 0.6217579658401587, 0.882018048877764, 0.7751595806992859, 0.8981054117861718, 0.9727340861389469, 0.5737743803499731, 0.6442459617578005, 0.93295542516476, 0.5775170645958712, 0.8803567320629692, 0.5459702371629496, 0.9422336811090596, 0.7765824773481257, 
0.909503837726746, 0.8007267148638995, 0.8191286917548001, 0.9193706575985203, 0.6061114036243755, 0.5127134083557046, 0.9191492070626672, 0.8739369491099265, 0.5310951226220852, 0.6047603425328187, 0.8072449398692905, 0.5227259166342288, 0.5221178060525384, 0.5088261907121071, 0.8204559788877699, 0.9625856009900698, 0.5257188205714842, 0.9557990036981905, 0.8914192999866994, 0.8046791706518563, 0.6570274080409568, 0.9454317538586627, 0.8917011451600703, 0.6543810128398642, 0.5476936732583381, 0.6455939221158533, 0.7358780750333862, 0.5264363983281901, 0.975745306634085, 0.9068148970891827, 0.5073466874650042, 0.8042776269184944, 0.6905025949202763, 0.941835699387074, 0.808028702929588, 0.9085310077397535, 0.7341107763169972, 0.762196462831529, 0.8936815985522539, 0.5194579788880895, 0.8570539628879912, 0.5807423212823295, 0.6743465552100396, 0.9252162787808678, 0.538641620263085, 0.5106654890882734, 0.6909023228515871, 0.5867775389785725, 0.9292952966912025, 0.5377961505161902, 0.5347853040793793, 0.6539725651358126, 0.6141045984361975, 0.7315160739964897, 0.655610518638805, 0.8759036657940851, 0.8535010584080334, 0.8915922616610534, 0.7422020068701181, 0.5149979115535358, 0.9557972816580919, 0.8277853157715741, 0.6415438375430766, 0.8533876307889171, 0.9098920568771598, 0.6292081671229593, 0.8958929343558483, 0.6578831135639935, 0.7532047319040658, 0.7631038602452711, 0.9573652918943226, 0.9220314395620466, 0.645353405925462, 0.7306226464425756, 0.9742552417082444, 0.5758382787750849, 0.9408187332324041, 0.978005906562927, 0.6212216817042835, 0.9047655364304638, 0.690905036429818, 0.5959829893411086, 0.6674248326142596, 0.5861958131539846, 0.8105121504949981, 0.5812335203464107, 0.8962952552725367, 0.643111048591973, 0.717360174693499, 0.6317684400318065, 0.5378976117617529, 0.5995570048897175, 0.5816800330909032, 0.5786567151383099, 0.8348826652911373, 0.9354757267515462, 0.5025156036491062, 0.941716883616632, 0.5077318611304984, 0.856905281817741, 
0.7341430035718074, 0.884534745063376, 0.6288308978752064, 0.7819053939613427, 0.8785300050490423, 0.8187987662196938, 0.9415287231165412, 0.6729049651553051, 0.524471993386091, 0.7997815129684038, 0.7725276904432212, 0.9975627856102218, 0.5775994028678177, 0.679869300969161, 0.7629445719372177, 0.8383571325676782, 0.5645607847232905, 0.93942183583466, 0.9707837613619629, 0.8347662072618085, 0.9979659725750494, 0.5473317729091737, 0.6542133203409574, 0.7002938728175294, 0.5307148130819132, 0.9330884677053761, 0.6372470921995346, 0.9650770783591099, 0.9217566421624694, 0.9130477403193284, 0.6837485739216791, 0.5412679142190138, 0.6633841543384407, 0.99907086162353, 0.9670655176914361, 0.734266948044928, 0.5191222539440261, 0.7266449536835422, 0.8237747214588497, 0.9961227678263931, 0.59953215530583, 0.8535376880278732, 0.7581468564231171, 0.979541981673179, 0.8772915686986916, 0.7286966011512623, 0.5225580871695346, 0.7916986208266654, 0.8587365138091794, 0.7311649403393824, 0.9398397395500957, 0.932150000309238, 0.5463255899189894, 0.6028554779444049, 0.6598460702137969, 0.5532676602818691, 0.7271636024110602, 0.7024736759848526, 0.6159689281673928, 0.9162305837801552, 0.746538239603473, 0.9501516110547641, 0.5602358963701708, 0.5449696000498592, 0.7455932126376787, 0.7106247771098888, 0.7614702456321066, 0.7382340697234541, 0.9650050832210137, 0.8283484632954885, 0.6641914149232153, 0.922608961286434, 0.776888569665299, 0.5705394262900193, 0.6335837674240286, 0.9605177170028716, 0.5128036274061425, 0.6455336262121679, 0.8222184353067448, 0.8908720259455756, 0.669735445714091, 0.6551590560351499, 0.936128888814917, 0.6224651584650097, 0.7762531021330992, 0.9366539459006196, 0.9445829681641885, 0.7871472573138669, 0.6160647107273096, 0.5638515650522222, 0.7917417174987389, 0.8640682947584478, 0.9377456837974916, 0.7593096870864487, 0.9737436117934994, 0.6717106463552668, 0.6970013493457121, 0.6662095232104033, 0.6523477828020428, 0.8774192845611883, 
0.5421344454218815, 0.8222258005347748, 0.7940318830159403, 0.7958369124903211, 0.7256151352030751, 0.5068201670790806, 0.6580213140913187, 0.6607378996119351, 0.7870577159934993, 0.9781973543714317, 0.8294008668355846, 0.8199135927555865, 0.9481642683491249, 0.640348282477571, 0.5998283652359053, 0.5281615079995924, 0.5728354406169109, 0.729165434445447, 0.5529967098951883, 0.6773380851642844, 0.5344848901800845, 0.654374866500871, 0.7804443849595901, 0.7661375343847314, 0.9739571804085665, 0.6510655918085793, 0.57783217136788, 0.7657978227592975, 0.5085598907427502, 0.5152010458341248, 0.9396307299227141, 0.5301274246336078, 0.9596077813435803, 0.8890161944438368, 0.7617935520126725, 0.9503670457156086, 0.5827346508262741, 0.5932476353295615, 0.5787873938418547, 0.9594807686520741, 0.6429832804831048, 0.9069339279636094, 0.8398262405278097, 0.828066659650901, 0.6376443289623023, 0.8459046320902253, 0.9241778504145317, 0.9146402658197793, 0.9376960887088845, 0.5833523110066969, 0.733272719695742, 0.6518322135172264, 0.577686521760473, 0.5999971949652584, 0.8731073163646992, 0.6740530554612245, 0.9883662596823578, 0.6891281849273387, 0.7563363340883935, 0.8084671679464708, 0.7133295114286642, 0.8197662625339688, 0.5879647800607701, 0.7702287624956128, 0.6461830361413632, 0.9239516858388555, 0.560332153678988, 0.7874972671425147, 0.954788193511757, 0.8663433625230216, 0.8752686766019906, 0.8948235973425305, 0.5368343638613361, 0.5264915825393484, 0.9752326557960806, 0.5424482364761434, 0.8697689375167857, 0.8735970003036269, 0.9958779197420841, 0.590402766577115, 0.7853678567486834, 0.6353064722456221, 0.9728988363731197, 0.5553149589680018, 0.8300979718090584, 0.7270313533175268, 0.5407755023701728, 0.5254859610357996, 0.8556070785263101, 0.9915330126448623, 0.9520753675092988, 0.642665650232229, 0.7364011562075015, 0.5647956840452617, 0.9511553050044765, 0.5024331344966384, 0.991393390042324, 0.5592919770645778, 0.7992046548281657, 0.8466086580017074, 
0.7764855333049818, 0.7260731715226831, 0.9841458556173033, 0.8302621719959229, 0.7686182300957956, 0.7435344380122029, 0.9821095378862214, 0.8781073110859223, 0.5689417472211141, 0.5958075552407529, 0.9836481752750285, 0.8891811731223055, 0.5079196684049789, 0.788588545039131, 0.6563966921525954, 0.9626091340868144, 0.5545791158050373, 0.848805762354037, 0.7458982353596355, 0.5604686491864892, 0.7507139953946362, 0.7438782982685708, 0.9559144340687658, 0.5200254305010172, 0.8315102311358509, 0.6022967398683832, 0.6348034783548379, 0.8326200151140946, 0.8860130790765799, 0.7944925527218909, 0.6334963693604178, 0.9429800814394926, 0.8748058206431861, 0.7859674595742376, 0.8556106856213677, 0.7261403482365845, 0.8748854846586407, 0.6943897650547128, 0.8145566868127788, 0.82881074911652, 0.8366591265055675, 0.6057862264448108, 0.7848711303618241, 0.7749385197710381, 0.8720230741621775, 0.8324194975167456, 0.7103204113844992, 0.5614948682563867, 0.742579114454192, 0.8083820741958148, 0.6292435972901957, 0.5361789038985503, 0.7929059302020206, 0.6508631700320229, 0.5356494767232056, 0.6166393391572152, 0.9445793881952123, 0.836660743731833, 0.8248292414281617, 0.6229048857637653, 0.5811454452356515, 0.6713221229539226, 0.8808735029506011, 0.8252425382996554, 0.6574556581780284, 0.7266528275835579, 0.5583031367154061, 0.9837204358517002, 0.7597068732196504, 0.5494721517686262, 0.7434255332781248, 0.6433430822968792, 0.623060054297432, 0.6873105722311905, 0.7995202124669236, 0.7587477104823672, 0.9729066857454574, 0.734333800039366, 0.8421921707336576, 0.6141701908709409, 0.6986220665506675, 0.9759437422610426, 0.7574156773879563, 0.7236096697737928, 0.8041898573089604, 0.8367778781203261, 0.9838173795690781, 0.7033231671701963, 0.8502207132491475, 0.5037982130916411, 0.51000585594514, 0.9079810635430778, 0.6853997282758446, 0.8528828135556457, 0.87188126207257, 0.7719851102398325, 0.6052090449514647, 0.5439525276524442, 0.682675183270885, 0.8985844607429044, 
0.9244979425720699, 0.9431921169584396, 0.9418565302849833, 0.5694727276227745, 0.9534472438835313, 0.5942742855914966, 0.8429859531998312, 0.74791882101791, 0.5379240029504191, 0.8993932629980099, 0.763374334444403, 0.7393895159588348, 0.6373764537647151, 0.5341033236518924, 0.6929918287924259, 0.6624909411460157, 0.6044835396852889, 0.765964389517412, 0.712851323961366, 0.6844157731844653, 0.7825413320245036, 0.7594409874290764, 0.6858676198293324, 0.8752176878144852, 0.7903119366522648, 0.7085003118434878, 0.7728976551450325, 0.6977071677331403, 0.5451914690505006, 0.8691251536277584, 0.9327479293483008, 0.9705828320682013, 0.8089116284706008, 0.5701036432846285, 0.9983408394509797, 0.9419864900185628, 0.5978568386240585, 0.962581550796276, 0.7530734534879872, 0.5471704165522373, 0.8765331867455605, 0.5063923190614499, 0.5576159995886836, 0.6817149353138281, 0.776400753070484, 0.9133000176882684, 0.6052166805574155, 0.5620923462220323, 0.7330112367466914, 0.6575310977658707, 0.8170113536393138, 0.6013701455413147, 0.7922727521870417, 0.8542593035921527, 0.6694910025684322, 0.6690600182264681, 0.9581812937944528, 0.9305703301987547, 0.7000032100737643, 0.7816918685480367, 0.6207157741141341, 0.6813659019009743, 0.7861698891284206, 0.6834164827645137, 0.7993589031837256, 0.7945656094495045, 0.9276353042968118, 0.8676439718537908, 0.5139624545655843, 0.9980801173507696, 0.5411711101170709, 0.9529160105196668, 0.8872438016486355, 0.7094991584276964, 0.6246304004459575, 0.7829728074064866, 0.6875087553364885, 0.9321058828941455, 0.82490152255788, 0.6741802253039462, 0.672527072958155, 0.6533725265528525, 0.5157944715842857, 0.6862204058436541, 0.5386629802819114, 0.6686133192834485, 0.7595118107827032, 0.5629930232786307, 0.5443617027598977, 0.9567775869844825, 0.5267380386921797, 0.8259099030376709, 0.8247903782301618, 0.7998670186485825, 0.7100817855913724, 0.9455368362588076, 0.6373146872655051, 0.7829759996005201, 0.9641952702616102, 0.7379061687914397, 
0.6026363099553813, 0.5979123545964343, 0.7179722712330308, 0.7377579950184692, 0.5423229935078536, 0.7432279855836146, 0.9508563119920257, 0.5567387729137094, 0.7283452699962597, 0.8452511018457717, 0.5379099110928142, 0.5344240315416176, 0.9931484934708605, 0.6551387724370433, 0.8419372770279039, 0.8103241217375315, 0.8643408513686817, 0.5393215788111236, 0.6826276097223396, 0.5042055270043664, 0.7164638786947117, 0.812495179831806, 0.6803456515435913, 0.8665779987099416, 0.7434754637189807, 0.5418790651645224, 0.5969841234965321, 0.5714115129003565, 0.5296179052184365, 0.941745439668991, 0.6724763724633647, 0.6489156532557645, 0.504396796822572, 0.9117598496823007, 0.7781107012862507, 0.7668192851455653, 0.649998375329447, 0.5823189733502152, 0.5235009480101604, 0.7760256196079008, 0.502417740926908, 0.8354277745447491, 0.8050688027098019, 0.6325257499554592, 0.9183218973488798, 0.7427055772467563, 0.8647319298250131, 0.973317426180679, 0.8197318206753121, 0.985894688937051, 0.5847295629662874, 0.9366592110665866, 0.6440964750849779, 0.9374043973195703, 0.6934894003912062, 0.8238919839078758, 0.5428723359774618, 0.9720409571425758, 0.9072113469082762, 0.8220770304476176, 0.722846697083432, 0.6362310799607367, 0.5969611953683343, 0.6144945806610742, 0.8132173037445745, 0.7580253318365922, 0.8366226650563456, 0.6593223355443956, 0.6011310072394416, 0.9578065099530665, 0.6676630998735124, 0.6666540621937143, 0.598695012326725, 0.5888961909140482, 0.6816785069156851, 0.7191304941746133, 0.7674980704313197, 0.6937625556664437, 0.8861898009558686, 0.7564103534255798, 0.5862328138326715, 0.7304388966690478, 0.6876474111419402, 0.7100910673682006, 0.6578796426683646, 0.9989775510915446, 0.6230680110843487, 0.8437516394152887, 0.8901359706791371, 0.9697831958579324, 0.7025759399461017, 0.7346108603963605, 0.8437668015838188, 0.8333442328899533, 0.8130047331452743, 0.5027269958025478, 0.9190226481557232, 0.776038421196717, 0.6266776650365383, 0.6119346631793215, 
0.921836765932325, 0.8476632082989659, 0.6172047368337772, 0.7313543787954244, 0.7926330685062252, 0.7603290233044823, 0.5080260763861443, 0.6349345123683838, 0.5371303368193714, 0.9892995381534511, 0.5081073138326617, 0.5997074211776425, 0.8052730786390123, 0.7488002830490176, 0.5094640373323592, 0.9007626500716743, 0.6070935415942754, 0.6723994763973502, 0.6805390825067671, 0.7698575712498194, 0.5898395236831719, 0.6912828151047359, 0.8581099583704712, 0.8280074393531984, 0.9994599014850238, 0.7542077577873895, 0.805306581179291, 0.5611091969257713, 0.9400010349553405, 0.7203407731045735, 0.581510552457088, 0.6593051964225676, 0.7596437281444015, 0.8732439523405535, 0.928075028408016, 0.7669710571550463, 0.6171779744118003, 0.7147599201098879, 0.9403171261881806, 0.7088561243213507, 0.6711666854714922, 0.8007430358348666, 0.7611410788582464, 0.9107902046878738, 0.8019068793081018, 0.6840153571016974, 0.8093625631337481, 0.8856127293832835, 0.8122950155076324, 0.9688128662288201, 0.8890842795024578, 0.9491460579906079, 0.7417565666021101, 0.8397263728118498, 0.8844564467484931, 0.9767838972574381, 0.7331377380005264, 0.9538212587133674, 0.525991103242453, 0.948735361011768, 0.9514377040712645, 0.646820757418808, 0.9798326229408245, 0.9330663635365752, 0.5594126565637778, 0.9009030151730182, 0.6163220375412689, 0.8196971127821873, 0.8564691073385102, 0.9826343142863739, 0.8841501385803107, 0.9253429909343539, 0.6446113385027867, 0.511859386829933, 0.7870784245548446, 0.5522886058700687, 0.7596689356879304, 0.7775620478158616, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 654, 656, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 752, 754, 756, 758, 761, 763, 765, 767, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 836, 
838, 840, 842, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 868, 870, 873, 875, 880, 882, 884, 886, 888, 890, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 918, 920, 923, 925, 928, 930, 933, 935, 938, 940, 942, 944, 947, 949, 952, 954, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1040, 1042, 1044, 1046, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1082, 1084, 1086, 1088, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1142, 1144, 1147, 1149, 1152, 1154, 1157, 1159, 1165, 1167, 1170, 1172, 1175, 1177, 1180, 1182, 1185, 1187, 1189, 1191, 1193, 1195, 1198, 1200, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1396, 1398, 1400, 1402, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1434, 1436, 1438, 1440, 1443, 1445, 1448, 1450, 1456, 1458, 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1477, 1480, 1482, 1485, 1487, 1490, 1492, 1495, 1497, 1500, 1502, 1505, 1507, 1510, 1512, 1514, 1516, 1518, 1520, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1560, 1562, 1564, 1566, 1569, 1571, 1574, 1576, 1581, 1583, 1586, 1588, 1591, 1593, 1596, 1598, 
1601, 1603, 1606, 1608, 1611, 1613, 1616, 1618, 1621, 1623, 1626, 1628, 1631, 1633, 1156, 1156, 1164, 1162, 1161, 1164, 1162, 1161, 1590, 1455, 1453, 409, 408, 1590, 1455, 1453, 1455, 1453, 1578, 1573, 1585, 1504, 1509, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 409, 408, 1453, 1455, 1455, 1453, 1504, 1509, 1479, 1489, 1479, 1489, 1635, 1630, 1585, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1378, 1504, 1509, 1504, 1509, 1378, 1504, 1509, 1504, 1509, 1578, 1573, 1585, 1590, 1578, 1573, 1585, 1590, 1630, 1635, 1635, 1630, 1578, 1573, 1585, 1590, 946, 958, 1455, 1453, 1489, 1489, 1504, 1509, 1504, 1509, 1578, 1573, 1578, 1573, 1585, 1590, 1578, 1573, 1578, 1573, 1585, 1590, 1580, 1305, 858, 858, 845, 845, 879, 879, 958, 946, 946, 958, 1141, 1141, 1091, 1091, 1164, 1162, 1164, 1162, 1164, 1162, 1164, 1162, 1455, 1453, 1479, 1479, 1479, 1489, 1479, 1489, 1455, 1453, 1305, 1509, 1509, 1504, 1504, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1635, 1630, 1635, 1630, 1580, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 
3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 
3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 15, 877, 872, 4035, 858, 927, 922, 956, 951, 653, 653, 892, 1146, 1151, 1151, 1146, 1162, 1179, 1174, 658, 658, 927, 922, 4053, 956, 951, 927, 922, 1151, 1146, 1146, 1151, 658, 653, 1202, 1197, 4419, 4422, 1202, 1197, 1465, 1460, 1484, 1479, 1452, 1447, 4426, 4428, 1484, 1499, 1494, 4077, 4431, 1452, 1447, 4433, 1465, 1460, 1484, 1509, 4082, 4435, 1452, 1447, 1453, 1455, 1465, 1460, 4160, 1479, 1489, 1499, 1494, 4438, 1452, 1447, 4440, 1452, 1447, 4442, 1460, 4444, 1452, 1447, 4446, 1452, 1447, 4448, 1465, 4450, 1452, 1447, 1452, 1447, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1504, 4454, 1479, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4160, 1479, 1489, 1494, 1499, 4456, 1455, 1453, 1465, 1460, 4160, 4458, 4460, 4119, 4120, 4462, 4394, 1452, 1447, 4465, 1452, 1447, 4467, 4469, 4125, 1447, 1452, 4471, 1452, 1447, 4473, 4475, 4130, 1452, 1447, 4477, 1452, 1447, 4479, 1465, 1460, 4481, 1484, 1484, 1499, 1494, 1499, 1494, 1499, 1494, 1504, 1452, 1447, 4483, 1452, 1447, 4485, 1465, 1460, 4142, 1452, 1447, 4487, 1452, 1447, 4489, 1465, 1460, 4381, 1484, 1479, 1499, 1494, 1504, 1499, 1494, 1499, 1494, 1452, 1447, 1455, 1453, 1465, 1460, 4153, 1479, 1489, 1499, 1494, 4492, 1499, 1494, 4494, 1447, 1452, 1453, 1455, 1465, 1460, 4160, 1479, 1489, 1499, 1494, 4497, 1494, 1499, 4499, 4501, 4503, 1600, 4505, 4507, 1595, 1610, 1605, 1615, 1625, 1620, 1625, 1620, 1625, 1620, 4511, 4513, 4515, 927, 922, 4176, 956, 951, 927, 922, 4182, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 
658, 653, 1202, 1197, 4519, 1479, 1484, 1484, 1499, 1494, 1509, 1499, 1494, 1504, 1447, 1452, 1453, 1455, 1465, 1460, 4212, 1378, 1479, 1489, 1499, 1494, 4523, 1499, 1494, 4525, 4527, 4529, 4531, 1600, 1595, 4533, 4535, 4537, 1600, 1595, 4394, 4341, 1590, 1585, 1595, 1600, 1605, 1610, 1559, 1620, 1625, 1635, 1630, 877, 872, 877, 872, 877, 872, 877, 872, 4236, 4238, 877, 872, 858, 877, 872, 877, 872, 4247, 892, 927, 922, 4252, 956, 951, 4547, 927, 922, 937, 932, 956, 951, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1164, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1164, 1161, 1179, 1174, 1179, 1174, 4316, 1202, 1197, 1151, 1146, 1141, 1151, 1146, 1156, 4555, 1169, 4557, 1161, 1151, 1146, 1141, 1151, 1146, 1156, 4559, 1161, 4561, 1169, 1179, 1174, 1184, 4316, 1202, 1197, 1573, 1590, 1585, 4326, 4563, 1484, 1484, 1578, 1590, 1585, 1452, 1447, 1455, 1453, 1465, 1460, 4326, 1378, 4567, 1499, 1494, 1494, 1499, 1452, 1447, 1455, 1453, 1465, 1460, 4349, 1378, 4569, 1499, 1494, 1494, 1499, 1452, 1447, 1452, 1447, 4571, 1484, 1479, 1509, 1504, 4341, 4343, 1452, 1447, 1453, 1455, 1460, 1465, 4349, 1378, 1489, 1479, 1499, 1494, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4360, 1378, 1479, 1489, 1499, 1494, 1499, 1494, 1452, 1447, 4578, 1452, 1447, 4580, 1465, 1460, 4374, 1452, 1447, 4582, 1452, 1447, 4584, 1465, 1460, 4381, 1484, 1479, 1484, 1489, 1499, 1494, 1509, 1504, 4394, 1590, 1585, 1600, 1595, 1605, 1610, 1559, 1625, 1620, 4586, 4394, 1585, 1590, 1595, 1600, 1605, 1610, 1559, 1625, 1620, 4588, 1578, 1573, 1590, 1585, 1600, 1595, 1610, 1605, 1615, 1625, 1620, 1635, 1630, 14, 15, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4639, 4640, 4641, 4642, 4644, 4645, 4647, 4648, 4649, 4650, 4651, 
4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4665, 4666, 4668, 4669, 4671, 4673, 4674, 4676, 4677, 4679, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4714, 4715, 4716, 4717, 4718, 4721, 4722, 4724, 4725, 4726, 4728, 4729, 4732, 4733, 4734, 4736, 4737, 4740, 4741, 4742, 4744, 4745, 4747, 4748, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4762, 4763, 4765, 4766, 4767, 4768, 4769, 4771, 4772, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4798, 4799, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4818, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4878, 4879, 4884, 4885, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4979, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4989, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5003, 5004, 5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5039, 
5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5076, 5077, 5079, 5080, 5081, 5082, 5083, 5085, 5086, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118, 5119, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 5130, 5131, 5132, 5133, 4424, 4421, 4540, 4540, 4720, 4719, 4540, 4540, 4720, 4719, 4540, 4540, 4540, 4540, 4539, 4540, 4539, 12, 13, 14, 15, 5136, 5140, 5142, 5147, 5149, 5152, 5156, 5159, 5161, 5163, 5165, 5167, 5169, 5171, 5173, 5177, 5180, 5183, 5185, 5190, 5192, 5194, 5197, 5199, 5201, 5203, 5206, 5208, 5211, 5213, 5217, 5221, 5225, 5229, 5231, 5233, 5235, 5238, 5240, 5242, 5244, 5250, 5252, 5255, 5257, 5260, 5262, 5264, 5268, 5270, 5272, 5275, 5277, 5279, 5282, 5284, 5286, 5291, 5294, 5296, 5298, 5300, 5302, 5305, 5307, 5309, 5311, 5313, 5315, 5318, 5320, 5322, 5326, 5329, 5331, 5333, 5335, 5338, 5340, 5343, 5345, 5347, 5349, 5352, 5354, 5359, 5362, 5365, 5367, 5369, 5373, 5375, 5377, 5379, 5381, 5385, 5387, 5389, 5392, 5394, 5396, 5398, 5400, 5402, 5406, 5409, 5411, 5415, 5418, 5420, 5422, 5424, 5426, 5428, 5430, 5432, 5437, 5439, 5441, 5446, 5448, 5450, 5453, 5456, 5458, 5461, 5463, 5466, 5471, 5474, 5479, 5483, 5486, 5492, 5494, 5496, 5498, 5502, 5504, 5506, 5508, 5510, 5514, 5516, 5518, 5520, 5524, 5528, 5530, 5532, 5536, 5538, 5540, 5542, 5544, 5546, 5550, 5552, 5554, 5556, 5558, 5560, 5563, 5565, 5567, 5574, 5576, 5579, 5581, 5583, 5586, 5589, 5591, 5593, 5596, 5598, 5600, 5602, 5604, 5607, 5609, 4544, 4543, 5146, 5436, 5445, 5470, 5469, 5478, 5477, 4544, 4543, 5146, 5436, 5445, 5470, 5469, 5478, 5477, 4544, 4543, 5414, 5436, 5445, 5470, 5469, 5478, 5611, 5612, 5356, 4522, 4521, 5613, 4539, 5614, 4539, 5615, 5616, 5176, 4638, 4680, 5254, 5259, 4522, 4521, 
4522, 4521, 4522, 4521, 4539, 5324, 4539, 5325, 4539, 4522, 4521, 5356, 5617, 4539, 5618, 4539, 4672, 4680, 5254, 5259, 4522, 4521, 4522, 4521, 4522, 4521, 5228, 5619, 5620, 4539, 5324, 4539, 5325, 4539, 5254, 5259, 4522, 4521, 5290, 5621, 5324, 5622, 5325, 5623, 4522, 4521, 5356, 5624, 5625, 5626, 5627, 4540, 4539, 4544, 4543, 5414, 5436, 5445, 5470, 5469, 5478, 5477, 5523, 5573, 4573, 5573, 4566, 4565, 4573, 5016, 5029, 5523, 5573, 4573, 4573, 5573, 5571, 4590, 4590, 4546, 4545, 4542, 4541, 5813, 5814, 5139, 4546, 4545, 5815, 5337, 4517, 5342, 4518, 5742, 4549, 4550, 4551, 4417, 5816, 4417, 4551, 5817, 4417, 4551, 5455, 4554, 4553, 5644, 4417, 4551, 5818, 5819, 4417, 4551, 5820, 5821, 5481, 5645, 4546, 4545, 4542, 4541, 5822, 5823, 4546, 4545, 5408, 5824, 5337, 4517, 5342, 4518, 5742, 4550, 4549, 4551, 4417, 5825, 4417, 4551, 5826, 4417, 4551, 5455, 4554, 4553, 5644, 4417, 4551, 5827, 5828, 4417, 4551, 5829, 5830, 5481, 5645, 4546, 4545, 4542, 4541, 5831, 5832, 4546, 4545, 5408, 5833, 5158, 4517, 5342, 4518, 5742, 4550, 4549, 4418, 4552, 5834, 4418, 4552, 5835, 4418, 4552, 5455, 4554, 4553, 5644, 4418, 4552, 5836, 5837, 4418, 4552, 5838, 5481, 5645, 4646, 4643, 5562, 5087, 5084, 5569, 5841, 5842, 5843, 5364, 5188, 5720, 5371, 5722, 4880, 4877, 5845, 5809, 5606, 5812, 5847, 5728, 5391, 5731, 5671, 5246, 5848, 4800, 4797, 4764, 4698, 5281, 4773, 4770, 5288, 5850, 5293, 4575, 4574, 5667, 5237, 5669, 4713, 4670, 4637, 5851, 4678, 4675, 5852, 4730, 4727, 5853, 4738, 4453, 4452, 5854, 5855, 5856, 4574, 5857, 5858, 4575, 5859, 5860, 5227, 5652, 5196, 5654, 4664, 5861, 5862, 5863, 5864, 5328, 4510, 4509, 4723, 5865, 5728, 5391, 5731, 4646, 4643, 5562, 5087, 5084, 5569, 5866, 5867, 5868, 5364, 5188, 5720, 5371, 5722, 4880, 4877, 5870, 5809, 5606, 5812, 5872, 5728, 5391, 5731, 5652, 5196, 5654, 4664, 4670, 4667, 5873, 4678, 4675, 5874, 4730, 4727, 5875, 4738, 4453, 4452, 5876, 5877, 5878, 4574, 5879, 5880, 4575, 5881, 5882, 5227, 4764, 4698, 5281, 4773, 4770, 5288, 5883, 
5293, 4575, 4574, 5667, 5237, 5669, 4713, 5671, 5246, 5884, 4800, 4797, 5886, 5887, 5888, 5889, 5328, 4510, 4509, 4723, 5890, 5728, 5391, 5731, 4730, 4727, 5891, 4738, 4735, 5892, 4746, 4743, 4749, 5893, 5894, 5274, 4575, 4574, 4764, 4761, 5281, 4773, 4770, 5288, 5895, 4575, 4574, 5293, 5693, 5304, 5695, 4800, 4797, 5699, 5317, 5701, 4815, 4812, 5897, 5899, 5328, 4831, 4510, 4509, 5728, 5391, 5731, 5337, 4517, 5342, 4518, 5351, 4552, 4551, 5716, 5075, 4855, 5562, 5087, 5084, 5569, 5901, 5902, 5903, 5364, 5361, 5720, 5371, 5722, 4880, 4877, 5904, 5725, 5906, 5726, 5606, 5812, 5908, 5909, 5728, 5391, 5731, 4546, 4545, 4542, 4541, 5910, 5911, 4546, 4545, 5408, 5912, 5417, 4928, 5742, 4550, 4549, 5434, 4552, 4551, 5913, 5443, 4552, 4551, 5914, 5452, 4552, 4551, 5455, 4554, 4553, 5757, 5468, 5465, 5915, 5916, 5476, 5473, 5917, 5918, 5481, 5763, 5780, 5534, 5782, 4575, 4574, 5786, 5488, 5788, 4577, 4576, 5075, 5078, 5562, 5084, 5038, 5569, 5919, 5920, 5778, 5921, 5809, 5606, 5812, 5780, 5534, 5782, 4575, 4574, 5786, 5488, 5788, 4577, 4576, 5075, 5002, 5562, 5084, 5038, 5569, 5922, 5923, 5924, 5798, 5925, 5809, 5606, 5812, 5767, 5500, 5926, 4577, 4576, 5772, 5512, 5927, 4577, 4576, 5780, 5534, 5782, 4575, 4574, 5075, 5078, 5562, 5084, 5038, 5569, 5928, 5929, 5778, 5930, 5800, 5585, 5109, 5931, 5804, 5595, 5120, 5780, 5534, 5782, 4575, 4574, 5786, 5548, 5788, 4577, 4576, 5078, 5075, 5562, 5087, 5084, 5569, 5932, 5933, 5798, 5934, 5800, 5585, 5109, 5935, 5804, 5595, 5120, 4590, 5809, 5606, 5812, 10, 11, 12, 13, 14, 15, 5936, 5937, 5938, 5939, 5940, 5942, 5943, 5944, 5946, 5947, 5948, 5949, 5950, 5951, 5952, 5953, 5954, 5956, 5957, 5959, 5960, 5961, 5962, 5963, 5964, 5965, 5966, 5967, 5969, 5970, 5971, 5973, 5974, 5975, 5976, 5977, 5978, 5979, 5981, 5982, 5983, 5985, 5986, 5987, 5988, 5989, 5990, 5991, 5992, 5993, 5995, 5996, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6008, 6009, 6010, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6020, 6021, 6022, 6024, 6025, 6026, 
6027, 6028, 6029, 6030, 6031, 6032, 6034, 6035, 6037, 6038, 6039, 6040, 6041, 6042, 6043, 6044, 6045, 6047, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6061, 6062, 6063, 6064, 6065, 6066, 6067, 5844, 6069, 6070, 6071, 5846, 6073, 6074, 6075, 6076, 6077, 6079, 6080, 6081, 6082, 6083, 6084, 6085, 6086, 6088, 6089, 6090, 6091, 6092, 6093, 6094, 6095, 6096, 6098, 6099, 6101, 6102, 6104, 6105, 6106, 6108, 6110, 6111, 6113, 6114, 6116, 6117, 6118, 6119, 6120, 6125, 6126, 6127, 6128, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6139, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 5869, 6150, 6151, 6152, 5871, 6154, 6155, 6156, 6157, 6158, 6159, 6160, 6161, 6162, 6164, 6165, 6167, 6168, 6170, 6171, 6172, 6174, 6176, 6177, 6179, 6180, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6190, 6191, 6192, 6193, 6194, 6195, 6196, 6197, 6198, 6200, 6201, 6206, 6207, 6208, 6209, 6211, 6212, 6213, 6214, 6215, 6217, 6218, 6220, 6221, 6222, 6223, 6225, 6226, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 6243, 6244, 6245, 6246, 6247, 6250, 6251, 6252, 6253, 6254, 6255, 6256, 6257, 6258, 6259, 6260, 6261, 6262, 6263, 6264, 6265, 6266, 6267, 6268, 6269, 6270, 6271, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6282, 6284, 6285, 6286, 6287, 6289, 6290, 6291, 6292, 6293, 6294, 6295, 6296, 6298, 6299, 6300, 6302, 6303, 6304, 6305, 6306, 6307, 6308, 6309, 6311, 6312, 6313, 6315, 6316, 6317, 6318, 6319, 6320, 6321, 6322, 6323, 6324, 6326, 6327, 6328, 6330, 6331, 6332, 6333, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6341, 6342, 6343, 6344, 6345, 6346, 6347, 6348, 6350, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6359, 6360, 6361, 6362, 6363, 6364, 6365, 6366, 6367, 6368, 6369, 6370, 6371, 6374, 6376, 6377, 6378, 6379, 6380, 6382, 6383, 6384, 6385, 6387, 6388, 6389, 6390, 6391, 6392, 6393, 6394, 6395, 6396, 6397, 6398, 6399, 6400, 6402, 6404, 6405, 6406, 6408, 6409, 6410, 6411, 6412, 6413, 6414, 6415, 6416, 6417, 6418, 6419, 6420, 6421, 
6422, 6423, 6424, 6425, 6426, 6427, 6429, 6431, 6432, 6433, 6435, 6436, 6437, 6438, 6439, 6440, 6441, 6124, 6122, 6205, 6203, 6249, 6248, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6448, 6450, 6453, 6461, 6463, 6465, 6467, 6470, 6473, 6476, 6481, 6483, 6486, 6494, 6496, 6498, 6500, 6503, 6506, 6509, 6514, 6516, 6519, 6527, 6529, 6531, 6533, 6536, 6539, 6542, 6544, 6547, 6550, 6553, 6554, 6559, 6571, 6573, 6576, 6579, 6586, 6588, 6590, 6592, 6606, 6612, 6615, 6618, 6619, 6624, 6638, 6640, 6642, 6644, 6653, 6656, 6659, 6668, 6671, 6677, 6679, 6681, 6685, 6688, 6691, 6694, 6700, 6705, 6708, 6718, 6722, 6725, 6728, 6729, 6734, 6744, 6746, 6749, 6755, 6757, 6760, 6763, 6767, 6770, 6773, 6781, 6786, 6788, 6791, 6802, 6807, 6809, 6812, 6815, 6822, 6826, 6831, 6833, 6836, 6850, 6855, 6857, 6860, 6459, 6457, 6480, 6492, 6490, 6513, 6525, 6523, 6546, 6557, 6562, 6564, 6566, 6568, 6570, 6583, 6585, 6600, 6598, 6596, 6602, 6604, 6875, 6876, 6609, 6611, 6622, 6627, 6629, 6631, 6633, 6635, 6637, 6652, 6650, 6648, 6663, 6665, 6667, 6877, 6878, 6674, 6676, 6698, 6703, 6879, 6880, 6711, 6713, 6717, 6715, 6777, 6732, 6737, 6736, 6739, 6741, 6743, 6753, 6777, 6779, 6784, 6795, 6796, 6798, 6800, 6805, 6817, 6819, 6821, 6825, 6829, 6840, 6841, 6843, 6844, 6846, 6848, 6853, 6864, 6865, 6867, 6868, 6870, 6872, 6874, 13, 14, 15, 6896, 6898, 6906, 6908, 6916, 6918, 6935, 6939, 6940, 6949, 6952, 6954, 6958, 6961, 6964, 6965, 6971, 6973, 6975, 6976, 6977, 6899, 6999, 7000, 6469, 5958, 5955, 6472, 6478, 6475, 7001, 6909, 7002, 7003, 6502, 5997, 5994, 6505, 6511, 6508, 7004, 6919, 7005, 7006, 6535, 6036, 6033, 6538, 6926, 6541, 7007, 6552, 6549, 6930, 7008, 6931, 7009, 7010, 7011, 7012, 7013, 6932, 6578, 6575, 7014, 7015, 6103, 6100, 6097, 7016, 7017, 7018, 7019, 7020, 7021, 7023, 7024, 6617, 6614, 6944, 7025, 6945, 7026, 7027, 7028, 7029, 7030, 7031, 6169, 6166, 6163, 7032, 7033, 7034, 6658, 6655, 7035, 7036, 7037, 6953, 7038, 7040, 7041, 6683, 6219, 6216, 6693, 6690, 7042, 6962, 
7043, 6963, 7044, 7046, 7047, 6974, 7048, 7049, 6721, 6775, 6772, 7050, 6727, 6724, 6969, 7051, 6970, 7052, 7053, 7054, 7055, 7056, 6974, 7057, 6769, 6775, 6772, 7058, 7059, 6981, 7060, 6982, 6793, 6790, 7061, 7062, 7063, 7064, 6985, 7065, 6986, 6814, 6811, 6816, 7066, 7067, 7068, 6990, 7069, 6991, 7070, 6992, 6838, 6835, 7071, 7072, 7073, 7074, 7075, 7076, 6995, 7077, 6996, 6862, 6859, 7078, 7079, 7080, 7081, 7082, 7083, 7084, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 5945, 6452, 7109, 7112, 7113, 7114, 7115, 7116, 7117, 5984, 6485, 7119, 7122, 7123, 7124, 7125, 7126, 7127, 6023, 6518, 7129, 7132, 7133, 7134, 7135, 7136, 7137, 7139, 7140, 7141, 7143, 7149, 7150, 7151, 7094, 6107, 7154, 7155, 7156, 7157, 7096, 7165, 7166, 7167, 7169, 6173, 7176, 7177, 7178, 7179, 7182, 7183, 7098, 7187, 7099, 7191, 7192, 7193, 7100, 7194, 7195, 7101, 7197, 7199, 7102, 6301, 6748, 7203, 6766, 6314, 6310, 7206, 7207, 7208, 7210, 7211, 7212, 7214, 7215, 6301, 6748, 7220, 6766, 6314, 6310, 7222, 7223, 7224, 7227, 7229, 7230, 7231, 7236, 7238, 7239, 7240, 7241, 7245, 7247, 7249, 7250, 7251, 7258, 7260, 7261, 7262, 7147, 7145, 7161, 7153, 7164, 7173, 7171, 7175, 7185, 7190, 7202, 7219, 7234, 7243, 7256, 7254, 7269, 7267, 7265, 13, 14, 15, 7280, 7281, 7282, 7283, 7287, 7289, 7290, 7291, 7292, 7296, 7298, 7299, 7300, 7301, 7305, 7307, 7312, 7314, 7315, 7317, 7319, 7320, 7321, 7325, 7327, 7329, 7330, 7332, 7334, 7335, 7338, 7339, 7341, 7344, 7345, 7346, 7347, 7348, 7349, 7350, 7352, 7354, 7359, 7360, 7361, 7362, 7363, 7364, 7366, 7370, 7374, 7380, 7384, 7310, 7386, 7387, 7388, 7389, 7311, 7390, 7324, 7391, 7392, 7393, 7333, 7394, 7395, 7343, 7342, 7396, 7357, 7397, 7217, 7369, 7368, 7398, 7373, 7372, 7399, 7379, 7378, 7377, 7400, 7401, 7383, 7382, 7402, 7403, 7404, 9, 10, 11, 12, 13, 14, 15, 7408, 7410, 7411, 7413, 7415, 7416, 7418, 7420, 7421, 7426, 7431, 7437, 7442, 7444, 7445, 7450, 7453, 7118, 7128, 7138, 7461, 7309, 7462, 7425, 7466, 7429, 7468, 7323, 7469, 7435, 7472, 7436, 7475, 7476, 
7440, 7441, 7209, 7478, 7356, 7480, 7225, 7232, 7481, 7482, 7376, 7484, 7485, 7252, 7487, 7488, 7489, 7490, 7263, 7492, 7493, 7494, 8, 9, 10, 11, 12, 13, 14, 15, 7513, 7514, 7518, 7520, 7505, 7521, 7286, 7508, 7522, 7295, 7511, 7523, 7304, 7525, 7527, 7529, 7531, 7533, 7471, 7535, 7538, 7438, 7536, 7539, 7517, 7540, 7542, 7479, 7452, 7544, 7545, 7548, 7551, 7553, 7556, 7559, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7572, 7574, 7575, 7577, 7578, 7580, 7524, 7428, 7464, 7467, 7530, 7433, 7474, 7589, 7477, 7592, 7351, 7541, 7596, 7365, 7598, 7599, 7600, 7602, 8, 9, 10, 11, 12, 13, 14, 15, 7573, 7576, 7579, 7623, 7627, 7588, 7632, 7635, 7636, 7637, 7638, 7639, 7532, 7526, 7595, 15, 7651, 7652, 7590, 7593, 7597, 7650, 7649, 7648, 7660, 7661, 7662, 7603, 7555, 7486, 7483, 15, 7624, 7665, 7669, 7670, 7671, 7630, 7667, 7668, 7675, 7676, 7677, 7678, 12, 13, 14, 15, 7680, 7681, 7682, 7685, 7686, 7687, 7688, 7690, 8, 9, 10, 11, 12, 13, 14, 15, 7698, 7628, 7625, 7702, 7674, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7713, 7714, 7715, 7716, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7672, 7699, 7730, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7744, 7746, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7760, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7776, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7792, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7761, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 655, 657, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 753, 755, 757, 759, 762, 764, 766, 768, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 837, 
839, 841, 843, 847, 849, 851, 853, 855, 857, 860, 862, 864, 866, 869, 871, 874, 876, 881, 883, 885, 887, 889, 891, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 919, 921, 924, 926, 929, 931, 934, 936, 939, 941, 943, 945, 948, 950, 953, 955, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1041, 1043, 1045, 1047, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1083, 1085, 1087, 1089, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1143, 1145, 1148, 1150, 1153, 1155, 1158, 1160, 1166, 1168, 1171, 1173, 1176, 1178, 1181, 1183, 1186, 1188, 1190, 1192, 1194, 1196, 1199, 1201, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1397, 1399, 1401, 1403, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1435, 1437, 1439, 1441, 1444, 1446, 1449, 1451, 1457, 1459, 1462, 1464, 1468, 1470, 1472, 1474, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1515, 1517, 1519, 1521, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1561, 1563, 1565, 1567, 1570, 1572, 1575, 1577, 1582, 1584, 1587, 1589, 1592, 1594, 1597, 1599, 
1602, 1604, 1607, 1609, 1612, 1614, 1617, 1619, 1622, 1624, 1627, 1629, 1632, 1634, 1039, 1048, 1163, 1163, 136, 1163, 1163, 137, 760, 1442, 1442, 1429, 1429, 751, 1454, 1454, 1442, 1442, 572, 572, 760, 1395, 1395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1429, 1429, 1454, 1454, 1442, 1442, 1404, 1404, 1235, 1235, 1256, 1256, 1568, 1568, 751, 1454, 1454, 1442, 1442, 374, 374, 1454, 1454, 1442, 1442, 395, 395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1454, 1454, 1442, 1442, 494, 1395, 1395, 1404, 1404, 523, 1395, 1395, 1404, 1404, 572, 572, 751, 751, 572, 572, 751, 751, 1522, 1522, 1568, 1568, 572, 572, 751, 751, 917, 917, 1442, 1442, 1235, 1256, 1395, 1395, 1404, 1404, 769, 769, 769, 769, 751, 751, 769, 769, 769, 769, 760, 760, 770, 770, 867, 878, 835, 844, 867, 878, 917, 917, 957, 957, 1039, 1048, 1081, 1090, 1124, 1124, 1124, 1124, 1163, 1163, 1163, 1163, 1442, 1442, 1235, 1256, 1235, 1235, 1256, 1256, 1454, 1454, 1579, 1395, 1404, 1395, 1404, 1454, 1454, 1442, 1442, 1442, 1442, 1454, 1454, 1522, 1522, 1568, 1568, 1579, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 
3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 1655, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1681, 1688, 1689, 1690, 1691, 1696, 1697, 1698, 1701, 1702, 1708, 1709, 1710, 1722, 1723, 1726, 1727, 1730, 1731, 1733, 1734, 1737, 1738, 1741, 1742, 1744, 1745, 1748, 1751, 1765, 1766, 1781, 1782, 1788, 1789, 1790, 1791, 1794, 1795, 1797, 1800, 1801, 1804, 1805, 1806, 1807, 1811, 1812, 1815, 1816, 1817, 1818, 1822, 1823, 1826, 1827, 1830, 1831, 1843, 1844, 1847, 1848, 1854, 1855, 1858, 1859, 1879, 1884, 1885, 1888, 1889, 1897, 1902, 1903, 1906, 1907, 1908, 1909, 1910, 1911, 1913, 1914, 1915, 1916, 1923, 1926, 1929, 1930, 1931, 1932, 1933, 1934, 1940, 1946, 
1958, 1959, 1962, 1964, 1983, 1984, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2008, 2022, 2025, 2031, 2033, 2039, 2042, 2050, 2051, 2058, 2061, 2082, 2085, 2094, 2097, 2107, 2108, 2110, 2111, 2119, 2120, 2122, 2123, 2135, 2136, 2138, 2140, 2152, 2153, 2166, 2167, 2176, 2177, 2184, 2197, 2200, 2213, 2216, 2219, 2220, 2223, 2224, 2230, 2231, 2234, 2235, 2257, 2258, 2269, 2270, 2273, 15, 4033, 4032, 4034, 4036, 4038, 4037, 4040, 4039, 4041, 4315, 4042, 4044, 4043, 4046, 4045, 4047, 4049, 4048, 4063, 4315, 4051, 4050, 4052, 4055, 4054, 4057, 4056, 4059, 4058, 4061, 4060, 4063, 4062, 4065, 4064, 4420, 4423, 4067, 4066, 4069, 4068, 4070, 4071, 4073, 4072, 4427, 4429, 4074, 4076, 4075, 572, 4432, 4139, 4138, 4434, 4079, 4078, 4080, 4081, 572, 4436, 4205, 4083, 4157, 4207, 4210, 4158, 4118, 4215, 4161, 4365, 4364, 4439, 4085, 4084, 4441, 4087, 4086, 4443, 4088, 4445, 4090, 4089, 4447, 4092, 4091, 4449, 4093, 4451, 4095, 4094, 4096, 4095, 4097, 4098, 4100, 4099, 4101, 4102, 4104, 4103, 4105, 4106, 4108, 4107, 4109, 4455, 4110, 4203, 4202, 4156, 4205, 4207, 4111, 4210, 4112, 4118, 4215, 4113, 4366, 4332, 4457, 4115, 4114, 4117, 4116, 4118, 4459, 4461, 572, 572, 4463, 572, 4122, 4121, 4466, 4124, 4123, 4468, 4470, 1466, 4127, 4126, 4472, 4129, 4128, 4474, 4476, 1466, 4139, 4131, 4478, 4336, 4335, 4480, 4141, 4132, 4482, 4133, 4134, 4147, 4135, 4149, 4148, 4203, 4136, 4137, 4139, 4138, 4484, 4336, 4335, 4486, 4141, 4140, 1429, 4378, 4377, 4488, 4376, 4375, 4490, 4380, 4379, 1466, 4143, 4196, 4145, 4144, 4204, 4147, 4146, 4149, 4148, 4150, 4156, 4151, 4157, 4210, 4158, 4152, 4215, 4154, 4365, 4155, 4493, 4216, 4352, 4495, 4156, 4205, 4157, 4207, 4210, 4158, 4159, 4215, 4161, 4365, 4364, 4498, 4366, 4332, 4500, 4502, 4504, 4162, 4506, 4508, 4163, 4165, 4164, 4166, 4168, 4167, 4170, 4169, 4172, 4171, 4512, 4514, 4516, 4174, 4173, 4175, 4178, 4177, 4180, 4179, 4181, 4184, 4183, 4186, 4185, 4188, 4187, 4190, 4189, 4191, 4193, 4192, 
4195, 4194, 4520, 4196, 4197, 4198, 4200, 4199, 4201, 4203, 4202, 4204, 4206, 4205, 4208, 4207, 4210, 4209, 4211, 4213, 4215, 4214, 4365, 4364, 4524, 4216, 4352, 4526, 4528, 4530, 4532, 4218, 4217, 4534, 4536, 4538, 4220, 4219, 769, 769, 4395, 4390, 4221, 4409, 4400, 4222, 4223, 4402, 4224, 4226, 4225, 4228, 4227, 4230, 4229, 4232, 4231, 4234, 4233, 4235, 4237, 4240, 4239, 4241, 4243, 4242, 4245, 4244, 4246, 4248, 4250, 4249, 4251, 4254, 4253, 4548, 4256, 4255, 4258, 4257, 4260, 4259, 4262, 4261, 4264, 4263, 4266, 4265, 4268, 4267, 4269, 4270, 4271, 4273, 4272, 4275, 4274, 4277, 4276, 4278, 4279, 4280, 4282, 4281, 4284, 4283, 4286, 4285, 4287, 4289, 4288, 4290, 4292, 4291, 4294, 4293, 4315, 4318, 4295, 4297, 4296, 4298, 4300, 4299, 4301, 4556, 4302, 4558, 4303, 4305, 4304, 4306, 4308, 4307, 4309, 4560, 4310, 4562, 4311, 4313, 4312, 4314, 4315, 4318, 4317, 4319, 4395, 4320, 4330, 4564, 4321, 4322, 4323, 4325, 4324, 4327, 4354, 4329, 4328, 4358, 4357, 4330, 4331, 4568, 4365, 4364, 4366, 4332, 4327, 4354, 4329, 4328, 4358, 4357, 4330, 4331, 4570, 4365, 4364, 4366, 4332, 4334, 4333, 4336, 4335, 4572, 4337, 4338, 4340, 4339, 4342, 4342, 4345, 4344, 4355, 4346, 4357, 4347, 4348, 4350, 4362, 4351, 4365, 4364, 4367, 4352, 4354, 4353, 4356, 4355, 4358, 4357, 4359, 4361, 4363, 4362, 4365, 4364, 4367, 4366, 4369, 4368, 4579, 4371, 4370, 4581, 4373, 4372, 1429, 4376, 4375, 4583, 4378, 4377, 4585, 4380, 4379, 1466, 4382, 4383, 4384, 4385, 4387, 4386, 4389, 4388, 4393, 4395, 4390, 4397, 4391, 4400, 4399, 4392, 4403, 4402, 4587, 4393, 4396, 4395, 4398, 4397, 4400, 4399, 4401, 4403, 4402, 4589, 4405, 4404, 4407, 4406, 4409, 4408, 4411, 4410, 4412, 4414, 4413, 4416, 4415, 14, 15, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1679, 1680, 1682, 1683, 1684, 1685, 1686, 1687, 1692, 1693, 1694, 1695, 1699, 1700, 1703, 1704, 
1705, 1706, 1707, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1724, 1725, 1728, 1729, 1732, 1735, 1736, 1739, 1740, 1743, 1746, 1747, 1749, 1750, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1783, 1784, 1785, 1786, 1787, 1792, 1793, 1796, 1798, 1799, 1802, 1803, 1808, 1809, 1810, 1813, 1814, 1819, 1820, 1821, 1824, 1825, 1828, 1829, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1845, 1846, 1849, 1850, 1851, 1852, 1853, 1856, 1857, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1880, 1881, 1882, 1883, 1886, 1887, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1898, 1899, 1900, 1901, 1904, 1905, 1912, 1917, 1918, 1919, 1920, 1921, 1922, 1924, 1925, 1927, 1928, 1935, 1936, 1937, 1938, 1939, 1941, 1942, 1943, 1944, 1945, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1960, 1961, 1963, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1985, 1986, 1995, 1996, 2003, 2004, 2005, 2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2029, 2030, 2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2052, 2053, 2054, 2055, 2056, 2057, 2059, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2095, 2096, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2109, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2121, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2137, 2139, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2168, 2169, 2170, 2171, 2172, 2173, 
2174, 2175, 2178, 2179, 2180, 2181, 2182, 2183, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2198, 2199, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2214, 2215, 2217, 2218, 2221, 2222, 2225, 2226, 2227, 2228, 2229, 2232, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2271, 2272, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 4628, 4627, 4652, 4832, 4491, 4491, 4652, 4832, 4491, 4491, 4816, 4819, 4832, 4882, 4881, 4887, 4886, 12, 13, 14, 15, 5137, 5141, 5143, 5148, 5150, 5153, 5157, 5160, 5162, 5164, 5166, 5168, 5170, 5172, 5174, 5178, 5181, 5184, 5186, 5191, 5193, 5195, 5198, 5200, 5202, 5204, 5207, 5209, 5212, 5214, 5218, 5222, 5226, 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5245, 5251, 5253, 5256, 5258, 5261, 5263, 5265, 5269, 5271, 5273, 5276, 5278, 5280, 5283, 5285, 5287, 5292, 5295, 5297, 5299, 5301, 5303, 5306, 5308, 5310, 5312, 5314, 5316, 5319, 5321, 5323, 5327, 5330, 5332, 5334, 5336, 5339, 5341, 5344, 5346, 5348, 5350, 5353, 5355, 5360, 5363, 5366, 5368, 5370, 5374, 5376, 5378, 5380, 5382, 5386, 5388, 5390, 5393, 5395, 5397, 5399, 5401, 5403, 5407, 5410, 5412, 5416, 5419, 5421, 5423, 5425, 5427, 5429, 5431, 5433, 5438, 5440, 5442, 5447, 5449, 5451, 5454, 5457, 5459, 5462, 5464, 5467, 5472, 5475, 5480, 5484, 5487, 5493, 5495, 5497, 5499, 5503, 5505, 5507, 5509, 5511, 5515, 5517, 5519, 5521, 5525, 5529, 5531, 5533, 5537, 5539, 5541, 5543, 5545, 5547, 5551, 5553, 5555, 5557, 5559, 5561, 5564, 5566, 5568, 5575, 5577, 5580, 5582, 5584, 5587, 5590, 5592, 5594, 5597, 5599, 5601, 5603, 5605, 5608, 5610, 5405, 5138, 5413, 5151, 5444, 4980, 4978, 4990, 4988, 5405, 5404, 5413, 5151, 5444, 4980, 4978, 4990, 4988, 5405, 5404, 5413, 5435, 5444, 4980, 4978, 4990, 2399, 2400, 5522, 5358, 5187, 2419, 5189, 2424, 5249, 2431, 2432, 5175, 5205, 5210, 4731, 
4739, 5216, 5179, 5220, 5219, 5224, 5223, 5182, 4430, 5248, 4430, 5249, 5358, 5187, 5570, 2503, 5189, 2508, 5249, 5205, 5210, 4731, 4739, 5216, 5215, 5220, 5219, 5224, 5223, 5289, 2555, 2556, 5247, 4464, 5248, 4464, 5249, 4731, 4739, 5267, 5266, 5289, 2605, 4817, 2607, 4820, 2613, 5358, 5357, 5570, 2641, 2642, 2644, 2645, 5384, 5383, 5405, 5404, 5413, 5435, 5444, 4980, 4978, 4990, 4988, 5522, 5572, 5485, 5572, 5490, 5489, 5491, 5501, 5513, 5522, 5572, 5526, 5527, 5572, 5570, 5578, 5588, 5735, 5734, 5733, 5632, 2289, 2290, 5736, 5738, 5737, 2294, 5633, 5639, 5710, 5711, 5741, 5743, 5634, 5636, 5635, 2304, 5750, 5748, 2307, 5753, 5751, 5754, 5756, 5637, 5144, 5759, 5758, 2316, 2317, 5761, 5760, 2320, 2321, 5762, 5145, 5735, 5734, 5733, 5732, 2328, 2329, 5738, 5737, 5736, 2333, 5708, 5639, 5710, 5711, 5741, 5744, 5743, 5636, 5635, 2343, 5750, 5748, 2346, 5753, 5751, 5754, 5756, 5637, 5154, 5759, 5758, 2355, 2356, 5761, 5760, 2359, 2360, 5762, 5155, 5735, 5734, 5733, 5732, 2367, 2368, 5738, 5737, 5736, 2372, 5638, 5639, 5640, 5711, 5741, 5744, 5743, 5642, 5641, 2382, 5750, 5749, 2385, 5753, 5752, 5754, 5756, 5755, 5643, 5759, 5758, 2394, 2395, 5761, 5760, 2398, 5762, 5482, 5649, 5791, 5650, 5795, 5794, 5796, 2409, 2410, 2411, 5718, 5717, 5719, 5721, 5372, 5724, 5723, 2420, 4425, 5810, 5811, 2425, 4425, 5729, 5730, 5692, 5672, 5849, 5697, 5696, 5684, 5683, 5646, 5687, 5686, 5688, 2441, 5665, 5691, 5690, 5666, 5668, 4496, 5670, 5657, 5647, 2451, 5659, 5658, 2454, 5674, 5673, 2457, 5676, 5661, 5660, 2461, 2462, 2463, 5648, 2465, 2466, 5663, 2468, 2469, 5664, 5651, 5653, 4496, 5655, 2475, 2476, 2477, 2478, 5704, 5706, 5705, 5707, 2483, 4430, 5729, 5730, 5649, 5777, 5650, 5795, 5794, 5796, 2493, 2494, 2495, 5718, 5717, 5719, 5721, 5372, 5724, 5723, 2504, 4437, 5810, 5811, 2509, 4437, 5729, 5730, 5651, 5653, 4496, 5655, 5657, 5656, 2519, 5659, 5658, 2522, 5674, 5673, 2525, 5676, 5661, 5660, 2529, 2530, 2531, 5662, 2533, 2534, 5663, 2536, 2537, 5664, 5684, 5683, 5685, 5687, 
5686, 5688, 2545, 5665, 5691, 5690, 5666, 5668, 4496, 5670, 5692, 5672, 5885, 5697, 5696, 2559, 2560, 2561, 2562, 5704, 5706, 5705, 5707, 2567, 4464, 5729, 5730, 5674, 5673, 2573, 5676, 5675, 2576, 5678, 5677, 5679, 2580, 2581, 5682, 5681, 5680, 5684, 5683, 5685, 5687, 5686, 5688, 2591, 5691, 5690, 5689, 5692, 5694, 4491, 5697, 5696, 5698, 5700, 4496, 5703, 5702, 2606, 2608, 5704, 5707, 5706, 5705, 4833, 5729, 5730, 5708, 5709, 5710, 5711, 5714, 5713, 5712, 5715, 5791, 5776, 5793, 5795, 5794, 5796, 2631, 2632, 2633, 5718, 5717, 5719, 5721, 5372, 5724, 5723, 5905, 4883, 5907, 4888, 5810, 5811, 2649, 2650, 5727, 5729, 5730, 5735, 5734, 5733, 5732, 2658, 2659, 5738, 5737, 5736, 2663, 5739, 5740, 5741, 5744, 5743, 5747, 5746, 5745, 2672, 5750, 5749, 5748, 2676, 5753, 5752, 5751, 5754, 5756, 5755, 5460, 5759, 5758, 2686, 2687, 5761, 5760, 2690, 2691, 5762, 5482, 5779, 5781, 5535, 5784, 5783, 5785, 5787, 5549, 5790, 5789, 5777, 5792, 5793, 5794, 5795, 5796, 2710, 2711, 5797, 2713, 5764, 5810, 5811, 5779, 5781, 5535, 5784, 5783, 5785, 5787, 5549, 5790, 5789, 5777, 5792, 5793, 5794, 5795, 5796, 2733, 2734, 2735, 5797, 2737, 5765, 5810, 5811, 5766, 5768, 2743, 5770, 5769, 5771, 5773, 2748, 5775, 5774, 5779, 5781, 5535, 5784, 5783, 5777, 5776, 5793, 5794, 5795, 5796, 2762, 2763, 5797, 2765, 5799, 5801, 5802, 2769, 5803, 5805, 5806, 5779, 5781, 5535, 5784, 5783, 5785, 5787, 5549, 5790, 5789, 5792, 5791, 5793, 5795, 5794, 5796, 2789, 2790, 5797, 2792, 5799, 5801, 5802, 2796, 5803, 5805, 5806, 5807, 5808, 5810, 5811, 10, 11, 12, 13, 14, 15, 2285, 2286, 2287, 2288, 5941, 2291, 2292, 2293, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2305, 2306, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 5968, 2318, 2319, 5972, 2322, 2323, 2324, 2325, 2326, 2327, 5980, 2330, 2331, 2332, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2344, 2345, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 6007, 2357, 2358, 6011, 2361, 2362, 2363, 2364, 2365, 2366, 6019, 2369, 2370, 2371, 
2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2383, 2384, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 6046, 2396, 2397, 5839, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 6059, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 6068, 2421, 2422, 2423, 6072, 2426, 2427, 2428, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2452, 2453, 2455, 2456, 2458, 2459, 2460, 6109, 2464, 6112, 2467, 6115, 2470, 2471, 2472, 2473, 2474, 2479, 2480, 2481, 2482, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 6140, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 6149, 2505, 2506, 2507, 6153, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2520, 2521, 2523, 2524, 2526, 2527, 2528, 6175, 2532, 6178, 2535, 6181, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2557, 2558, 2563, 2564, 2565, 2566, 2568, 2569, 2570, 2571, 2572, 2574, 2575, 2577, 2578, 2579, 6224, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2609, 2610, 2611, 2612, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 6272, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2643, 2646, 2647, 2648, 6288, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 6297, 2660, 2661, 2662, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 6325, 2688, 2689, 6329, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 6349, 2712, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 6372, 2736, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 6401, 2764, 2766, 2767, 2768, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 
2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 6428, 2791, 2793, 2794, 2795, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 6123, 6121, 6204, 6202, 5898, 5896, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6449, 6451, 6454, 6462, 6464, 6466, 6468, 6471, 6474, 6477, 6482, 6484, 6487, 6495, 6497, 6499, 6501, 6504, 6507, 6510, 6515, 6517, 6520, 6528, 6530, 6532, 6534, 6537, 6540, 6543, 5840, 6548, 6551, 6060, 6555, 6560, 6572, 6574, 6577, 6580, 6587, 6589, 6591, 6593, 6607, 6613, 6616, 6141, 6620, 6625, 6639, 6641, 6643, 6645, 6654, 6657, 6660, 6669, 6672, 6678, 6680, 6682, 6686, 6689, 6692, 6695, 6701, 6706, 6709, 6719, 6723, 6726, 6273, 6730, 6735, 6745, 6747, 6750, 6756, 6758, 6761, 6764, 6768, 6771, 6774, 6782, 6787, 6789, 6792, 6803, 6808, 6810, 6813, 6373, 6823, 6827, 6832, 6834, 6837, 6851, 6856, 6858, 6861, 6458, 6456, 6479, 6491, 6489, 6512, 6524, 6522, 6545, 6556, 6561, 6563, 6565, 6567, 6569, 6582, 6584, 6599, 6597, 6595, 6601, 6603, 2865, 2866, 6129, 6610, 6621, 6626, 6628, 6630, 6632, 6634, 6636, 6651, 6649, 6647, 6662, 6664, 6666, 2895, 2896, 6210, 6675, 6697, 6702, 2911, 2912, 5900, 6712, 6716, 6714, 6776, 6731, 6283, 6281, 6738, 6740, 6742, 6752, 6776, 6778, 6783, 6794, 6351, 6797, 6799, 6804, 6375, 6818, 6820, 6824, 6828, 6839, 6403, 6842, 6407, 6845, 6847, 6852, 6863, 6430, 6866, 6434, 6869, 6871, 6873, 13, 14, 15, 6897, 6455, 6907, 6488, 6917, 6521, 6581, 6594, 6608, 6646, 6661, 6673, 6687, 6696, 6710, 6720, 6972, 6751, 6759, 6762, 6765, 6460, 2807, 2808, 6902, 6901, 6900, 6903, 6905, 6904, 2815, 6493, 2819, 2820, 6912, 6911, 6910, 6913, 6915, 6914, 2827, 6526, 2831, 2832, 6922, 6921, 6920, 6923, 6925, 6924, 2839, 6928, 6927, 6929, 2843, 6558, 2845, 2846, 2847, 2848, 2849, 6078, 6934, 6933, 2854, 2855, 6938, 6937, 6936, 2860, 2861, 2862, 2863, 2864, 7022, 2868, 2869, 6942, 6941, 6943, 2873, 6623, 2875, 2876, 2877, 2878, 2879, 2880, 6948, 6947, 6946, 2885, 2886, 2887, 6951, 6950, 2891, 2892, 2893, 6199, 7039, 2898, 2899, 6957, 6956, 6955, 6960, 
6959, 2907, 6699, 2909, 6704, 7045, 2914, 2915, 6754, 2919, 2920, 6978, 6980, 6979, 2927, 6967, 6966, 6968, 2931, 6733, 2933, 2934, 2935, 2936, 2937, 6754, 2941, 6978, 6980, 6979, 2948, 2949, 6780, 2951, 6785, 6984, 6983, 2955, 2956, 2957, 2958, 6801, 2960, 6806, 6988, 6987, 6989, 2965, 2966, 2967, 6381, 2969, 6386, 2971, 6830, 6994, 6993, 2975, 2976, 2977, 2978, 2979, 2980, 6849, 2982, 6854, 6998, 6997, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7089, 7088, 2806, 2809, 2810, 2811, 2812, 2813, 2814, 7091, 7090, 2818, 2821, 2822, 2823, 2824, 2825, 2826, 7093, 7092, 2830, 2833, 2834, 2835, 2836, 2837, 2838, 2840, 2841, 2842, 2844, 2850, 2851, 2852, 6087, 7095, 2857, 2858, 2859, 7158, 6605, 2870, 2871, 2872, 2874, 7097, 2882, 2883, 2884, 7180, 2888, 2889, 6189, 2894, 6670, 2900, 2901, 2902, 6684, 2904, 2905, 6234, 2908, 2910, 6707, 7105, 7104, 2918, 7108, 7107, 7103, 2924, 2925, 2926, 2928, 2929, 2930, 2932, 7216, 7105, 7104, 2940, 7108, 7107, 7106, 2945, 2946, 2947, 2950, 2952, 2953, 2954, 2959, 2961, 2962, 2963, 2964, 2968, 2970, 2972, 2973, 2974, 2981, 2983, 2984, 2985, 7146, 7144, 7160, 7152, 7163, 7172, 7170, 7174, 7184, 7189, 7201, 7218, 7233, 7242, 7255, 7253, 7268, 7266, 7264, 13, 14, 15, 2804, 2805, 7110, 7284, 7288, 2816, 2817, 7120, 7293, 7297, 2828, 2829, 7130, 7302, 7306, 7308, 7313, 2853, 2856, 7318, 7159, 2867, 7322, 2881, 7328, 7181, 7331, 2890, 2897, 7336, 2903, 7340, 2906, 2913, 2916, 2917, 7204, 2921, 2922, 2923, 7353, 7355, 2938, 2939, 7221, 2942, 2943, 2944, 7367, 7371, 7375, 7381, 7385, 7142, 3004, 3005, 3006, 3009, 7148, 3011, 7168, 3015, 3016, 3019, 7186, 3021, 3022, 7198, 7196, 3028, 7213, 3035, 7358, 7228, 7226, 3043, 7237, 7235, 3047, 7248, 7246, 7244, 3052, 3053, 7259, 7257, 3057, 3058, 3059, 9, 10, 11, 12, 13, 14, 15, 7409, 7111, 7285, 7414, 7121, 7294, 7419, 7131, 7303, 7316, 7326, 7337, 7443, 7205, 7446, 7451, 7454, 7412, 7417, 7422, 3002, 7423, 7463, 7424, 3010, 7162, 3013, 7430, 7470, 7434, 3020, 
7188, 3024, 3025, 7439, 7200, 7448, 3033, 7449, 3036, 7456, 7457, 3041, 3042, 7458, 3045, 3046, 7459, 3049, 3050, 3051, 7491, 7460, 3055, 3056, 7495, 8, 9, 10, 11, 12, 13, 14, 15, 7427, 7432, 7447, 7455, 7504, 2994, 7506, 7507, 2997, 7509, 7510, 3000, 7512, 3003, 3007, 3012, 3014, 3018, 7534, 3023, 3026, 7515, 7537, 3029, 7516, 3031, 3034, 7543, 7519, 3038, 3040, 3044, 3048, 7554, 3054, 7496, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 2993, 2995, 2996, 2998, 2999, 3001, 7581, 7568, 7582, 7583, 7584, 7569, 7587, 3027, 7591, 3030, 7570, 7594, 3037, 7571, 7546, 7549, 7552, 7557, 8, 9, 10, 11, 12, 13, 14, 15, 7617, 7619, 7621, 3008, 3017, 7629, 3032, 3039, 7547, 7550, 7601, 7558, 7626, 7622, 7633, 15, 7465, 7585, 7653, 7654, 7655, 7620, 7618, 7616, 3063, 3067, 3069, 7659, 7658, 7657, 7656, 15, 7664, 7586, 3060, 3061, 3062, 7666, 7631, 7634, 3071, 3072, 3073, 3074, 12, 13, 14, 15, 7528, 7473, 7683, 3065, 3068, 3070, 7689, 7691, 8, 9, 10, 11, 12, 13, 14, 15, 7684, 7697, 7696, 7703, 7700, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 3064, 3066, 7701, 3076, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7728, 7729, 3075, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7745, 7731, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7673, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7712, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 3077, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7808, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 
1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Launch configuration and problem-size constants for the generated
// arithmetic-circuit evaluator (`ac`) below.
//
// The circuit is evaluated by a single block of 16 threads; each constant
// is a multiple of the thread count because the generated code addresses
// everything as  [i + row*THREADS_PER_BLOCK].
#define THREADS_PER_BLOCK 16
// Single block only: `ac` computes i = blockDim.x*blockIdx.x + threadIdx.x
// and uses it to index its __shared__ buffer R[], so any blockIdx.x > 0
// would index past the per-block shared array.
#define BLOCKS_PER_GRID 1
// Number of primary-input values copied from A into shared memory before
// evaluation: 193 rows * 16 threads = 3088 (matches the R[i + 0*t] ..
// R[i + 192*t] = A[...] copy sequence in `ac`).
#define SIZE_OF_IN 3088
// Total number of gate operations (entries of B/C/Op) per iteration:
// 297 rows * 16 threads = 4752.  Consistent with the shared buffer size
// R[490*THREADS_PER_BLOCK] = (193 input + 297 gate) rows.
#define SIZE_OF_AC 4752
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[490*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
R[i + 52*t] = A[i + 52*t];
R[i + 53*t] = A[i + 53*t];
R[i + 54*t] = A[i + 54*t];
R[i + 55*t] = A[i + 55*t];
R[i + 56*t] = A[i + 56*t];
R[i + 57*t] = A[i + 57*t];
R[i + 58*t] = A[i + 58*t];
R[i + 59*t] = A[i + 59*t];
R[i + 60*t] = A[i + 60*t];
R[i + 61*t] = A[i + 61*t];
R[i + 62*t] = A[i + 62*t];
R[i + 63*t] = A[i + 63*t];
R[i + 64*t] = A[i + 64*t];
R[i + 65*t] = A[i + 65*t];
R[i + 66*t] = A[i + 66*t];
R[i + 67*t] = A[i + 67*t];
R[i + 68*t] = A[i + 68*t];
R[i + 69*t] = A[i + 69*t];
R[i + 70*t] = A[i + 70*t];
R[i + 71*t] = A[i + 71*t];
R[i + 72*t] = A[i + 72*t];
R[i + 73*t] = A[i + 73*t];
R[i + 74*t] = A[i + 74*t];
R[i + 75*t] = A[i + 75*t];
R[i + 76*t] = A[i + 76*t];
R[i + 77*t] = A[i + 77*t];
R[i + 78*t] = A[i + 78*t];
R[i + 79*t] = A[i + 79*t];
R[i + 80*t] = A[i + 80*t];
R[i + 81*t] = A[i + 81*t];
R[i + 82*t] = A[i + 82*t];
R[i + 83*t] = A[i + 83*t];
R[i + 84*t] = A[i + 84*t];
R[i + 85*t] = A[i + 85*t];
R[i + 86*t] = A[i + 86*t];
R[i + 87*t] = A[i + 87*t];
R[i + 88*t] = A[i + 88*t];
R[i + 89*t] = A[i + 89*t];
R[i + 90*t] = A[i + 90*t];
R[i + 91*t] = A[i + 91*t];
R[i + 92*t] = A[i + 92*t];
R[i + 93*t] = A[i + 93*t];
R[i + 94*t] = A[i + 94*t];
R[i + 95*t] = A[i + 95*t];
R[i + 96*t] = A[i + 96*t];
R[i + 97*t] = A[i + 97*t];
R[i + 98*t] = A[i + 98*t];
R[i + 99*t] = A[i + 99*t];
R[i + 100*t] = A[i + 100*t];
R[i + 101*t] = A[i + 101*t];
R[i + 102*t] = A[i + 102*t];
R[i + 103*t] = A[i + 103*t];
R[i + 104*t] = A[i + 104*t];
R[i + 105*t] = A[i + 105*t];
R[i + 106*t] = A[i + 106*t];
R[i + 107*t] = A[i + 107*t];
R[i + 108*t] = A[i + 108*t];
R[i + 109*t] = A[i + 109*t];
R[i + 110*t] = A[i + 110*t];
R[i + 111*t] = A[i + 111*t];
R[i + 112*t] = A[i + 112*t];
R[i + 113*t] = A[i + 113*t];
R[i + 114*t] = A[i + 114*t];
R[i + 115*t] = A[i + 115*t];
R[i + 116*t] = A[i + 116*t];
R[i + 117*t] = A[i + 117*t];
R[i + 118*t] = A[i + 118*t];
R[i + 119*t] = A[i + 119*t];
R[i + 120*t] = A[i + 120*t];
R[i + 121*t] = A[i + 121*t];
R[i + 122*t] = A[i + 122*t];
R[i + 123*t] = A[i + 123*t];
R[i + 124*t] = A[i + 124*t];
R[i + 125*t] = A[i + 125*t];
R[i + 126*t] = A[i + 126*t];
R[i + 127*t] = A[i + 127*t];
R[i + 128*t] = A[i + 128*t];
R[i + 129*t] = A[i + 129*t];
R[i + 130*t] = A[i + 130*t];
R[i + 131*t] = A[i + 131*t];
R[i + 132*t] = A[i + 132*t];
R[i + 133*t] = A[i + 133*t];
R[i + 134*t] = A[i + 134*t];
R[i + 135*t] = A[i + 135*t];
R[i + 136*t] = A[i + 136*t];
R[i + 137*t] = A[i + 137*t];
R[i + 138*t] = A[i + 138*t];
R[i + 139*t] = A[i + 139*t];
R[i + 140*t] = A[i + 140*t];
R[i + 141*t] = A[i + 141*t];
R[i + 142*t] = A[i + 142*t];
R[i + 143*t] = A[i + 143*t];
R[i + 144*t] = A[i + 144*t];
R[i + 145*t] = A[i + 145*t];
R[i + 146*t] = A[i + 146*t];
R[i + 147*t] = A[i + 147*t];
R[i + 148*t] = A[i + 148*t];
R[i + 149*t] = A[i + 149*t];
R[i + 150*t] = A[i + 150*t];
R[i + 151*t] = A[i + 151*t];
R[i + 152*t] = A[i + 152*t];
R[i + 153*t] = A[i + 153*t];
R[i + 154*t] = A[i + 154*t];
R[i + 155*t] = A[i + 155*t];
R[i + 156*t] = A[i + 156*t];
R[i + 157*t] = A[i + 157*t];
R[i + 158*t] = A[i + 158*t];
R[i + 159*t] = A[i + 159*t];
R[i + 160*t] = A[i + 160*t];
R[i + 161*t] = A[i + 161*t];
R[i + 162*t] = A[i + 162*t];
R[i + 163*t] = A[i + 163*t];
R[i + 164*t] = A[i + 164*t];
R[i + 165*t] = A[i + 165*t];
R[i + 166*t] = A[i + 166*t];
R[i + 167*t] = A[i + 167*t];
R[i + 168*t] = A[i + 168*t];
R[i + 169*t] = A[i + 169*t];
R[i + 170*t] = A[i + 170*t];
R[i + 171*t] = A[i + 171*t];
R[i + 172*t] = A[i + 172*t];
R[i + 173*t] = A[i + 173*t];
R[i + 174*t] = A[i + 174*t];
R[i + 175*t] = A[i + 175*t];
R[i + 176*t] = A[i + 176*t];
R[i + 177*t] = A[i + 177*t];
R[i + 178*t] = A[i + 178*t];
R[i + 179*t] = A[i + 179*t];
R[i + 180*t] = A[i + 180*t];
R[i + 181*t] = A[i + 181*t];
R[i + 182*t] = A[i + 182*t];
R[i + 183*t] = A[i + 183*t];
R[i + 184*t] = A[i + 184*t];
R[i + 185*t] = A[i + 185*t];
R[i + 186*t] = A[i + 186*t];
R[i + 187*t] = A[i + 187*t];
R[i + 188*t] = A[i + 188*t];
R[i + 189*t] = A[i + 189*t];
R[i + 190*t] = A[i + 190*t];
R[i + 191*t] = A[i + 191*t];
R[i + 192*t] = A[i + 192*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
R[i + 193*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 194*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 195*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 196*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 197*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 198*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 199*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 200*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 201*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 202*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 203*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 204*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 205*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 206*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 207*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 208*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 209*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 210*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 211*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 212*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 213*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 214*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 215*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 216*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 217*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 218*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 219*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 220*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 221*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 222*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 223*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 224*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 225*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 226*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 227*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 228*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 229*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 230*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 231*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 232*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 233*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 234*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 235*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 236*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 237*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 238*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 239*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 240*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 241*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 242*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 243*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 244*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 245*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 246*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 247*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 248*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 249*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 250*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 251*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
__syncthreads();
R[i + 252*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 253*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 254*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 255*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 256*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 257*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 258*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 259*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 260*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 261*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 262*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 263*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 264*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 265*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 266*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
R[i + 267*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 268*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 269*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 270*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 271*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
R[i + 272*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 273*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 274*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 275*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
R[i + 276*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 277*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 278*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 279*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
R[i + 280*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 281*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 282*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 283*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
R[i + 284*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 285*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 286*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
__syncthreads();
R[i + 287*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 288*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
R[i + 289*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
R[i + 290*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
R[i + 291*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
R[i + 292*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
R[i + 293*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
R[i + 294*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
R[i + 295*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
R[i + 296*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
R[i + 297*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
R[i + 298*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
R[i + 299*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
R[i + 300*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
R[i + 301*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
R[i + 302*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
R[i + 303*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
R[i + 304*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
R[i + 305*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
R[i + 306*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]];
R[i + 307*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]];
R[i + 308*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]];
R[i + 309*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]];
R[i + 310*t] = Op[i + 117*t] ? R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]];
R[i + 311*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]];
R[i + 312*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]];
R[i + 313*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]];
R[i + 314*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]];
R[i + 315*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]];
R[i + 316*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]];
R[i + 317*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]];
R[i + 318*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]];
R[i + 319*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]];
R[i + 320*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]];
__syncthreads();
R[i + 321*t] = Op[i + 128*t] ? R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]];
R[i + 322*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]];
R[i + 323*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]];
R[i + 324*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]];
R[i + 325*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]];
R[i + 326*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]];
R[i + 327*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]];
R[i + 328*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]];
R[i + 329*t] = Op[i + 136*t] ? R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]];
R[i + 330*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]];
R[i + 331*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]];
R[i + 332*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]];
R[i + 333*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]];
R[i + 334*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]];
R[i + 335*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]];
R[i + 336*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]];
R[i + 337*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]];
R[i + 338*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]];
R[i + 339*t] = Op[i + 146*t] ? R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]];
R[i + 340*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]];
R[i + 341*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]];
R[i + 342*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]];
R[i + 343*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]];
R[i + 344*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]];
R[i + 345*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]];
R[i + 346*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]];
R[i + 347*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]];
R[i + 348*t] = Op[i + 155*t] ? R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]];
R[i + 349*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]];
R[i + 350*t] = Op[i + 157*t] ? R[B[i + 157*t]] * R[C[i + 157*t]] : R[B[i + 157*t]] + R[C[i + 157*t]];
R[i + 351*t] = Op[i + 158*t] ? R[B[i + 158*t]] * R[C[i + 158*t]] : R[B[i + 158*t]] + R[C[i + 158*t]];
__syncthreads();
R[i + 352*t] = Op[i + 159*t] ? R[B[i + 159*t]] * R[C[i + 159*t]] : R[B[i + 159*t]] + R[C[i + 159*t]];
R[i + 353*t] = Op[i + 160*t] ? R[B[i + 160*t]] * R[C[i + 160*t]] : R[B[i + 160*t]] + R[C[i + 160*t]];
R[i + 354*t] = Op[i + 161*t] ? R[B[i + 161*t]] * R[C[i + 161*t]] : R[B[i + 161*t]] + R[C[i + 161*t]];
R[i + 355*t] = Op[i + 162*t] ? R[B[i + 162*t]] * R[C[i + 162*t]] : R[B[i + 162*t]] + R[C[i + 162*t]];
R[i + 356*t] = Op[i + 163*t] ? R[B[i + 163*t]] * R[C[i + 163*t]] : R[B[i + 163*t]] + R[C[i + 163*t]];
R[i + 357*t] = Op[i + 164*t] ? R[B[i + 164*t]] * R[C[i + 164*t]] : R[B[i + 164*t]] + R[C[i + 164*t]];
R[i + 358*t] = Op[i + 165*t] ? R[B[i + 165*t]] * R[C[i + 165*t]] : R[B[i + 165*t]] + R[C[i + 165*t]];
R[i + 359*t] = Op[i + 166*t] ? R[B[i + 166*t]] * R[C[i + 166*t]] : R[B[i + 166*t]] + R[C[i + 166*t]];
R[i + 360*t] = Op[i + 167*t] ? R[B[i + 167*t]] * R[C[i + 167*t]] : R[B[i + 167*t]] + R[C[i + 167*t]];
R[i + 361*t] = Op[i + 168*t] ? R[B[i + 168*t]] * R[C[i + 168*t]] : R[B[i + 168*t]] + R[C[i + 168*t]];
R[i + 362*t] = Op[i + 169*t] ? R[B[i + 169*t]] * R[C[i + 169*t]] : R[B[i + 169*t]] + R[C[i + 169*t]];
R[i + 363*t] = Op[i + 170*t] ? R[B[i + 170*t]] * R[C[i + 170*t]] : R[B[i + 170*t]] + R[C[i + 170*t]];
R[i + 364*t] = Op[i + 171*t] ? R[B[i + 171*t]] * R[C[i + 171*t]] : R[B[i + 171*t]] + R[C[i + 171*t]];
R[i + 365*t] = Op[i + 172*t] ? R[B[i + 172*t]] * R[C[i + 172*t]] : R[B[i + 172*t]] + R[C[i + 172*t]];
R[i + 366*t] = Op[i + 173*t] ? R[B[i + 173*t]] * R[C[i + 173*t]] : R[B[i + 173*t]] + R[C[i + 173*t]];
R[i + 367*t] = Op[i + 174*t] ? R[B[i + 174*t]] * R[C[i + 174*t]] : R[B[i + 174*t]] + R[C[i + 174*t]];
R[i + 368*t] = Op[i + 175*t] ? R[B[i + 175*t]] * R[C[i + 175*t]] : R[B[i + 175*t]] + R[C[i + 175*t]];
R[i + 369*t] = Op[i + 176*t] ? R[B[i + 176*t]] * R[C[i + 176*t]] : R[B[i + 176*t]] + R[C[i + 176*t]];
R[i + 370*t] = Op[i + 177*t] ? R[B[i + 177*t]] * R[C[i + 177*t]] : R[B[i + 177*t]] + R[C[i + 177*t]];
__syncthreads();
R[i + 371*t] = Op[i + 178*t] ? R[B[i + 178*t]] * R[C[i + 178*t]] : R[B[i + 178*t]] + R[C[i + 178*t]];
R[i + 372*t] = Op[i + 179*t] ? R[B[i + 179*t]] * R[C[i + 179*t]] : R[B[i + 179*t]] + R[C[i + 179*t]];
R[i + 373*t] = Op[i + 180*t] ? R[B[i + 180*t]] * R[C[i + 180*t]] : R[B[i + 180*t]] + R[C[i + 180*t]];
R[i + 374*t] = Op[i + 181*t] ? R[B[i + 181*t]] * R[C[i + 181*t]] : R[B[i + 181*t]] + R[C[i + 181*t]];
R[i + 375*t] = Op[i + 182*t] ? R[B[i + 182*t]] * R[C[i + 182*t]] : R[B[i + 182*t]] + R[C[i + 182*t]];
R[i + 376*t] = Op[i + 183*t] ? R[B[i + 183*t]] * R[C[i + 183*t]] : R[B[i + 183*t]] + R[C[i + 183*t]];
R[i + 377*t] = Op[i + 184*t] ? R[B[i + 184*t]] * R[C[i + 184*t]] : R[B[i + 184*t]] + R[C[i + 184*t]];
R[i + 378*t] = Op[i + 185*t] ? R[B[i + 185*t]] * R[C[i + 185*t]] : R[B[i + 185*t]] + R[C[i + 185*t]];
R[i + 379*t] = Op[i + 186*t] ? R[B[i + 186*t]] * R[C[i + 186*t]] : R[B[i + 186*t]] + R[C[i + 186*t]];
R[i + 380*t] = Op[i + 187*t] ? R[B[i + 187*t]] * R[C[i + 187*t]] : R[B[i + 187*t]] + R[C[i + 187*t]];
R[i + 381*t] = Op[i + 188*t] ? R[B[i + 188*t]] * R[C[i + 188*t]] : R[B[i + 188*t]] + R[C[i + 188*t]];
R[i + 382*t] = Op[i + 189*t] ? R[B[i + 189*t]] * R[C[i + 189*t]] : R[B[i + 189*t]] + R[C[i + 189*t]];
R[i + 383*t] = Op[i + 190*t] ? R[B[i + 190*t]] * R[C[i + 190*t]] : R[B[i + 190*t]] + R[C[i + 190*t]];
R[i + 384*t] = Op[i + 191*t] ? R[B[i + 191*t]] * R[C[i + 191*t]] : R[B[i + 191*t]] + R[C[i + 191*t]];
R[i + 385*t] = Op[i + 192*t] ? R[B[i + 192*t]] * R[C[i + 192*t]] : R[B[i + 192*t]] + R[C[i + 192*t]];
R[i + 386*t] = Op[i + 193*t] ? R[B[i + 193*t]] * R[C[i + 193*t]] : R[B[i + 193*t]] + R[C[i + 193*t]];
R[i + 387*t] = Op[i + 194*t] ? R[B[i + 194*t]] * R[C[i + 194*t]] : R[B[i + 194*t]] + R[C[i + 194*t]];
R[i + 388*t] = Op[i + 195*t] ? R[B[i + 195*t]] * R[C[i + 195*t]] : R[B[i + 195*t]] + R[C[i + 195*t]];
R[i + 389*t] = Op[i + 196*t] ? R[B[i + 196*t]] * R[C[i + 196*t]] : R[B[i + 196*t]] + R[C[i + 196*t]];
R[i + 390*t] = Op[i + 197*t] ? R[B[i + 197*t]] * R[C[i + 197*t]] : R[B[i + 197*t]] + R[C[i + 197*t]];
R[i + 391*t] = Op[i + 198*t] ? R[B[i + 198*t]] * R[C[i + 198*t]] : R[B[i + 198*t]] + R[C[i + 198*t]];
R[i + 392*t] = Op[i + 199*t] ? R[B[i + 199*t]] * R[C[i + 199*t]] : R[B[i + 199*t]] + R[C[i + 199*t]];
R[i + 393*t] = Op[i + 200*t] ? R[B[i + 200*t]] * R[C[i + 200*t]] : R[B[i + 200*t]] + R[C[i + 200*t]];
R[i + 394*t] = Op[i + 201*t] ? R[B[i + 201*t]] * R[C[i + 201*t]] : R[B[i + 201*t]] + R[C[i + 201*t]];
R[i + 395*t] = Op[i + 202*t] ? R[B[i + 202*t]] * R[C[i + 202*t]] : R[B[i + 202*t]] + R[C[i + 202*t]];
R[i + 396*t] = Op[i + 203*t] ? R[B[i + 203*t]] * R[C[i + 203*t]] : R[B[i + 203*t]] + R[C[i + 203*t]];
R[i + 397*t] = Op[i + 204*t] ? R[B[i + 204*t]] * R[C[i + 204*t]] : R[B[i + 204*t]] + R[C[i + 204*t]];
R[i + 398*t] = Op[i + 205*t] ? R[B[i + 205*t]] * R[C[i + 205*t]] : R[B[i + 205*t]] + R[C[i + 205*t]];
R[i + 399*t] = Op[i + 206*t] ? R[B[i + 206*t]] * R[C[i + 206*t]] : R[B[i + 206*t]] + R[C[i + 206*t]];
R[i + 400*t] = Op[i + 207*t] ? R[B[i + 207*t]] * R[C[i + 207*t]] : R[B[i + 207*t]] + R[C[i + 207*t]];
R[i + 401*t] = Op[i + 208*t] ? R[B[i + 208*t]] * R[C[i + 208*t]] : R[B[i + 208*t]] + R[C[i + 208*t]];
R[i + 402*t] = Op[i + 209*t] ? R[B[i + 209*t]] * R[C[i + 209*t]] : R[B[i + 209*t]] + R[C[i + 209*t]];
__syncthreads();
R[i + 403*t] = Op[i + 210*t] ? R[B[i + 210*t]] * R[C[i + 210*t]] : R[B[i + 210*t]] + R[C[i + 210*t]];
R[i + 404*t] = Op[i + 211*t] ? R[B[i + 211*t]] * R[C[i + 211*t]] : R[B[i + 211*t]] + R[C[i + 211*t]];
R[i + 405*t] = Op[i + 212*t] ? R[B[i + 212*t]] * R[C[i + 212*t]] : R[B[i + 212*t]] + R[C[i + 212*t]];
R[i + 406*t] = Op[i + 213*t] ? R[B[i + 213*t]] * R[C[i + 213*t]] : R[B[i + 213*t]] + R[C[i + 213*t]];
R[i + 407*t] = Op[i + 214*t] ? R[B[i + 214*t]] * R[C[i + 214*t]] : R[B[i + 214*t]] + R[C[i + 214*t]];
R[i + 408*t] = Op[i + 215*t] ? R[B[i + 215*t]] * R[C[i + 215*t]] : R[B[i + 215*t]] + R[C[i + 215*t]];
R[i + 409*t] = Op[i + 216*t] ? R[B[i + 216*t]] * R[C[i + 216*t]] : R[B[i + 216*t]] + R[C[i + 216*t]];
R[i + 410*t] = Op[i + 217*t] ? R[B[i + 217*t]] * R[C[i + 217*t]] : R[B[i + 217*t]] + R[C[i + 217*t]];
R[i + 411*t] = Op[i + 218*t] ? R[B[i + 218*t]] * R[C[i + 218*t]] : R[B[i + 218*t]] + R[C[i + 218*t]];
R[i + 412*t] = Op[i + 219*t] ? R[B[i + 219*t]] * R[C[i + 219*t]] : R[B[i + 219*t]] + R[C[i + 219*t]];
R[i + 413*t] = Op[i + 220*t] ? R[B[i + 220*t]] * R[C[i + 220*t]] : R[B[i + 220*t]] + R[C[i + 220*t]];
R[i + 414*t] = Op[i + 221*t] ? R[B[i + 221*t]] * R[C[i + 221*t]] : R[B[i + 221*t]] + R[C[i + 221*t]];
R[i + 415*t] = Op[i + 222*t] ? R[B[i + 222*t]] * R[C[i + 222*t]] : R[B[i + 222*t]] + R[C[i + 222*t]];
R[i + 416*t] = Op[i + 223*t] ? R[B[i + 223*t]] * R[C[i + 223*t]] : R[B[i + 223*t]] + R[C[i + 223*t]];
R[i + 417*t] = Op[i + 224*t] ? R[B[i + 224*t]] * R[C[i + 224*t]] : R[B[i + 224*t]] + R[C[i + 224*t]];
R[i + 418*t] = Op[i + 225*t] ? R[B[i + 225*t]] * R[C[i + 225*t]] : R[B[i + 225*t]] + R[C[i + 225*t]];
R[i + 419*t] = Op[i + 226*t] ? R[B[i + 226*t]] * R[C[i + 226*t]] : R[B[i + 226*t]] + R[C[i + 226*t]];
R[i + 420*t] = Op[i + 227*t] ? R[B[i + 227*t]] * R[C[i + 227*t]] : R[B[i + 227*t]] + R[C[i + 227*t]];
R[i + 421*t] = Op[i + 228*t] ? R[B[i + 228*t]] * R[C[i + 228*t]] : R[B[i + 228*t]] + R[C[i + 228*t]];
R[i + 422*t] = Op[i + 229*t] ? R[B[i + 229*t]] * R[C[i + 229*t]] : R[B[i + 229*t]] + R[C[i + 229*t]];
R[i + 423*t] = Op[i + 230*t] ? R[B[i + 230*t]] * R[C[i + 230*t]] : R[B[i + 230*t]] + R[C[i + 230*t]];
R[i + 424*t] = Op[i + 231*t] ? R[B[i + 231*t]] * R[C[i + 231*t]] : R[B[i + 231*t]] + R[C[i + 231*t]];
R[i + 425*t] = Op[i + 232*t] ? R[B[i + 232*t]] * R[C[i + 232*t]] : R[B[i + 232*t]] + R[C[i + 232*t]];
R[i + 426*t] = Op[i + 233*t] ? R[B[i + 233*t]] * R[C[i + 233*t]] : R[B[i + 233*t]] + R[C[i + 233*t]];
R[i + 427*t] = Op[i + 234*t] ? R[B[i + 234*t]] * R[C[i + 234*t]] : R[B[i + 234*t]] + R[C[i + 234*t]];
R[i + 428*t] = Op[i + 235*t] ? R[B[i + 235*t]] * R[C[i + 235*t]] : R[B[i + 235*t]] + R[C[i + 235*t]];
R[i + 429*t] = Op[i + 236*t] ? R[B[i + 236*t]] * R[C[i + 236*t]] : R[B[i + 236*t]] + R[C[i + 236*t]];
R[i + 430*t] = Op[i + 237*t] ? R[B[i + 237*t]] * R[C[i + 237*t]] : R[B[i + 237*t]] + R[C[i + 237*t]];
__syncthreads();
R[i + 431*t] = Op[i + 238*t] ? R[B[i + 238*t]] * R[C[i + 238*t]] : R[B[i + 238*t]] + R[C[i + 238*t]];
R[i + 432*t] = Op[i + 239*t] ? R[B[i + 239*t]] * R[C[i + 239*t]] : R[B[i + 239*t]] + R[C[i + 239*t]];
R[i + 433*t] = Op[i + 240*t] ? R[B[i + 240*t]] * R[C[i + 240*t]] : R[B[i + 240*t]] + R[C[i + 240*t]];
R[i + 434*t] = Op[i + 241*t] ? R[B[i + 241*t]] * R[C[i + 241*t]] : R[B[i + 241*t]] + R[C[i + 241*t]];
R[i + 435*t] = Op[i + 242*t] ? R[B[i + 242*t]] * R[C[i + 242*t]] : R[B[i + 242*t]] + R[C[i + 242*t]];
R[i + 436*t] = Op[i + 243*t] ? R[B[i + 243*t]] * R[C[i + 243*t]] : R[B[i + 243*t]] + R[C[i + 243*t]];
R[i + 437*t] = Op[i + 244*t] ? R[B[i + 244*t]] * R[C[i + 244*t]] : R[B[i + 244*t]] + R[C[i + 244*t]];
R[i + 438*t] = Op[i + 245*t] ? R[B[i + 245*t]] * R[C[i + 245*t]] : R[B[i + 245*t]] + R[C[i + 245*t]];
R[i + 439*t] = Op[i + 246*t] ? R[B[i + 246*t]] * R[C[i + 246*t]] : R[B[i + 246*t]] + R[C[i + 246*t]];
R[i + 440*t] = Op[i + 247*t] ? R[B[i + 247*t]] * R[C[i + 247*t]] : R[B[i + 247*t]] + R[C[i + 247*t]];
R[i + 441*t] = Op[i + 248*t] ? R[B[i + 248*t]] * R[C[i + 248*t]] : R[B[i + 248*t]] + R[C[i + 248*t]];
R[i + 442*t] = Op[i + 249*t] ? R[B[i + 249*t]] * R[C[i + 249*t]] : R[B[i + 249*t]] + R[C[i + 249*t]];
__syncthreads();
R[i + 443*t] = Op[i + 250*t] ? R[B[i + 250*t]] * R[C[i + 250*t]] : R[B[i + 250*t]] + R[C[i + 250*t]];
R[i + 444*t] = Op[i + 251*t] ? R[B[i + 251*t]] * R[C[i + 251*t]] : R[B[i + 251*t]] + R[C[i + 251*t]];
R[i + 445*t] = Op[i + 252*t] ? R[B[i + 252*t]] * R[C[i + 252*t]] : R[B[i + 252*t]] + R[C[i + 252*t]];
R[i + 446*t] = Op[i + 253*t] ? R[B[i + 253*t]] * R[C[i + 253*t]] : R[B[i + 253*t]] + R[C[i + 253*t]];
R[i + 447*t] = Op[i + 254*t] ? R[B[i + 254*t]] * R[C[i + 254*t]] : R[B[i + 254*t]] + R[C[i + 254*t]];
R[i + 448*t] = Op[i + 255*t] ? R[B[i + 255*t]] * R[C[i + 255*t]] : R[B[i + 255*t]] + R[C[i + 255*t]];
R[i + 449*t] = Op[i + 256*t] ? R[B[i + 256*t]] * R[C[i + 256*t]] : R[B[i + 256*t]] + R[C[i + 256*t]];
R[i + 450*t] = Op[i + 257*t] ? R[B[i + 257*t]] * R[C[i + 257*t]] : R[B[i + 257*t]] + R[C[i + 257*t]];
R[i + 451*t] = Op[i + 258*t] ? R[B[i + 258*t]] * R[C[i + 258*t]] : R[B[i + 258*t]] + R[C[i + 258*t]];
R[i + 452*t] = Op[i + 259*t] ? R[B[i + 259*t]] * R[C[i + 259*t]] : R[B[i + 259*t]] + R[C[i + 259*t]];
R[i + 453*t] = Op[i + 260*t] ? R[B[i + 260*t]] * R[C[i + 260*t]] : R[B[i + 260*t]] + R[C[i + 260*t]];
R[i + 454*t] = Op[i + 261*t] ? R[B[i + 261*t]] * R[C[i + 261*t]] : R[B[i + 261*t]] + R[C[i + 261*t]];
__syncthreads();
R[i + 455*t] = Op[i + 262*t] ? R[B[i + 262*t]] * R[C[i + 262*t]] : R[B[i + 262*t]] + R[C[i + 262*t]];
R[i + 456*t] = Op[i + 263*t] ? R[B[i + 263*t]] * R[C[i + 263*t]] : R[B[i + 263*t]] + R[C[i + 263*t]];
R[i + 457*t] = Op[i + 264*t] ? R[B[i + 264*t]] * R[C[i + 264*t]] : R[B[i + 264*t]] + R[C[i + 264*t]];
R[i + 458*t] = Op[i + 265*t] ? R[B[i + 265*t]] * R[C[i + 265*t]] : R[B[i + 265*t]] + R[C[i + 265*t]];
R[i + 459*t] = Op[i + 266*t] ? R[B[i + 266*t]] * R[C[i + 266*t]] : R[B[i + 266*t]] + R[C[i + 266*t]];
R[i + 460*t] = Op[i + 267*t] ? R[B[i + 267*t]] * R[C[i + 267*t]] : R[B[i + 267*t]] + R[C[i + 267*t]];
R[i + 461*t] = Op[i + 268*t] ? R[B[i + 268*t]] * R[C[i + 268*t]] : R[B[i + 268*t]] + R[C[i + 268*t]];
R[i + 462*t] = Op[i + 269*t] ? R[B[i + 269*t]] * R[C[i + 269*t]] : R[B[i + 269*t]] + R[C[i + 269*t]];
__syncthreads();
R[i + 463*t] = Op[i + 270*t] ? R[B[i + 270*t]] * R[C[i + 270*t]] : R[B[i + 270*t]] + R[C[i + 270*t]];
R[i + 464*t] = Op[i + 271*t] ? R[B[i + 271*t]] * R[C[i + 271*t]] : R[B[i + 271*t]] + R[C[i + 271*t]];
R[i + 465*t] = Op[i + 272*t] ? R[B[i + 272*t]] * R[C[i + 272*t]] : R[B[i + 272*t]] + R[C[i + 272*t]];
R[i + 466*t] = Op[i + 273*t] ? R[B[i + 273*t]] * R[C[i + 273*t]] : R[B[i + 273*t]] + R[C[i + 273*t]];
R[i + 467*t] = Op[i + 274*t] ? R[B[i + 274*t]] * R[C[i + 274*t]] : R[B[i + 274*t]] + R[C[i + 274*t]];
R[i + 468*t] = Op[i + 275*t] ? R[B[i + 275*t]] * R[C[i + 275*t]] : R[B[i + 275*t]] + R[C[i + 275*t]];
__syncthreads();
R[i + 469*t] = Op[i + 276*t] ? R[B[i + 276*t]] * R[C[i + 276*t]] : R[B[i + 276*t]] + R[C[i + 276*t]];
R[i + 470*t] = Op[i + 277*t] ? R[B[i + 277*t]] * R[C[i + 277*t]] : R[B[i + 277*t]] + R[C[i + 277*t]];
R[i + 471*t] = Op[i + 278*t] ? R[B[i + 278*t]] * R[C[i + 278*t]] : R[B[i + 278*t]] + R[C[i + 278*t]];
R[i + 472*t] = Op[i + 279*t] ? R[B[i + 279*t]] * R[C[i + 279*t]] : R[B[i + 279*t]] + R[C[i + 279*t]];
__syncthreads();
R[i + 473*t] = Op[i + 280*t] ? R[B[i + 280*t]] * R[C[i + 280*t]] : R[B[i + 280*t]] + R[C[i + 280*t]];
R[i + 474*t] = Op[i + 281*t] ? R[B[i + 281*t]] * R[C[i + 281*t]] : R[B[i + 281*t]] + R[C[i + 281*t]];
R[i + 475*t] = Op[i + 282*t] ? R[B[i + 282*t]] * R[C[i + 282*t]] : R[B[i + 282*t]] + R[C[i + 282*t]];
__syncthreads();
R[i + 476*t] = Op[i + 283*t] ? R[B[i + 283*t]] * R[C[i + 283*t]] : R[B[i + 283*t]] + R[C[i + 283*t]];
R[i + 477*t] = Op[i + 284*t] ? R[B[i + 284*t]] * R[C[i + 284*t]] : R[B[i + 284*t]] + R[C[i + 284*t]];
__syncthreads();
R[i + 478*t] = Op[i + 285*t] ? R[B[i + 285*t]] * R[C[i + 285*t]] : R[B[i + 285*t]] + R[C[i + 285*t]];
__syncthreads();
R[i + 479*t] = Op[i + 286*t] ? R[B[i + 286*t]] * R[C[i + 286*t]] : R[B[i + 286*t]] + R[C[i + 286*t]];
__syncthreads();
R[i + 480*t] = Op[i + 287*t] ? R[B[i + 287*t]] * R[C[i + 287*t]] : R[B[i + 287*t]] + R[C[i + 287*t]];
__syncthreads();
R[i + 481*t] = Op[i + 288*t] ? R[B[i + 288*t]] * R[C[i + 288*t]] : R[B[i + 288*t]] + R[C[i + 288*t]];
__syncthreads();
R[i + 482*t] = Op[i + 289*t] ? R[B[i + 289*t]] * R[C[i + 289*t]] : R[B[i + 289*t]] + R[C[i + 289*t]];
__syncthreads();
R[i + 483*t] = Op[i + 290*t] ? R[B[i + 290*t]] * R[C[i + 290*t]] : R[B[i + 290*t]] + R[C[i + 290*t]];
__syncthreads();
R[i + 484*t] = Op[i + 291*t] ? R[B[i + 291*t]] * R[C[i + 291*t]] : R[B[i + 291*t]] + R[C[i + 291*t]];
__syncthreads();
R[i + 485*t] = Op[i + 292*t] ? R[B[i + 292*t]] * R[C[i + 292*t]] : R[B[i + 292*t]] + R[C[i + 292*t]];
__syncthreads();
R[i + 486*t] = Op[i + 293*t] ? R[B[i + 293*t]] * R[C[i + 293*t]] : R[B[i + 293*t]] + R[C[i + 293*t]];
__syncthreads();
R[i + 487*t] = Op[i + 294*t] ? R[B[i + 294*t]] * R[C[i + 294*t]] : R[B[i + 294*t]] + R[C[i + 294*t]];
__syncthreads();
R[i + 488*t] = Op[i + 295*t] ? R[B[i + 295*t]] * R[C[i + 295*t]] : R[B[i + 295*t]] + R[C[i + 295*t]];
__syncthreads();
R[i + 489*t] = Op[i + 296*t] ? R[B[i + 296*t]] * R[C[i + 296*t]] : R[B[i + 296*t]] + R[C[i + 296*t]];
if (i==0) { final += R[489*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
20,552 | #include <stdio.h>
#include <math.h>
#include <assert.h>
#define epsilon (float) 1e-5
#define THREADxBLOCKalongXorY 16
typedef float DataType_t;
//
// Kernels
//
// CPU reference implementation: dense row-major square matrix product
// P = M * N, all three matrices Width x Width.
void MatrixMulOnHost(DataType_t* M, DataType_t* N, DataType_t* P, int Width)
{
    for (int row = 0; row < Width; row++)
    {
        for (int col = 0; col < Width; col++)
        {
            // Accumulate the dot product of row `row` of M with column `col` of N.
            DataType_t acc = 0;
            for (int k = 0; k < Width; k++)
                acc += M[row * Width + k] * N[k * Width + col];
            P[row * Width + col] = acc;
        }
    }
}
// GPU matrix multiply: one thread per output element, row index from the
// y dimension and column index from the x dimension of the launch grid.
__global__ void MatrixMulKernel(DataType_t* dM, DataType_t* dN, DataType_t* dP, int Width)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: the grid is ceil-divided, so edge blocks overhang the matrix.
    if (row >= Width || col >= Width)
        return;

    DataType_t acc = 0;
    for (int k = 0; k < Width; k++)
        acc += dM[row * Width + k] * dN[k * Width + col];
    dP[row * Width + col] = acc;
}
// Allocates device buffers, runs the GPU matrix multiply, times it with CUDA
// events, prints throughput, and copies the result back into P.
// M, N, P are host pointers to Width x Width row-major matrices.
void MatrixMulOnDevice(DataType_t* M, DataType_t* N, DataType_t* P, int Width)
{
    int gridsize, size;
    float mflops;
    DataType_t *dM, *dN, *dP;
    cudaError_t mycudaerror;
    cudaEvent_t start, stop;
    float elapsed;

    size = Width * Width * sizeof(DataType_t);

    // CUDA grid management: ceil-divide so a partial tile at the matrix edge
    // still gets a block.
    gridsize = (Width + THREADxBLOCKalongXorY - 1) / THREADxBLOCKalongXorY;
    dim3 dimGrid(gridsize, gridsize);
    dim3 dimBlock(THREADxBLOCKalongXorY, THREADxBLOCKalongXorY);
    printf("Gridsize: %d\n", gridsize);

    // Device allocations and host->device copies of the inputs.
    cudaMalloc(&dM, size);
    cudaMemcpy(dM, M, size, cudaMemcpyHostToDevice);
    cudaMalloc(&dN, size);
    cudaMemcpy(dN, N, size, cudaMemcpyHostToDevice);
    cudaMalloc(&dP, size);

    // cudaGetLastError call to clear any sticky error from earlier CUDA calls.
    mycudaerror = cudaGetLastError();

    // Create start and stop CUDA events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Kernel launch, bracketed by timing events.
    cudaEventRecord(start);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(dM, dN, dP, Width);
    // Bug fix: the original fetched the error code before the launch but never
    // inspected it afterwards, so a bad launch configuration went unnoticed.
    mycudaerror = cudaGetLastError();
    if (mycudaerror != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(mycudaerror));
    cudaEventRecord(stop);

    // Wait for the stop event; this also surfaces asynchronous kernel errors.
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(&elapsed, start, stop);
    elapsed /= 1000.f; // Convert milliseconds to seconds

    // calculate Mflops: a dense multiply does 2*Width^3 floating point ops.
    mflops = 1e-6 * 2 * pow(Width, 3) / elapsed;
    printf("Kernel elapsed time %fs \n", elapsed);
    printf("Mflops: %f\n", mflops);

    // Bug fix: the events were never destroyed (resource leak on repeated calls).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy back results from device
    cudaMemcpy(P, dP, size, cudaMemcpyDeviceToHost);

    // free memory on device
    cudaFree(dM);
    cudaFree(dN);
    cudaFree(dP);
}
//
// Main
//
// Driver: reads Width from the command line, multiplies two Width x Width
// matrices on host and device, and compares the results element-wise with a
// relative tolerance of epsilon. Exits nonzero on mismatch.
int main(int argc, char** argv)
{
    int Width;
    DataType_t *M, *N, *hP, *gP;
    DataType_t it;
    int x, y;
    int errCnt;

    if (argc < 2)
    {
        fprintf(stderr, "Usage: %s Width\n", argv[0]);
        exit(1);
    }
    Width = atoi(argv[1]);
    if (Width < 1)
    {
        fprintf(stderr, "Error Width=%d, must be > 0\n", Width);
        exit(1);
    }

    M  = (DataType_t*) malloc(Width * Width * sizeof(DataType_t));
    N  = (DataType_t*) malloc(Width * Width * sizeof(DataType_t));
    hP = (DataType_t*) malloc(Width * Width * sizeof(DataType_t));
    gP = (DataType_t*) malloc(Width * Width * sizeof(DataType_t));
    if (M == NULL)
    {
        fprintf(stderr,"Could not get memory for M\n");
        exit(1);
    }
    if (N == NULL)
    {
        fprintf(stderr,"Could not get memory for N\n");
        exit(1);
    }
    if (hP == NULL)
    {
        fprintf(stderr,"Could not get memory for hP\n");
        exit(1);
    }
    if (gP == NULL)
    {
        fprintf(stderr,"Could not get memory for gP\n");
        exit(1);
    }
    memset(gP, 0, Width * Width * sizeof(DataType_t));
    memset(hP, 0, Width * Width * sizeof(DataType_t));

    // Deterministic fill; the integer division by Width is kept from the
    // original initialization scheme.
    for (y = 0; y < Width; y++)
    {
        for (x = 0; x < Width; x++)
        {
            M[y*Width + x] = (DataType_t) (((y + 1) * Width + x + 1) / Width);
            N[y*Width + x] = (DataType_t) (((y + 1) * Width + x + 1) / Width);
        }
    }

    MatrixMulOnHost(M, N, hP, Width);
    MatrixMulOnDevice(M, N, gP, Width);

    // Element-wise comparison with relative tolerance epsilon*|expected|.
    errCnt = 0;
    for (y = 0; y < Width; y++)
    {
        for (x = 0; x < Width; x++)
        {
            it = hP[y*Width + x];
            if (fabs(it - gP[y*Width + x]) > epsilon*it)
            {
                printf("failing x=%d, y=%d: %f!=%f \n", x, y, it, gP[y*Width + x]);
                errCnt++;
            }
        }
    }
    if (errCnt == 0)
        printf("\nTEST PASSED\n");
    else
        printf("\n\nTEST FAILED: number of errors: %d\n", errCnt);

    // Bug fix: the original leaked all four host buffers and always exited 0.
    free(M);
    free(N);
    free(hP);
    free(gP);
    return errCnt == 0 ? 0 : 1;
}
|
20,553 | #include<bits/stdc++.h>
#include<cuda.h>
using namespace std;
struct edge{
int u, v, c, f;
};
// pushes flow along the edges adjacent to a vertex, concurrently for all vertices
// Pushes flow along the edges adjacent to a vertex, concurrently for all
// vertices. One thread per vertex; each thread scans its adjacency slice
// adj[prefix_deg[id] .. prefix_deg[id+1]) while the vertex still has excess.
// Incoming flow is accumulated atomically into excess_inc and folded into
// excess later (by relabel), so each thread only ever decrements excess of
// its own vertex here.
__global__ void push(int n, int* excess, int* excess_inc, int* prefix_deg, int* adj, int* height, int* new_height, edge* d_edges){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // push flow only for intermediate vertices (non-terminals):
    // vertex 0 is the source and vertex n-1 is the sink.
    if(id > 0 && id < n - 1){
        // 3*n is the "infinity" sentinel used by compute_label/relabel to
        // detect that no admissible neighbor was found.
        new_height[id] = 3 * n;
        for(int i = prefix_deg[id]; i < prefix_deg[id + 1] && excess[id]; i++){
            int idx = adj[i];
            // Snapshot of the edge; NOTE(review): f may become stale while
            // other threads atomicAdd to d_edges[idx].f concurrently —
            // presumably the outer iteration loop absorbs this; verify.
            int u = d_edges[idx].u, v = d_edges[idx].v, c = d_edges[idx].c, f = d_edges[idx].f;
            // pushes flow along forward edge: residual capacity c-f, and only
            // "downhill" (strictly decreasing height)
            if(u == id && f < c && height[u] > height[v]){
                int push_flow = min(c - f, excess[u]);
                atomicAdd(excess_inc + v, push_flow);
                atomicAdd(&(d_edges[idx].f), push_flow);
                excess[u] -= push_flow;
            }
            // pushes flow along reverse edge: cancels up to f units of flow
            if(v == id && f && height[v] > height[u]){
                int push_flow = min(f, excess[v]);
                atomicAdd(excess_inc + u, push_flow);
                atomicAdd(&(d_edges[idx].f), -push_flow);
                excess[v] -= push_flow;
            }
        }
    }
}
// computes labels (out of place)
// Computes tentative labels out of place: for every residual edge touching an
// overflowing intermediate vertex, lower-bounds that vertex's new label by the
// neighbor's height plus one. One thread per edge.
__global__ void compute_label(int n, int m, int* excess, int* excess_inc, int* height, int* new_height, edge* d_edges){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= m)
        return;
    const edge e = d_edges[tid];
    // Forward direction u->v is admissible while residual capacity remains.
    if(e.u > 0 && e.u < n - 1 && (excess[e.u] || excess_inc[e.u]) && e.f < e.c)
        atomicMin(new_height + e.u, height[e.v] + 1);
    // Reverse direction v->u is admissible while the edge carries flow.
    if(e.v > 0 && e.v < n - 1 && (excess[e.v] || excess_inc[e.v]) && e.f)
        atomicMin(new_height + e.v, height[e.u] + 1);
}
// applies the labels found in computer_label and updates excess of each vertex
// Commits the tentative labels computed by compute_label and folds the flow
// pushed this round (excess_inc) into each vertex's excess. Counts vertices
// that still overflow so the host loop knows when to stop.
__global__ void relabel(int n, int* excess, int* excess_inc, int* height, int* new_height, int* is_excess){
    int v = blockIdx.x * blockDim.x + threadIdx.x;
    // The source (0) and sink (n-1) are never relabeled.
    if(v <= 0 || v >= n - 1)
        return;
    // 3*n is the sentinel meaning "no admissible neighbor found" — keep the
    // old label in that case.
    if(new_height[v] != 3 * n)
        height[v] = new_height[v];
    excess[v] += excess_inc[v];
    excess_inc[v] = 0;
    if(excess[v])
        atomicAdd(is_excess, 1);
}
// computes the flow out of source
// Computes the net flow out of the source (vertex 0): flow on edges leaving
// it minus flow on edges entering it. One thread per edge; the result is
// accumulated atomically into *total_flow.
__global__ void compute_maxflow(int m, int* total_flow, edge* d_edges)
{
    int eid = blockIdx.x * blockDim.x + threadIdx.x;
    if(eid >= m)
        return;
    const edge cur = d_edges[eid];
    if(cur.u == 0) atomicAdd(total_flow, cur.f);
    if(cur.v == 0) atomicAdd(total_flow, -cur.f);
}
// global relabeling heuristic - performs BFS from sink to find lower bound on labels
// Global relabeling heuristic - performs a parallel BFS from the sink over
// the residual graph to find a lower bound on labels. One thread per edge;
// cur_wave is the current BFS frontier number and wave[x] == -1 marks an
// unvisited vertex. *progress counts vertices claimed this wave so the host
// knows when the BFS has converged.
__global__ void global_label(int m, int cur_wave, int* height, int* wave, int* progress, edge* d_edges)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < m){
        int u = d_edges[id].u, v = d_edges[id].v, c = d_edges[id].c, f = d_edges[id].f;
        // Walk v -> u backwards along a forward edge with residual capacity;
        // the atomicCAS claims u for the next frontier exactly once.
        if(wave[v] == cur_wave && f < c && atomicCAS(wave + u, -1, cur_wave + 1) == -1){
            height[u] = height[v] + 1;
            atomicAdd(progress, 1);
        }
        // Walk u -> v backwards along a reverse edge that carries flow.
        if(wave[u] == cur_wave && f && atomicCAS(wave + v, -1, cur_wave + 1) == -1){
            height[v]=height[u] + 1;
            atomicAdd(progress, 1);
        }
    }
}
// Driver for the parallel push-relabel max-flow solver.
// Usage: <graph file> <global relabel flag (0 or 1)>
// Graph file format: first line "n m", then m lines "u v c" (1-indexed).
int main(int argc, char* argv[]){
    auto clk=clock();
    if(argc < 3){
        cout<<"Enter file name (graph) and global relabel heuristic flag (0 or 1)"<<endl;
        return 0;
    }
    int n, m;
    edge *edges, *d_edges;
    int *excess, *d_excess, *prefix_deg, *d_prefix_deg, *adj, *d_adj, *d_excess_inc, *d_is_excess, *height, *new_height,
    *d_total_flow, *progress, *wave;
    // NOTE(review): fin is never checked for open/parse failure — a bad path
    // leaves n and m uninitialized.
    ifstream fin(argv[1]);
    int global_relabel_flag = atoi(argv[2]);
    fin >> n >> m;
    // Per-vertex degree counts and per-vertex lists of incident edge indices
    // (each edge is registered at both endpoints).
    vector<int> deg(n);
    vector<vector<int>> edge_idx(n);
    edges = new edge[m];
    for(int i = 0; i < m; i++){
        fin >> edges[i].u >> edges[i].v >> edges[i].c;
        // Convert from 1-indexed input to 0-indexed vertices.
        edges[i].u--;
        edges[i].v--;
        deg[edges[i].u]++;
        deg[edges[i].v]++;
        edge_idx[edges[i].u].push_back(i);
        edge_idx[edges[i].v].push_back(i);
        edges[i].f = 0;
    }
    // NOTE(review): reset_height is allocated and filled but never used below.
    int* reset_height = new int[n];
    for(int i = 0; i < n;i++)
        reset_height[i] = n;
    // Initial preflow: saturate every edge leaving the source (vertex 0) and
    // credit the receiving vertex's excess.
    excess = (int*) malloc(n * sizeof(int));
    memset(excess, 0, n * sizeof(int));
    for(int i: edge_idx[0]){
        int u = edges[i].u, v = edges[i].v, c = edges[i].c;
        if(!u){
            edges[i].f = edges[i].c;
            excess[v] += c;
        }
    }
    // CSR-style adjacency: prefix_deg[i]..prefix_deg[i+1] indexes vertex i's
    // slice of adj, which holds edge indices into edges[].
    prefix_deg = (int*) malloc((n + 1) * sizeof(int));
    prefix_deg[0] = 0;
    for(int i = 1; i <= n; i++){
        prefix_deg[i] = prefix_deg[i-1] + deg[i-1];
    }
    adj = (int*) malloc(2 * m * sizeof(int));
    for(int i = 0, c = 0; i < n ; i++){
        for(int j = 0; j < edge_idx[i].size(); j++, c++){
            adj[c] = edge_idx[i][j];
        }
    }
    // Device allocations mirroring the host structures.
    cudaMalloc(&d_excess, n * sizeof(int));
    cudaMalloc(&d_excess_inc, n * sizeof(int));
    cudaMalloc(&progress, sizeof(int));
    cudaMalloc(&wave, n * sizeof(int));
    cudaMalloc(&d_prefix_deg, (n + 1) * sizeof(int));
    cudaMalloc(&d_adj, 2 * m * sizeof(int));
    cudaMalloc(&height, n * sizeof(int));
    cudaMalloc(&new_height, n * sizeof(int));
    cudaMalloc(&d_edges, m * sizeof(edge));
    cudaMalloc(&d_is_excess, sizeof(int));
    cudaMemset(height, 0, n * sizeof(int));
    cudaMemset(d_excess_inc, 0, n * sizeof(int));
    cudaMemcpy(d_excess, excess, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_prefix_deg, prefix_deg, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_adj, adj, 2 * m * sizeof(int), cudaMemcpyHostToDevice);
    // Single-int copy: sets only height[0] (the source label) to n; all other
    // labels stay 0 from the memset above.
    cudaMemcpy(height, &n, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_edges, edges, m * sizeof(edge), cudaMemcpyHostToDevice);
    int threads = 1024;
    int m_blocks = ceil((float)m/threads);
    int n_blocks = ceil((float)n/threads);
    int total_flow = 0;
    int is_excess;
    int iter = 0,prog;
    // loop to push flow along edges
    // if there is no excess vertex, the loop breaks
    do{
        iter++;
        push<<<n_blocks, threads>>>(n, d_excess, d_excess_inc, d_prefix_deg, d_adj, height, new_height, d_edges);
        compute_label<<<m_blocks, threads>>>(n, m, d_excess, d_excess_inc, height, new_height, d_edges);
        cudaMemset(d_is_excess, 0, sizeof(int));
        relabel<<<n_blocks, threads>>>(n, d_excess, d_excess_inc, height, new_height, d_is_excess);
        // Blocking copy of the overflow count; also serves as the per-round
        // synchronization point.
        cudaMemcpy(&is_excess, d_is_excess, sizeof(int), cudaMemcpyDeviceToHost);
        // applies global relabeling every n iterations
        if(global_relabel_flag && iter % n == 0){ // perform global relabeling heuristic
            // wave = -1 marks unvisited; the sink (n-1) seeds wave 0.
            // (memset of -1 works because -1 is all-ones bytewise.)
            cudaMemset(wave, -1, n * sizeof(int));
            cudaMemset(wave + (n - 1) , 0, sizeof(int));
            int cur_wave = 0;
            // BFS wave-by-wave until a wave claims no new vertex.
            do{
                cudaMemset(progress, 0, sizeof(int));
                global_label<<<m_blocks, threads>>>(m, cur_wave++, height, wave, progress, d_edges);
                cudaMemcpy(&prog, progress, sizeof(int), cudaMemcpyDeviceToHost);
            }while(prog);
        }
    }while(is_excess);
    // Sum the net flow out of the source and report.
    cudaMalloc(&d_total_flow, sizeof(int));
    cudaMemset(d_total_flow, 0, sizeof(int));
    compute_maxflow<<<m_blocks, threads>>>(m, d_total_flow, d_edges);
    cudaMemcpy(&total_flow, d_total_flow, sizeof(int), cudaMemcpyDeviceToHost);
    double t_elapsed = (double)(clock()-clk)/CLOCKS_PER_SEC;
    // NOTE(review): host and device allocations are never freed — acceptable
    // for a one-shot program but worth cleaning up.
    printf("|V|:%d |E|:%d Flow:%d\nTime:%f\n", n, m, total_flow, t_elapsed);
}
20,554 | #include "includes.h"
// Normalizes an agreement buffer into [0, 1] on a log scale:
// out = clamp(log(max) - log(agreement), 0, maxOut) / maxOut, with
// non-positive agreement saturating to 1.0.
__global__ void kern_NormLogBuffer(float* agreement, float* output, float maxOut, int size, short max)
{
    // Flat global thread index (CUDASTDOFFSET is the project's standard offset macro).
    int idx = CUDASTDOFFSET;
    // Bug fix: the original dereferenced agreement[idx] before the bounds
    // check, an out-of-bounds global read for threads past the buffer end.
    if( idx < size )
    {
        float locAgreement = agreement[idx];
        // log(max / agreement); non-positive agreement maps to the ceiling.
        // logf keeps the computation in single precision.
        float logValue = (locAgreement > 0.0f) ? logf((float)max) - logf(locAgreement) : maxOut;
        // Clamp to [0, maxOut], then normalize to [0, 1].
        logValue = (logValue > 0.0f) ? logValue : 0.0f;
        logValue = (logValue < maxOut) ? logValue / maxOut : 1.0f;
        output[idx] = logValue;
    }
}
20,555 | #include "includes.h"
// Converts an RGBA image (float4 per pixel) from RGB to HSV in place-compatible
// fashion: H in degrees [0, 360) (or [0, 1) when normalize is set), S and V in
// [0, 1] for inputs in [0, 1]. Alpha is passed through. One thread per pixel;
// stride is the row pitch in elements.
__global__ void cuConvertRGBToHSVKernel(const float4* src, float4* dst, size_t stride, int width, int height, bool normalize)
{
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    int idx = py*stride + px;
    if (px<width && py<height)
    {
        // Load the pixel once.
        float4 rgba = src[idx];
        float r = rgba.x;
        float g = rgba.y;
        float b = rgba.z;

        float maxc = fmaxf(r, fmaxf(g, b));
        float minc = fminf(r, fminf(g, b));
        float chroma = maxc - minc;

        // Hue: piecewise by dominant channel. Sequential ifs (not else-if)
        // are deliberate: on ties the later channel wins, matching the
        // original behavior.
        float h = 0.0f;
        if (chroma != 0.0f)
        {
            if (maxc == r)
                h = fmod((g - b)/chroma, 6.0f);
            if (maxc == g)
                h = (b - r)/chroma + 2.0f;
            if (maxc == b)
                h = (r - g)/chroma + 4.0f;
        }
        h *= 60.0f;

        // Value is the channel maximum.
        float v = maxc;

        // Saturation is chroma over value (zero for achromatic pixels).
        float s = 0.0f;
        if (chroma != 0.0f)
            s = chroma/v;

        // Wrap negative hue into [0, 360).
        if (h < 0.0f)
            h += 360.0f;

        if (normalize)
            h /= 360.0f;

        // Write back, preserving alpha.
        dst[idx] = make_float4(h, s, v, rgba.w);
    }
}
20,556 | // Checks errors generated by passing a bad value for --cuda-gpu-arch.
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=compute_20 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix BAD %s
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm20 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix BAD %s
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_19 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix BAD %s
// BAD: error: Unsupported CUDA gpu architecture
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_20 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix OK %s
// RUN: %clang -### -target x86_64-linux-gnu --cuda-gpu-arch=sm_52 -c %s 2>&1 \
// RUN: | FileCheck -check-prefix OK %s
// RUN: %clang -### -target x86_64-linux-gnu -c %s 2>&1 \
// RUN: | FileCheck -check-prefix OK %s
// OK-NOT: error: Unsupported CUDA gpu architecture
|
20,557 | #include <thrust/device_vector.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <chrono>
// Benchmarks thrust::exclusive_scan over a device vector of <size> ones and
// prints the elapsed time in seconds.
int main(int argc, char** argv){
    // Bug fix: the original indexed argv[1] without checking argc and never
    // validated the parsed size.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <size>\n", argv[0]);
        return 1;
    }
    int size = atoi(argv[1]);
    if (size <= 0) {
        fprintf(stderr, "size must be a positive integer\n");
        return 1;
    }
    thrust::device_vector<int> test(size);
    thrust::fill(test.begin(), test.end(), 1);
    // Time only the scan itself (vector construction/fill excluded).
    auto started = std::chrono::high_resolution_clock::now();
    thrust::exclusive_scan(test.begin(), test.end(), test.begin(), 0, thrust::plus<int>());
    auto end = std::chrono::high_resolution_clock::now();
    printf("Thrust time %.4f\n", (std::chrono::duration_cast<std::chrono::milliseconds>(end - started).count()) / 1000.0);
    return 0;
}
|
20,558 | #include <cuda_runtime.h>
#include <stdio.h>
// Prints the built-in index and dimension variables for every launched
// thread. Device printf output is serialized; this is a debugging aid only.
__global__ void checkIndex(void){
    printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) gridDim:(%d,%d,%d) \n",
    threadIdx.x,threadIdx.y,threadIdx.z,
    blockIdx.x,blockIdx.y,blockIdx.z,
    blockDim.x,blockDim.y,blockDim.z,
    gridDim.x,gridDim.y,gridDim.z);
}
// Prints the launch configuration from the host, then launches checkIndex so
// every device thread reports its own coordinates.
int main(int argc,char **argv){
    // data volume
    int nElem=6;

    // grid & block: ceil-divide so every element gets a thread
    dim3 block(3);
    dim3 grid((nElem+block.x-1)/block.x);

    // check grid & block from the Host side
    printf("grid.x %d grid.y %d grid.z %d\n",grid.x,grid.y,grid.z);
    printf("block.x %d block.y %d block.z %d\n",block.x,block.y,block.z);

    // check grid & block from the Dev side
    checkIndex<<<grid,block>>>();

    // Bug fix: wait for the kernel so its printf output is guaranteed to be
    // flushed before the context is torn down.
    cudaDeviceSynchronize();

    // reset device, releasing all device-side state
    cudaDeviceReset();
    return(0);
}
|
20,559 | #include<stdio.h>
#define NUM_BLOCKS 15
#define BLOCK_WIDTH 1
// Each launched block (one thread per block here) announces itself.
__global__ void hello() {
    printf("Hello world! I am a thread block %d\n", blockIdx.x);
}
// Launches NUM_BLOCKS single-thread blocks of the hello kernel and waits
// for their printf output before exiting.
int main(int argc, char **argv) {
    // Launch the kernel: NUM_BLOCKS blocks of BLOCK_WIDTH thread(s) each.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
    // Block until the kernel finishes so its printf()s are flushed.
    cudaDeviceSynchronize();
    printf("that is all\n");
    return 0;
} |
20,560 | #include "vscale.cuh"
#include <cuda.h>
#include <stdio.h>
#include <random>
#define NUM_THREADS 512 // another option is 16 based on the problem statement
// reference code is: https://github.com/DanNegrut/ME759/blob/main/2021Spring/Assignments/general/timing.md
// Fills two managed arrays with uniform random values, times the vscale
// kernel with CUDA events, and prints the elapsed ms plus b[0] and b[n-1].
// Usage: ./prog <n>
// reference code is: https://github.com/DanNegrut/ME759/blob/main/2021Spring/Assignments/general/timing.md
int main(int argc, char *argv[]) {
    // BUG FIX: argv[1] was dereferenced without checking argc, and n <= 0
    // produced an invalid launch plus an out-of-bounds read of b[n-1].
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <n>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {
        std::fprintf(stderr, "n must be positive\n");
        return 1;
    }
    // set up random value generator for both arrays
    std::random_device entropy_source;
    std::mt19937_64 generator(entropy_source());
    const float minA = -10.0, maxA = 10.0;
    const float minB = 0.0, maxB = 1.0;
    std::uniform_real_distribution<float> distA(minA, maxA);
    std::uniform_real_distribution<float> distB(minB, maxB);
    // allocate unified-memory arrays, readable from host and device
    float *a, *b;
    cudaMallocManaged((void **)&a, sizeof(float) * n);
    cudaMallocManaged((void **)&b, sizeof(float) * n);
    // initialize with appropriate random values
    for (int i = 0; i < n; i++) {
        a[i] = distA(generator);
        b[i] = distB(generator);
    }
    // timing events
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // prefetch the managed buffers to the active device to avoid page
    // faults inside the timed region
    int device = -1;
    cudaGetDevice(&device);
    cudaMemPrefetchAsync(a, sizeof(float) * n, device, NULL);
    cudaMemPrefetchAsync(b, sizeof(float) * n, device, NULL);
    // ceil-div so every element is covered
    int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
    // time the kernel call; recording stop in the same stream right after
    // the launch removes the host sync the original placed inside the
    // timed region, and cudaEventSynchronize still guarantees completion
    // before b[] is read on the host.
    cudaEventRecord(start);
    vscale<<<NUM_BLOCKS, NUM_THREADS>>>(a, b, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // elapsed time in milliseconds
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("%f\n%f\n%f\n", ms, b[0], b[n - 1]);
    // clean up (events were leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
|
20,561 | // hello.cu
//
// Fred J. Frigo
// 01-Sep-2020
//
// See section B19.4:
// https://docs.nvidia.com/cuda/archive/9.1/pdf/CUDA_C_Programming_Guide.pdf`
//
// To compile: nvcc hello.cu -o hello
//
#include <stdio.h>
// Each thread of the launch prints its index and the float argument.
__global__ void helloCUDA(float f) {
    printf("Hello thread %d, f=%f\n", threadIdx.x, f);
}
// Launches one block of five threads and waits for the device printfs.
int main() {
    helloCUDA<<<1, 5>>>(1.2345f);
    cudaDeviceSynchronize();   // flush device-side printf before exiting
    printf("Hello CUDA!\n");
    return 0;
} |
20,562 | #include <stdio.h>
#include <time.h>
#include <math.h>
#include <float.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
const int blocksize = 800;
const int N = 16;
const int PROFILE_SIZE = 8376;
const int PROFILE_ARRAY_SIZE = PROFILE_SIZE * 6;
__device__ const int GPU_PROFILE_SIZE = 8376;
const int FRAME_WIDTH = 1024;
const int FRAME_HEIGHT = 768;
const int PART_FRAME_WIDTH = FRAME_WIDTH/10;
const int PART_FRAME_HEIGHT = FRAME_HEIGHT/10;
const int FRAME_DIMENSIONS = FRAME_WIDTH * FRAME_HEIGHT;
const int PARTITIONED_FRAME_DIMENSIONS = (FRAME_WIDTH/10) * (FRAME_HEIGHT/10);
const int FRAME_ARRAY_SIZE = FRAME_DIMENSIONS * 3;
__device__ const int GPU_FRAME_DIMENSIONS = FRAME_DIMENSIONS;
__device__ double X,Y,Z,L,A,B;
__device__ struct xyz
{
double X;
double Y;
double Z;
};
__device__ struct LAB
{
double L;
double A;
double B;
};
// Euclidean distance between two points in CIELAB space.
__device__ double distance(double L1,double A1,double B1,double L2,double A2,double B2)
{
    double dl = L1 - L2;
    double da = A1 - A2;
    double db = B1 - B2;
    return sqrt(dl * dl + da * da + db * db);
}
// Component-wise sum of two XYZ triples; the result is written to the
// device globals X, Y, Z (side-effect API used by the legacy kernel path).
__device__ void addXYZ(double X1,double Y1,double Z1,double X2,double Y2,double Z2)
{
X=X1+X2;
Y=Y1+Y2;
Z=Z1+Z2;
}
// Component-wise sum of two XYZ triples, returned by value.
// Struct-returning variant of addXYZ that avoids the device globals.
__device__ struct xyz addXYZ_st(double X1,double Y1,double Z1,double X2,double Y2,double Z2)
{
    struct xyz sum;
    sum.X = X1 + X2;
    sum.Y = Y1 + Y2;
    sum.Z = Z1 + Z2;
    return sum;
}
// rgb to xyz
//__device__ void RGBToXYZ(unsigned char R,unsigned char G,unsigned char B )
// sRGB (0-255 per channel) -> CIE XYZ using the D65 matrix from
// http://www.brucelindbloom.com/index.html; result stored in the device
// globals X, Y, Z.  Channels are normalised to [0,1] and inverse-companded
// before the matrix multiply.
// NOTE(review): the companding exponent here is 2.2 (gamma approximation);
// the exact sRGB spec uses 2.4 -- kept as-is to match the rest of the file.
__device__ void RGBToXYZ(int R,int G,int B )
{
    const double rn = (double)R / 255.0;
    const double gn = (double)G / 255.0;
    const double bn = (double)B / 255.0;
    const double r = (rn > 0.04045) ? pow((rn + 0.055) / 1.055, 2.2) : rn / 12.92;
    const double g = (gn > 0.04045) ? pow((gn + 0.055) / 1.055, 2.2) : gn / 12.92;
    const double b = (bn > 0.04045) ? pow((bn + 0.055) / 1.055, 2.2) : bn / 12.92;
    X = r * 0.4124564 + g * 0.3575761 + b * 0.1804375;
    Y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750;
    Z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041;
}
// sRGB (0-255 per channel) -> CIE XYZ (D65 matrix, brucelindbloom.com),
// returned by value rather than through the device globals.
// NOTE(review): companding exponent is 2.2; exact sRGB uses 2.4 -- kept
// as-is to match the sibling RGBToXYZ.
__device__ struct xyz RGBToXYZ_St(int R,int G,int B )
{
    struct xyz out;
    const double rn = (double)R / 255.0;
    const double gn = (double)G / 255.0;
    const double bn = (double)B / 255.0;
    const double r = (rn > 0.04045) ? pow((rn + 0.055) / 1.055, 2.2) : rn / 12.92;
    const double g = (gn > 0.04045) ? pow((gn + 0.055) / 1.055, 2.2) : gn / 12.92;
    const double b = (bn > 0.04045) ? pow((bn + 0.055) / 1.055, 2.2) : bn / 12.92;
    out.X = r * 0.4124564 + g * 0.3575761 + b * 0.1804375;
    out.Y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750;
    out.Z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041;
    return out;
}
// CIE f(t) helper for the XYZ->LAB transform: cube root above the epsilon
// threshold (0.008856), linear segment (kappa = 903.3) below it.
__device__ double FX(double e)
{
if(e > 0.008856)
// BUG FIX: the original computed pow(e, (1/3)); 1/3 is INTEGER division
// (== 0), so every above-threshold input mapped to 1.0.
e=pow(e, (1.0/3.0));
else
e=((903.3 * e) + 16) / 116;
return e;
}
// CIE L* from the normalised Y component: 116*y^(1/3) - 16 above the
// epsilon threshold, the linear kappa segment below it.
__device__ double Lxyz(double e)
{
if (e > 0.008856)
// BUG FIX: pow(e, (1 / 3)) used integer division (exponent 0), which
// made L constant (116*1 - 16 = 100) for all above-threshold inputs.
e= (116 * pow(e, (1.0 / 3.0))) - 16 ;
else
e= (903.3 * e);
return e;
}
//rgb to lab
// sRGB (0-255) -> CIELAB; the result is stored in the device globals
// L, A, B (side-effect API used by the legacy kernel path).
// BUG FIX: the third parameter was named `B`, shadowing the device global
// `B`, so the final assignment wrote the int parameter and the global b*
// channel was never updated.  The parameter is renamed `Bc` (callers pass
// positionally, so the interface is unchanged).
__device__ void ToLAB(int R,int G,int Bc )
{
double Fx, Fy, Fz;
RGBToXYZ(R,G,Bc);           // fills the globals X, Y, Z
double yr = Y / 1.0000;     // normalise by the D65 reference white
double xr = X /0.9504;
double zr = Z / 1.0888;
Fx = FX(xr);
Fy = FX(yr);
Fz = FX(zr);
L = Lxyz(yr);
A = 500 * (Fx - Fy);
B = 200 * (Fy - Fz);        // now reaches the global B
}
// sRGB (0-255) -> CIELAB via XYZ, returned by value (no device globals).
__device__ struct LAB ToLAB_st(int R,int G,int B )
{
    struct LAB out;
    struct xyz xyzv = RGBToXYZ_St(R,G,B);
    // Normalise by the D65 reference white before the f(t) transform.
    double fx = FX(xyzv.X / 0.9504);
    double fy = FX(xyzv.Y / 1.0000);
    double fz = FX(xyzv.Z / 1.0888);
    out.L = Lxyz(xyzv.Y / 1.0000);
    out.A = 500 * (fx - fy);
    out.B = 200 * (fy - fz);
    return out;
}
// CIE XYZ -> CIELAB (D65 reference white); the result is stored in the
// device globals L, A, B (side-effect API used by the legacy kernel path).
__device__ void XYZtoLAB(double X1, double Y1,double Z1)
{
double Fx, Fy, Fz;
// Normalise by the D65 white point (Xn=0.9504, Yn=1.0, Zn=1.0888).
double yr = Y1 / 1.0000;
double xr = X1 /0.9504;
double zr = Z1 / 1.0888;
Fx = FX(xr);
Fy = FX(yr);
Fz = FX(zr);
L = Lxyz(yr);
A = 500 * (Fx - Fy);
B = 200 * (Fy - Fz);
}
// CIE XYZ -> CIELAB (D65 reference white), returned by value.
__device__ struct LAB XYZtoLAB_st(double X1, double Y1,double Z1)
{
    struct LAB out;
    // Normalise by the D65 white point (Yn = 1.0, so yr is just Y1).
    double xr = (double)X1 / 0.9504;
    double yr = (double)Y1;
    double zr = (double)Z1 / 1.0888;
    double fx = FX(xr);
    double fy = FX(yr);
    double fz = FX(zr);
    out.L = Lxyz(yr);
    out.A = 500 * (fx - fy);
    out.B = 200 * (fy - fz);
    return out;
}
// CIELAB -> CIE XYZ (D65 reference white); the result is stored in the
// device globals X, Y, Z (side-effect API used by the legacy kernel path).
__device__ void LABToXYZ(double L1,double A1,double B1)
{
    // Invert the f(t) transform.
    double Fy = (L1 + 16) / 116;
    double Fx = (A1 / 500) + Fy;
    double Fz = Fy - (B1 / 200);
    // Above the epsilon threshold use the cubic branch, otherwise the
    // linear kappa (903.3) segment.
    double xr = (pow(Fx, 3) > (0.008856)) ? pow(Fx, 3) : ((116 * Fx - 16) / 903.3);
    double yr = (L1 > (0.008856 * 903.3)) ? pow(Fy, 3) : (L1 / 903.3);
    double zr = (pow(Fz, 3) > (0.008856)) ? pow(Fz, 3) : ((116 * Fz - 16) / 903.3);
    // Scale by the D65 white point.
    X = xr * 0.9504;
    Y = yr * 1.0;
    Z = zr * 1.0888;
}
// CIELAB -> CIE XYZ (D65 reference white), returned by value.
__device__ struct xyz LABToXYZ_st(double L1,double A1,double B1)
{
    struct xyz out;
    // Invert the f(t) transform.
    double Fy = (L1 + 16) / 116;
    double Fx = (A1 / 500) + Fy;
    double Fz = Fy - (B1 / 200);
    // Cubic branch above the epsilon threshold, linear kappa (903.3)
    // segment below it.
    double xr = (pow(Fx, 3) > (0.008856)) ? pow(Fx, 3) : ((116 * Fx - 16) / 903.3);
    double yr = (L1 > (0.008856 * 903.3)) ? pow(Fy, 3) : (L1 / 903.3);
    double zr = (pow(Fz, 3) > (0.008856)) ? pow(Fz, 3) : ((116 * Fz - 16) / 903.3);
    // Scale by the D65 white point.
    out.X = xr * 0.9504;
    out.Y = yr * 1.0;
    out.Z = zr * 1.0888;
    return out;
}
// __global__ void correct(int *block_frame, double *block_background, double *block_profile,char *a, int *b)
//{
// //a[threadIdx.x] += b[threadIdx.x];
// //GPU_FRAME_DIMENSIONS
// for(int pixel = 0 ; pixel < 1 ; pixel++)
// {
// int R = block_frame[3*pixel + 0];
// int G = block_frame[3*pixel + 1];
// int B = block_frame[3*pixel + 2];
//
// double bgX = block_background[3*pixel + 0];
// double bgY = block_background[3*pixel + 1];
// double bgZ = block_background[3*pixel + 2];
//
// /*double bgX1 = block_background[3*pixel + 0];
// double bgY1 = block_background[3*pixel + 1];
// double bgZ1 = block_background[3*pixel + 2];
//
// block_frame[3*pixel + 0]=(int)(block_background[3*pixel + 0]*block_background[3*pixel + 0]);
// block_frame[3*pixel + 1]=(int)(block_background[3*pixel + 1]*block_background[3*pixel + 1]);
// block_frame[3*pixel + 2]=(int)(block_background[3*pixel + 2]*block_background[3*pixel + 2]);
//
// }*/
//
//
// double FrameX,FrameY,FrameZ,FrameL,FrameA,FrameB,BlendL,BlendA,BlendB;
//
//
// // to find the best fit bin color
// // SAVES LAB in device global L,A,B variables
// ToLAB(R,G,B);
// FrameL=L;
// FrameA=A;
// FrameB=B;
//
// double DistanceInBin = DBL_MIN;
// double ClosestBinDistance;
// int BinIndex=0;
//
// double keyL;
// double keyA;
// double keyB;
//
// for(int bin = 0 ; bin < PROFILE_SIZE ; bin++)
// {
// keyL = block_profile[6*bin + 0];
// keyA = block_profile[6*bin + 1];
// keyB = block_profile[6*bin + 2];
//
// ClosestBinDistance=distance(FrameL,FrameA,FrameB,keyL,keyA,keyB);
//
// if (DistanceInBin >= ClosestBinDistance)
// continue;
//
// DistanceInBin = ClosestBinDistance;
// BinIndex=bin;
// }
//
// FrameL=block_profile[6*BinIndex + 3];
// FrameA=block_profile[6*BinIndex + 4];
// FrameB=block_profile[6*BinIndex + 5];
//
// //DO YOUR MAGIC
// DistanceInBin = DBL_MIN;
// ClosestBinDistance=0;
// BinIndex=0;
// for(int bin = 0 ; bin < PROFILE_SIZE ; bin++)
// {
//
// double valueL = block_profile[6*bin + 3];
// double valueA = block_profile[6*bin + 4];
// double valueB = block_profile[6*bin + 5];
//
// //getting the xyz values of the chocen bin
// LABToXYZ(valueL,valueA,valueB);
//
// FrameX=X;
// FrameY=Y;
// FrameZ=Z;
//
// addXYZ(FrameX,FrameY,FrameZ,bgX,bgY,bgZ);
//
// XYZtoLAB(X,Y,Z);
// BlendL=L;
// BlendA=A;
// BlendB=B;
//
//
// ClosestBinDistance=distance(FrameL,FrameA,FrameB,BlendL,BlendA,BlendB);
//
// if (DistanceInBin >= ClosestBinDistance)
// continue;
//
// DistanceInBin = ClosestBinDistance;
// BinIndex=bin;
//
// }
//
// block_frame[3*pixel + 0]=(int)block_profile[6*BinIndex + 3];
// block_frame[3*pixel + 1]=(int)block_profile[6*BinIndex + 4];
// block_frame[3*pixel + 2]=(int)block_profile[6*BinIndex + 5];
//
// }
//}
//
// Colour-correction kernel (struct-based variant): for each pixel, finds
// the profile bin whose LAB key is closest to the frame colour, then the
// bin whose value, blended with the background in XYZ, lands closest to
// that target, and writes the winning bin's value back into the frame.
// Parameters a and b are unused but kept for interface compatibility.
// BUG FIX: both searches used `if (DistanceInBin >= ClosestBinDistance)
// continue;` with DistanceInBin initialised to DBL_MAX, so a closer bin
// was NEVER accepted and BinIndex always stayed 0.  The update now fires
// when the candidate distance is strictly smaller than the best so far.
__global__ void correct2(int *block_frame, double *block_background, double *block_profile,char *a, int *b)
{
    for(int pixel = 0 ; pixel < 1 ; pixel++)
    {
        struct LAB lab;
        int R = block_frame[3*pixel + 0];
        int G = block_frame[3*pixel + 1];
        int B = block_frame[3*pixel + 2];
        double bgX = block_background[3*pixel + 0];
        double bgY = block_background[3*pixel + 1];
        double bgZ = block_background[3*pixel + 2];
        // Target colour of the frame pixel in LAB.
        lab = ToLAB_st(R,G,B);
        // Pass 1: nearest profile key (LAB distance) to the frame colour.
        double bestDistance = DBL_MAX;
        int BinIndex = 0;
        for(int bin = 0 ; bin < 8376 ; bin++)
        {
            struct LAB key;
            key.L = block_profile[6*bin + 0];
            key.A = block_profile[6*bin + 1];
            key.B = block_profile[6*bin + 2];
            double d = distance(lab.L, lab.A, lab.B, key.L, key.A, key.B);
            if (d < bestDistance)
            {
                bestDistance = d;
                BinIndex = bin;
            }
        }
        // The matched bin's value becomes the new target colour.
        lab.L = block_profile[6*BinIndex + 3];
        lab.A = block_profile[6*BinIndex + 4];
        lab.B = block_profile[6*BinIndex + 5];
        // Pass 2: bin whose value, blended with the background in XYZ,
        // is closest (in LAB) to the target.
        bestDistance = DBL_MAX;
        BinIndex = 0;
        for(int bin = 0 ; bin < 8376 ; bin++)
        {
            struct xyz XYZ = LABToXYZ_st(block_profile[6*bin + 3],
                                         block_profile[6*bin + 4],
                                         block_profile[6*bin + 5]);
            struct xyz XYZ_blend = addXYZ_st(XYZ.X, XYZ.Y, XYZ.Z, bgX, bgY, bgZ);
            struct LAB lab_blend = XYZtoLAB_st(XYZ_blend.X, XYZ_blend.Y, XYZ_blend.Z);
            double d = distance(lab.L, lab.A, lab.B,
                                lab_blend.L, lab_blend.A, lab_blend.B);
            if (d < bestDistance)
            {
                bestDistance = d;
                BinIndex = bin;
            }
        }
        block_frame[3*pixel + 0]=(int)block_profile[6*BinIndex + 3];
        block_frame[3*pixel + 1]=(int)block_profile[6*BinIndex + 4];
        block_frame[3*pixel + 2]=(int)block_profile[6*BinIndex + 5];
    }
}
// Reduced benchmark kernel: for pixel 0, scans 8000 profile bins for the
// value whose background-blended XYZ has the smallest magnitude and writes
// that bin's value into the frame.
// BUG FIX: the original commented out the blend assignments, leaving
// blendX/Y/Z uninitialised when their magnitude was measured -- undefined
// behaviour.  The blend (bin value + background) is restored.
__global__ void correct3(int *block_frame, double *block_background, double *block_profile)
{
    int pixel = 0;
    int block_background_index = 3 * pixel;
    double bgX = block_background[block_background_index + 0];
    double bgY = block_background[block_background_index + 1];
    double bgZ = block_background[block_background_index + 2];
    double closestColor = DBL_MAX;
    int binIndex = 0;
    for(int bin = 0 ; bin < 8000; bin++)
    {
        int block_profile_index = 6*bin;
        // Blend the bin's value channels with the background.
        double blendX = block_profile[block_profile_index + 3] + bgX;
        double blendY = block_profile[block_profile_index + 4] + bgY;
        double blendZ = block_profile[block_profile_index + 5] + bgZ;
        // Distance of the blended colour from the origin.
        double closestBinDistance = sqrt(blendX * blendX +
                                         blendY * blendY +
                                         blendZ * blendZ);
        if (closestBinDistance < closestColor)
        {
            closestColor = closestBinDistance;
            binIndex = bin;
        }
    }
    int block_profile_index = 6*binIndex;
    block_frame[block_background_index + 0]= (int)block_profile[block_profile_index + 3];
    block_frame[block_background_index + 1]= (int)block_profile[block_profile_index + 4];
    block_frame[block_background_index + 2]= (int)block_profile[block_profile_index + 5];
}
//basic cuda whihc runs on a single thread
// Host driver: builds synthetic profile/frame/background buffers, uploads
// the profile once, then processes the frame in 10 partitions, timing each
// correct3 launch with CUDA events and the whole run with clock().
int main(int argc, char** argv)
{
clock_t tstart;
clock_t end;
double runTime;
double *background,*profile,*partitioned_background;
int *frame,*partitioned_frame;
//var display-profile -- lookup table in LAB
profile = (double*) malloc(PROFILE_ARRAY_SIZE * sizeof(double));
for(int index = 0 ; index < PROFILE_ARRAY_SIZE ; index++)
profile[index] = 30;
//var frame-image -- XYZ
frame = (int*)malloc(FRAME_ARRAY_SIZE * sizeof(int));
for(int index = 0 ; index < FRAME_ARRAY_SIZE ; index++)
frame[index] = 1;
//var background-image -- background image in XYZ
background= (double*)malloc(FRAME_ARRAY_SIZE * sizeof(double));
for(int index = 0 ; index < FRAME_ARRAY_SIZE ; index++)
background[index] = 0;
//broken frame
// NOTE(review): partitioned buffers hold PARTITIONED_FRAME_DIMENSIONS
// elements (pixels, not pixel*3 channels) -- see the partition loop below.
partitioned_frame = (int*)malloc( PARTITIONED_FRAME_DIMENSIONS* sizeof(int));
///broken background-image
partitioned_background= (double*)malloc(PARTITIONED_FRAME_DIMENSIONS * sizeof(double));
//pointers on the device
double *gpu_profile;
int *gpu_frame;
double *gpu_background;
printf("prg starting\n");
const int psize = PROFILE_ARRAY_SIZE * sizeof(double);
const int fsize = FRAME_ARRAY_SIZE * sizeof(int);
const int bgsize = FRAME_ARRAY_SIZE * sizeof(double);
//memory allocation on the GPU
cudaMalloc(&gpu_profile, PROFILE_ARRAY_SIZE * sizeof(double));
cudaMalloc(&gpu_frame, FRAME_ARRAY_SIZE * sizeof(int));
cudaMalloc(&gpu_background, FRAME_ARRAY_SIZE * sizeof(double));
//1- copy the profile
cudaMemcpy(gpu_profile, profile, psize, cudaMemcpyHostToDevice);
// start outer timer
tstart = clock();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int partition_index=0;
//image partitioned into 10 parts
// NOTE(review): the loop walks FRAME_DIMENSIONS (pixel count) while the
// frame/background arrays hold FRAME_ARRAY_SIZE (= 3x) channel values,
// so only the first third of each array is ever partitioned -- verify
// the intended layout against correct3's indexing.
for(int f = 0 ; f < FRAME_DIMENSIONS ; f=f+PARTITIONED_FRAME_DIMENSIONS)
{
// assigning partitioed values of the frame and background
for(int index = f ; index < f+PARTITIONED_FRAME_DIMENSIONS ; index++)
{
partitioned_frame[partition_index] = frame[index];
partitioned_background[partition_index] = background[index];
partition_index++;
}
partition_index=0;
//2- pass the image to correct to the GPU
//cudaMemcpy(gpu_frame, frame, FRAME_ARRAY_SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_frame, partitioned_frame, PARTITIONED_FRAME_DIMENSIONS * sizeof(int), cudaMemcpyHostToDevice);
//3- pass the background image to the GPU
//cudaMemcpy(gpu_background, background, FRAME_ARRAY_SIZE * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_background, partitioned_background, PARTITIONED_FRAME_DIMENSIONS * sizeof(double), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 numBlocks(PART_FRAME_WIDTH/threadsPerBlock.x, PART_FRAME_HEIGHT/threadsPerBlock.y);
// Start record
cudaEventRecord(start, NULL);
correct3<<<numBlocks, threadsPerBlock>>>(gpu_frame, gpu_background, gpu_profile);
// NOTE(review): FRAME_ARRAY_SIZE ints are copied back although only the
// PARTITIONED chunk was uploaded this iteration -- bytes past that chunk
// come from earlier/unwritten device memory.  TODO confirm intent.
cudaMemcpy(frame, gpu_frame, FRAME_ARRAY_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float elapsedTime=-1;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Run time is: %f \n",elapsedTime);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
for(int index = 0 ; index < 3 ; index++)
printf("%d\n", frame[index]);
end = clock();
// NOTE(review): clock() differences are in CLOCKS_PER_SEC ticks, not
// milliseconds as the message below claims.
runTime = (end-tstart);
printf("total Run time is %g mil;liseconds \n",runTime);
cudaFree( gpu_profile );
cudaFree( gpu_frame );
cudaFree( gpu_background );
// partitioned_frame / partitioned_background are never freed here.
free(profile);
free(frame );
free(background );
return EXIT_SUCCESS;
}
|
20,563 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#define BLOCK_SIZE 16
#define CONVERGENCE_CHECK 1
// Applies a 3x3 Gaussian blur to one pixel per thread.  multiplier == 1
// selects the greyscale path (1 byte/pixel); otherwise the three
// interleaved channels at stride `multiplier` are filtered separately.
// NOTE(review): the bounds check compares x_dim against y and y_dim
// against x -- apparently row index vs height and column vs width; verify
// against the caller's grid setup.
// NOTE(review): border threads (x_dim or y_dim of 0 or max) read
// src[(x_dim-1)...] / [...+1] outside the image -- out-of-bounds access
// with no halo handling.  TODO confirm whether borders are meant to be
// skipped.
__global__ void convoluteBlock(unsigned char *src, unsigned char *dst, int x, int y, int multiplier) {
int x_dim = blockIdx.x * blockDim.x + threadIdx.x;
int y_dim = blockIdx.y * blockDim.y + threadIdx.y;
// 3x3 Gaussian kernel, weights sum to 1.
float h[3][3] = {{1/16.0, 2/16.0, 1/16.0},
{2/16.0, 4/16.0, 2/16.0},
{1/16.0, 2/16.0, 1/16.0}};
float red = 0.0, green = 0.0, blue = 0.0;
if (x_dim >= 0 && x_dim < y && y_dim >= 0 && y_dim < x) {
if (multiplier == 1) {
// Greyscale: weighted sum of the 3x3 neighbourhood, implicitly
// truncated to unsigned char on store.
dst[x_dim* x + y_dim] = h[0][0] * src[(x_dim- 1) * x + y_dim-1] +
h[0][1] * src[(x_dim- 1) * x + y_dim] +
h[0][2] * src[(x_dim- 1) * x + y_dim+1] +
h[1][0] * src[x_dim* x + y_dim-1] +
h[1][1] * src[x_dim* x + y_dim] +
h[1][2] * src[x_dim* x + y_dim+1] +
h[2][0] * src[(x_dim+ 1) * x + y_dim-1] +
h[2][1] * src[(x_dim+ 1) * x + y_dim] +
h[2][2] * src[(x_dim+ 1) * x + y_dim+1];
} else {
// Interleaved colour: channel c of pixel (r,c') lives at
// r*x*multiplier + c'*multiplier + c.
red = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier] +
h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier] +
h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier] +
h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier] +
h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier] +
h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier] +
h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier] +
h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier] +
h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier];
green = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 1] +
h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 1] +
h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 1] +
h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 1] +
h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 1] +
h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 1] +
h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 1] +
h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 1] +
h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 1];
blue = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 2] +
h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 2] +
h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 2] +
h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 2] +
h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 2] +
h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 2] +
h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 2] +
h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 2] +
h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 2];
dst[x_dim * x * multiplier + y_dim * multiplier] = red;
dst[x_dim * x * multiplier + y_dim * multiplier + 1] = green;
dst[x_dim * x * multiplier + y_dim * multiplier + 2] = blue;
}
}
}
// Per-block convergence check for the greyscale path: convbool[blockId]
// becomes 1 iff every valid pixel of this block is identical in src and
// dst.  multiplier is unused here (kept for interface parity with the
// RGB variant).
// BUG FIXES vs original: (1) __syncthreads() sat inside the divergent
// bounds-check branch -- undefined behaviour when part of the block is
// out of range; the barrier is now unconditional.  (2) out-of-range
// threads left their shared cell uninitialised, so edge blocks compared
// garbage; cells now default to "converged" and only valid pixels can
// veto.
__global__ void convergence_grey(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) {
    int x_dim = blockIdx.x * blockDim.x + threadIdx.x;
    int y_dim = blockIdx.y * blockDim.y + threadIdx.y;
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    /*Use of shared memory for the convergence check of the current thread's block*/
    __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE];
    // Default: converged.  Only an in-range, differing pixel clears it.
    blockconvalues[threadIdx.x][threadIdx.y] = 1;
    if (0 <= x_dim && x_dim < y && 0 <= y_dim && y_dim < x) {
        if (dst[x_dim * x + y_dim] != src[x_dim * x + y_dim])
            blockconvalues[threadIdx.x][threadIdx.y] = 0;
    }
    __syncthreads();
    /*First thread of the block checks whether every thread converged.*/
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        int blockconv = 1;
        for (int i = 0; i < BLOCK_SIZE && blockconv; i++) {
            for (int j = 0; j < BLOCK_SIZE; j++) {
                if (blockconvalues[i][j] != 1) {
                    blockconv = 0;
                    break;
                }
            }
        }
        convbool[blockId] = blockconv ? 1 : 0;
    }
}
// Per-block convergence check for the interleaved-colour path:
// convbool[blockId] becomes 1 iff all `multiplier` channels of every valid
// pixel of this block are identical in src and dst.
// BUG FIXES vs original: (1) __syncthreads() was inside the divergent
// bounds-check branch; now unconditional.  (2) the reader scanned
// BLOCK_SIZE*3 shared cells per row but each thread wrote only one --
// the other two were uninitialised; each thread now folds all three
// channels into its single cell.  (3) only channel 0 was compared; all
// three channels are compared now.
__global__ void convergence_rgb(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) {
    int x_dim = blockIdx.x * blockDim.x + threadIdx.x;
    int y_dim = blockIdx.y * blockDim.y + threadIdx.y;
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    /*Use of shared memory for the convergence check of the current thread's block*/
    __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE];
    // Default: converged.  Only an in-range, differing pixel clears it.
    blockconvalues[threadIdx.x][threadIdx.y] = 1;
    if (0 <= x_dim && x_dim < x && 0 <= y_dim && y_dim < y) {
        int base = x_dim * x * multiplier + y_dim * multiplier;
        if (dst[base] != src[base] ||
            dst[base + 1] != src[base + 1] ||
            dst[base + 2] != src[base + 2])
            blockconvalues[threadIdx.x][threadIdx.y] = 0;
    }
    __syncthreads();
    /*First thread of the block checks whether every thread converged.*/
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        int blockconv = 1;
        for (int i = 0; i < BLOCK_SIZE && blockconv; i++) {
            for (int j = 0; j < BLOCK_SIZE; j++) {
                if (blockconvalues[i][j] != 1) {
                    blockconv = 0;
                    break;
                }
            }
        }
        convbool[blockId] = blockconv ? 1 : 0;
    }
}
// Host driver: applies the 3x3 Gaussian blur `loops` times, ping-ponging
// between two device buffers, periodically checking per-block convergence
// and reporting the first iteration at which the image stopped changing.
//   vector     in/out pixel data, x*y*multiplier bytes
//   x, y       image dimensions
//   multiplier 1 for greyscale, 3 for interleaved RGB
//   loops      number of blur iterations
extern "C" void convolute(unsigned char *vector, int x, int y, int multiplier, int loops) {
    unsigned char *vector_a, *vector_b, *temp;
    char *convbool, *convboolhost;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // One convergence flag per (original sizing kept) BLOCK_SIZE chunk.
    int convcells = (x * y * multiplier) / BLOCK_SIZE;
    convboolhost = (char *)calloc(convcells, sizeof(char));
    assert(convboolhost != NULL);
    cudaMalloc(&vector_a, x * y * multiplier * sizeof(unsigned char));
    cudaMalloc(&vector_b, x * y * multiplier * sizeof(unsigned char));
    assert(vector_a != NULL);
    assert(vector_b != NULL);
    cudaMalloc(&convbool, sizeof(char) * convcells);
    assert(convbool != NULL);
    cudaMemcpy(vector_a, vector, x * y * multiplier * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemset(vector_b, 0, x * y * multiplier * sizeof(unsigned char));
    // BUG FIX: the original cast to double AFTER the integer division, so
    // ceil() was a no-op and the last partial block row/column was dropped.
    int blocksperlinex = (int)ceil((double)(x * multiplier) / BLOCK_SIZE);
    int blocksperliney = (int)ceil((double)y / BLOCK_SIZE);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(blocksperliney, blocksperlinex);
    int i = 0;
    int totalconv = 0, first_conv = -1;
    cudaEventRecord(start);
    for (i = 0; i < loops; i++) {
        if (i > 0) {
            // Ping-pong: last iteration's output becomes this one's input.
            temp = vector_a;
            vector_a = vector_b;
            vector_b = temp;
        }
        convoluteBlock<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, y, multiplier);
        if (i % CONVERGENCE_CHECK == 0 && i > 0) {   // && (was bitwise &)
            // BUG FIX: both the zeroing and checking loops indexed
            // convboolhost[i] (the iteration counter) instead of [j],
            // so only one stale cell was ever touched.
            for (int j = 0; j < convcells; j++)
                convboolhost[j] = 0;
            cudaMemcpy(convbool, convboolhost, sizeof(char) * convcells, cudaMemcpyHostToDevice);
            if (multiplier == 1)
                convergence_grey<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, y, convbool, multiplier);
            else
                convergence_rgb<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, y, convbool, multiplier);
            cudaMemcpy(convboolhost, convbool, sizeof(char) * convcells, cudaMemcpyDeviceToHost);
            totalconv = 1;
            for (int j = 0; j < convcells; j++) {
                if (convboolhost[j] == 0) {
                    totalconv = 0;
                    break;
                }
            }
        }
        if (totalconv == 1 && first_conv == -1) {
            first_conv = i;
        }
        cudaDeviceSynchronize();   // cudaThreadSynchronize is deprecated
    }
    cudaEventRecord(stop);
    // BUG FIX: after iteration i the blurred result is in vector_b (the
    // swap happens at the TOP of the next iteration); the original copied
    // vector_a, returning the input of the last iteration.  When loops == 0
    // no kernel ran and vector_a still holds the caller's data.
    cudaMemcpy(vector, (loops > 0) ? vector_b : vector_a,
               x * y * multiplier * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    if (first_conv >= 0)
        printf("Convergence at %d\n", first_conv);
    else
        printf("No convergence\n");
    float msecs = 0.0f;
    cudaEventSynchronize(stop);   // ensure stop completed before reading the timer
    cudaEventElapsedTime(&msecs, start, stop);
    printf("Elapsed time = %3.2lf secs\n", msecs / 1000.0);
    // Release everything (convbool and convboolhost leaked before).
    cudaFree(vector_a);
    cudaFree(vector_b);
    cudaFree(convbool);
    free(convboolhost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
20,564 | #include "cuda_runtime.h"
#include "stdio.h"
// Shows how the grid size grows as the block size shrinks for a fixed
// element count; ceil-division keeps every element covered.
int main(){
    const int nElem = 1024;
    const unsigned int widths[] = {1024, 512, 256, 128};
    dim3 block(widths[0]);
    dim3 grid((nElem + block.x - 1) / block.x);
    for (int i = 0; i < 4; ++i) {
        block.x = widths[i];
        grid.x = (nElem + block.x - 1) / block.x;
        printf("grid.x: %d block.x %d \n", grid.x, block.x);
    }
} |
20,565 | #include<stdio.h>
#include<iostream>
#include<stdlib.h>
#include<math.h>
#include<cmath>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define X 32
#define Y 32
#define X_IN 10
#define Y_IN 10
#define N ((X+1) * (Y+1))
#define TIME 10
double h_x = 0.2;
double h_y = 0.2;
double h_t = 1;
// Forward-elimination step of Gaussian elimination: the thread for column j
// eliminates that column from every row below it, updating the N-stride
// matrix `matr` and the right-hand side `slv` in place.
// NOTE(review): concurrent threads read and write overlapping rows with no
// synchronisation between columns -- correctness appears to depend on the
// launch configuration / scheduling.  TODO confirm against the caller.
__global__ void Gauss_forward(double *matr, double *slv, int n)
{
// Column index handled by this thread.
int j = blockIdx.y * blockDim.y + threadIdx.y;
for (int i = j + 1; i < n; i++)
{
// Multiplier that zeroes matr[i][j] against pivot row j.
double koef = matr[i*N + j] / matr[j*N + j];
for(int k = j; k < n; k++)
{
matr[i*N + k] -= koef * matr[j*N + k];
}
slv[i] -= koef * slv[j];
}
}
// Back-substitution step: the thread for row k computes
// solve[k] = (slv[k] - sum_{j>k} matr[k][j]*solve[j]) / matr[k][k].
// BUG FIX: the original computed
//   k = n - 1 - blockIdx.x * blockDim.x + threadIdx.x
// which, by operator precedence, ADDS threadIdx.x -- indexing past n-1
// for every thread but lane 0.  The flat thread index must be subtracted
// as a whole; a guard also skips threads mapped below row 0.
// NOTE(review): row k reads solve[j] for j > k, which other threads of the
// same launch are still writing, with no synchronisation -- verify the
// intended launch configuration.
__global__ void Gauss_reverse(double *matr, double *slv, double *solve, int n)
{
int k = n - 1 - (blockIdx.x * blockDim.x + threadIdx.x);
if (k < 0) return;
double sum = 0.0;
for(int j = k + 1; j < n; j++)
{
sum += solve[j] * matr[k*N + j];
}
solve[k] = (slv[k] - sum) / matr[k*N + k];
}
// Returns 1 when grid node (i, j) lies inside the inner tube rectangle,
// 0 otherwise.  flag != 0 selects a boundary-inclusive test; flag == 0
// tests the strict interior.
int inside_tube(int i, int j, int flag)
{
    const int left  = (X - X_IN) / 2;
    const int right = (X + X_IN) / 2;
    const int low   = (Y - Y_IN) / 2;
    const int high  = (Y + Y_IN) / 2;
    if (flag)
        return (j >= left && j <= right && i >= low && i <= high) ? 1 : 0;
    return (j > left && j < right && i > low && i < high) ? 1 : 0;
}
double sss[2];
int main()
{
//FILE* new_file = fopen("stat2", "w");
double *matr;
matr = (double*)malloc(sizeof(double) * N * N);
/*
timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);*/
// заполняем матрицу нулями
for(int i = 0; i < N; ++i)
{
for (int j = 0; j < N; ++j)
{
matr[i*N + j] = 0.0;
}
}
double* solve = new double [N];
double* x = new double [N];
for(int i = 0; i <= Y; ++i)
{
for (int j = 0; j <= X; ++j)
{
if( inside_tube(i, j, 1) )
{
if(inside_tube(i, j, 0))
{
// температура будет решением для уравнения Гаусса во всех точках,
// кроме пограничных, где заданы ГУ 3 рода
solve[i * (X + 1) + j] = 0;
}
else
{
solve[i * (X + 1) + j] = 100.0;
}
}
else
{
solve[i * (X + 1) + j] = 10.0;
}
}
}
// ГУ первого рода на внутренней стенке. Температура в узлах будет сохраняться
for(int i = (Y - Y_IN) / 2; i <= (Y + Y_IN) / 2; ++i)
{
for (int j = (X - X_IN) / 2; j <= (X + X_IN) / 2; ++j)
{
int k = i * (X + 1) + j;
matr[k*N + k] = 1;
}
}
// ГУ 3го рода на внешних стенках
for (int j = 1; j < X; ++j)
{
//int i = 0;
int k_vnesh = j;
int k_vnytr = j + (X + 1);
solve[k_vnesh] = 0;
matr[k_vnesh*N + k_vnesh] = 1 / h_y + 1;
matr[k_vnesh*N + k_vnytr] = -1 / h_y;
}
for (int j = 1; j < X; ++j)
{
int i = Y;
int k_vnesh = j + (X + 1) * i;
int k_vnytr = j + (X + 1) * (i - 1);
solve[k_vnesh] = 0;
matr[k_vnesh*N + k_vnesh] = 1 / h_y + 1;
matr[k_vnesh*N + k_vnytr] = -1 / h_y;
}
for (int i = 0; i <= Y; ++i)
{
//int j = 0;
int k_vnesh = i * (X + 1);
int k_vnytr = i * (X + 1) + 1;
solve[k_vnesh] = 0;
matr[k_vnesh*N + k_vnesh] = 1 / h_x + 1;
matr[k_vnesh*N + k_vnytr] = - 1 / h_x;
}
for (int i = 0; i <= Y; ++i)
{
int j = X;
int k_vnesh = i * (X + 1) + j;
int k_vnytr = i * (X + 1) + j - 1;
solve[k_vnesh] = 0;
matr[k_vnesh*N + k_vnesh] = 1 / h_x + 1;
matr[k_vnesh*N + k_vnytr] = - 1 / h_x;
}
// Внутренние узлы pаданные с помощью стандартной неявной схемы (по формуле 2)
for (int i = 1; i < Y; ++i)
{
for (int j = 1; j < X; ++j)
{
if( inside_tube(i, j, 1) )
{
continue;
}
int ij = i * (X + 1) + j;
int im1j = (i - 1) * (X + 1) + j;
int ip1j = (i + 1) * (X + 1) + j;
int ijm1 = i * (X + 1) + j - 1;
int ijp1 = i * (X + 1) + j + 1;
matr[ij*N + ij] = 2 * ( h_t / (h_x * h_x) + h_t / (h_y * h_y) ) + 1;
matr[ij*N + im1j] = - h_t / (h_y * h_y);
matr[ij*N + ip1j] = - h_t / (h_y * h_y); matr[ij*N + ijm1] = - h_t / (h_x * h_x); matr[ij*N + ijp1] = - h_t / (h_x * h_x);
solve[ij] = x[ij];
}
}
timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int n = N;
double *device_matr;
double *device_slv;
double *device_solve;
unsigned int size_matr = sizeof(double) * n * n;
unsigned int size_slv = sizeof(double) * n;
cudaError cudaStatus;
cudaMalloc((void**)&device_matr, size_matr);
cudaMalloc((void**)&device_slv, size_slv);
cudaMalloc((void**)&device_solve, size_slv);
float timerValueGPU;
cudaEvent_t start2, stop2;
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventRecord(start2, 0);
dim3 N_Treads(8);
dim3 N_Block(n / 8);
for (int k = 0; k <= TIME / h_t; k+= h_t)
{
cout << k << endl;
// в solve после решения системы уравнений методом Гаусса буде храниться температура трубы.
// Чтобы избежать копирования темературы в другой массив, заметим, что в большинстве узлов
// получення температура является решением для следующей итерации расчётов.
// Температура не является решением только на границах трубы, где заданы ГУ 3-го рода.
// В этих узлах в столбце-ответе должен стоять 0, установим его.
for (int j = 1; j < X; ++j)
{
solve[j] = 0;
solve[j + (X + 1) * Y] = 0;
}
for (int i = 0; i <= Y; ++i)
{
solve[i * (X + 1)] = 0;
solve[i * (X + 1) + X] = 0;
}
cudaMemcpy(device_solve, solve, size_slv, cudaMemcpyHostToDevice);
double *matr2;
matr2 = (double*)malloc(sizeof(double) * N * N);
double *slv = new double[n];
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
matr2[i*N + j] = matr[i*N + j];
}
slv[i] = solve[i];
}
cudaMemcpy(device_matr, matr2, size_matr, cudaMemcpyHostToDevice);
cudaMemcpy(device_slv, slv, size_slv, cudaMemcpyHostToDevice);
Gauss_forward <<< N_Block, N_Treads >>> (device_matr, device_slv, n);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
cout << "Solve last error:" << cudaGetErrorString(cudaStatus) << endl;
return 0;
}
cudaDeviceSynchronize();
cudaMemcpy(slv, device_slv, size_slv, cudaMemcpyDeviceToHost);
cudaMemcpy(matr2, device_matr, size_matr, cudaMemcpyDeviceToHost);
Gauss_reverse <<< N_Block, N_Treads >>> (device_matr, device_slv, device_solve, n);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
cout << "Solve last error:" << cudaGetErrorString(cudaStatus) << endl;
return 0;
}
cudaDeviceSynchronize();
cudaMemcpy(solve, device_solve, size_slv, cudaMemcpyDeviceToHost);
cudaMemcpy(slv, device_slv, size_slv, cudaMemcpyDeviceToHost);
cudaMemcpy(matr2, device_matr, size_matr, cudaMemcpyDeviceToHost);
delete matr2;
delete slv;
// теперь в solve хранится температура. Записываем её в файл.
/* for (int i = 0; i <= Y; i++)
{
for (int j = 0; j <= X; j++)
{
fprintf(new_file, "%3.2lf ", solve[i * (X + 1) + j]);
}
fprintf(new_file, "\n");
}
fprintf(new_file, "\n"); */
}
cudaEventRecord(stop2, 0);
cudaDeviceSynchronize();
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&timerValueGPU, start2, stop2);
cout << "GPU calculation time " << timerValueGPU << " msec" <<endl;
cudaFree(device_matr);
cudaFree(device_slv);
cudaFree(device_solve);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
printf("Time taken: %lf sec.\n",end.tv_sec-start.tv_sec+ 0.000000001*(end.tv_nsec-start.tv_nsec));
//fclose(new_file);
delete matr;
delete solve;
delete x;
return 0;
} |
20,566 | /*---------------------------------------------------------------*/
/* example 02 - Device Management */
/* Description : get properties of all visible device/GPU */
/* Version : 1.0 for CUDA 2.0 */
/* Compilation : */
/* nvcc --host-compilation C -o example02 example02.cu */
/* Execution : */
/* example02 */
/*---------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
/* Enumerate every visible CUDA device and print its properties.
 * Fixes: size_t fields (totalGlobalMem, memPitch, textureAlignment,
 * totalConstMem, sharedMemPerBlock) were printed with %d, which is
 * undefined behavior on 64-bit platforms; they are now cast to
 * unsigned long and printed with %lu. totalConstMem is divided by
 * 1024, so its unit label is kB, not MB. */
int main (int argc, char *argv[] ) {
    int nbDevice, dev;
    struct cudaDeviceProp prop;
    cudaGetDeviceCount( &nbDevice );
    printf( "Number of device: %d\n", nbDevice );
    for( dev = 0 ; dev < nbDevice ; ++dev ) {
        printf( "\nProperties of CUDA device number %d\n", dev );
        cudaGetDeviceProperties( &prop, dev );
        printf( " Device Name: %s\n", prop.name );
        printf( " Clock frequency: %d kHz\n", prop.clockRate );  /* clockRate is reported in kHz */
        printf( " Compute capability: %d.%d\n", prop.major, prop.minor );
        printf( " Device Overlap: " );
        switch( prop.deviceOverlap ) {
        case 0:
            printf( "No \n" );
            break;
        case 1:
            printf( "Yes\n" );
            break;
        default :
            printf( "Unknown (%d)\n", prop.deviceOverlap );
            break;
        }
        printf( " Number of Multi-processor: %d\n",
                prop.multiProcessorCount );
        printf( " Global Memory Size: %4lu MB\n",
                (unsigned long)(prop.totalGlobalMem / 1048576) );
        printf( " Memory Maximum Pitch: %4lu kB\n",
                (unsigned long)(prop.memPitch / 1024) );
        printf( " Texture Alignment: %4lu bytes\n",
                (unsigned long)prop.textureAlignment );
        printf( " Constant Memory Size: %4lu kB\n",
                (unsigned long)(prop.totalConstMem / 1024) );
        printf( " Shared Memory Size : %4lu kB per multi-processor\n",
                (unsigned long)(prop.sharedMemPerBlock / 1024) );
        printf( " Number of 32-bit registers: %d per multi-processor\n",
                prop.regsPerBlock );
        printf( " Grid maximum size (X,Y,Z): %5d x %5d x %5d\n",
                prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
        printf( " Thread bloc maximum size (X,Y,Z): %5d x %5d x %5d\n",
                prop.maxThreadsDim[0],
                prop.maxThreadsDim[1],
                prop.maxThreadsDim[2] );
        printf( " Maximum Number of threads per block: %d per block of thread\n",
                prop.maxThreadsPerBlock );
        printf( " Warp Size: %d threads\n", prop.warpSize );
    }
    return 0;
}
|
20,567 | #include <iostream>
#include <random>
using namespace std;
// Matrices are stored in row-major order:
// M(row, column) = *(M.elements + row * M.stride + col)
typedef struct
{
int width;
int height;
int stride;
float * elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Get a matrix element
// Read element (row, col) of a row-major matrix laid out with A.stride.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int offset = row * A.stride + col;
    return A.elements[offset];
}
// Set a matrix element
// Write `value` into element (row, col) of a row-major matrix with A.stride.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    const int offset = row * A.stride + col;
    A.elements[offset] = value;
}
// Get the BLOCK_SIZE x BLOCK_SIZE sub-matrix Asub of A that is located
// col sub-matrices to the right and row sub-matrices down from the
// upper-left corner of A
// View the BLOCK_SIZE x BLOCK_SIZE tile of A that lies `col` tiles right and
// `row` tiles down from A's upper-left corner. No data is copied: the returned
// Matrix aliases A.elements with the same stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix tile;
    tile.width  = BLOCK_SIZE;
    tile.height = BLOCK_SIZE;
    tile.stride = A.stride;
    tile.elements = A.elements + A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return tile;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Forward declaration of sequential CPU function
void sequential_cpu(Matrix A, Matrix B, Matrix C);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication — host code. Copies A and B to the device, launches
// the tiled kernel with one BLOCK_SIZE x BLOCK_SIZE thread block per output
// tile, and copies C back. Matrix dimensions are assumed to be multiples of
// BLOCK_SIZE and A.width == B.height (not checked here).
// Fix: the timing events were created on every call but never destroyed,
// leaking event resources each iteration (main() calls this in a loop).
void MatMu(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y);
    cudaEventRecord(start);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaEventRecord(stop);
    // Read C from device memory (blocking copy also waits for the kernel)
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << "Kernel call took " << milliseconds << " milliseconds" << endl;
    // Free timing events and device memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMu().
// Launch contract: blockDim must be (BLOCK_SIZE, BLOCK_SIZE) and all matrix
// dimensions multiples of BLOCK_SIZE; each block computes one BLOCK_SIZE x
// BLOCK_SIZE tile of C, each thread one element of that tile.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for(int m = 0; m < (A.width/BLOCK_SIZE); ++m)
{
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for(int e = 0; e < BLOCK_SIZE; ++e)
{
Cvalue += As[row][e] * Bs[e][col];
}
// Synchronize to make sure that the preceding
// computation is done before load two new sub-matrices of A and B in the next
// iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Sequential CPU version is given for comparison
// Sequential CPU reference: C = A * B (row-major, uses width as row stride).
// Fix: the original had an extra inner loop over all rows `br` of B, adding
// A[i][ac] * B[br][j] for EVERY br — that is not a matrix product. A correct
// product pairs A's column index with B's row index (single shared index k).
void sequential_cpu(Matrix A, Matrix B, Matrix C)
{
    for(int i = 0; i < C.height; ++i)
    {
        for(int j = 0; j < C.width; ++j)
        {
            float sum = 0.0f;
            for(int k = 0; k < A.width; ++k)
            {
                sum += A.elements[i*A.width + k] * B.elements[k*B.width + j];
            }
            C.elements[i*C.width + j] = sum;
        }
    }
}
// Benchmark driver: build random A (1600x2400... in BLOCK_SIZE units),
// B and C, then run the GPU multiply five times.
// Fix: the three host buffers were never freed and main had no return.
int main()
{
    int n;
    size_t size;
    std::default_random_engine generator;
    std::uniform_real_distribution<float> distribution(-1.0,1.0);
    Matrix A;
    A.width = A.stride = BLOCK_SIZE*150;
    A.height = BLOCK_SIZE*100;
    n = A.width * A.height;
    size = n * sizeof(float);
    A.elements = (float*)malloc(size);
    for(int i = 0; i < n; ++i)
        A.elements[i] = distribution(generator);
    Matrix B;
    B.width = B.stride = BLOCK_SIZE*200;
    B.height = A.width;           // inner dimensions must match
    n = B.width * B.height;
    size = n * sizeof(float);
    B.elements = (float*)malloc(size);
    for(int i = 0; i < n; ++i)
        B.elements[i] = distribution(generator);
    Matrix C;
    C.width = C.stride = B.width;
    C.height = A.height;
    n = C.width * C.height;
    size = n * sizeof(float);
    C.elements = (float*)malloc(size);
    for(int i = 0; i < 5; ++i)
    {
        printf("i = %d\n", i);
        MatMu(A, B, C);
    }
    // Release host buffers (previously leaked)
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
20,568 | #include <iostream>
#include <cstddef>
#include <vector>
#include <random>
#include <algorithm>
#include <chrono>
#define cudaErrorCheck(expr) \
do { \
cudaError_t err; \
err = (expr); \
if (err != cudaSuccess) { \
std::cerr << "line" << __LINE__ << ": cuda error: " << cudaGetErrorString(err) << "\n"; \
exit(1); \
} \
} while(0)
void stream(size_t sz);
int main()
{
    // 128 Mi floats (512 MiB per buffer) — element count, not bytes.
    const size_t elems = (size_t)128 * 1024 * 1024;
    stream(elems);
}
// Grid-stride copy: dst[i] = src[i] for i in [0, sz).
// Fix: the index and stride were `int`, which overflows (UB / infinite
// loop) when sz exceeds INT_MAX; both are now size_t, with the first
// multiplication widened before it can overflow.
__global__
void copy(float* dst, float* src, size_t sz)
{
    size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t idx = tid; idx < sz; idx += stride) {
        dst[idx] = src[idx];
    }
}
// Grid-stride elementwise add: dst[i] = src1[i] + src2[i] for i in [0, sz).
// Fix: int index/stride replaced with size_t (overflow for sz > INT_MAX).
__global__
void add(float* dst, float* src1, float* src2, size_t sz)
{
    size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t idx = tid; idx < sz; idx += stride) {
        dst[idx] = src1[idx] + src2[idx];
    }
}
// Grid-stride scale: dst[i] = alpha * src[i] for i in [0, sz).
// Fix: int index/stride replaced with size_t (overflow for sz > INT_MAX).
__global__
void scale(float* dst, float* src, float alpha, size_t sz)
{
    size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t idx = tid; idx < sz; idx += stride) {
        dst[idx] = alpha * src[idx];
    }
}
// STREAM-style bandwidth micro-benchmark over `sz` floats: times the copy,
// scale and add kernels and reports effective bandwidth in bytes/second
// (copy/scale move 2*sz floats, add moves 3*sz). `sz` is an element count.
void stream(size_t sz)
{
// Fill two host vectors with uniform random floats
std::mt19937 gen(std::random_device{}());
std::uniform_real_distribution<float> dist;
std::vector<float> v1(sz), v2(sz);
std::generate(v1.begin(), v1.end(), [&gen, &dist](){ return dist(gen); });
std::generate(v2.begin(), v2.end(), [&gen, &dist](){ return dist(gen); });
float *d1, *d2, *dr;
cudaErrorCheck(cudaMalloc((void**)&d1, sz * sizeof(float)));
cudaErrorCheck(cudaMalloc((void**)&d2, sz * sizeof(float)));
cudaErrorCheck(cudaMalloc((void**)&dr, sz * sizeof(float)));
cudaErrorCheck(cudaMemcpy(d1, v1.data(), sz * sizeof(float), cudaMemcpyHostToDevice));
cudaErrorCheck(cudaMemcpy(d2, v2.data(), sz * sizeof(float), cudaMemcpyHostToDevice));
std::chrono::system_clock::time_point start, end;
double du;
// copy: read sz + write sz floats
start = std::chrono::system_clock::now();
copy<<<ceil(sz / 64.0), 64>>>(dr, d1, sz);
cudaDeviceSynchronize();  // launch is async; wait before stopping the clock
end = std::chrono::system_clock::now();
du = std::chrono::duration<double>(end-start).count();
std::cerr << "time elapsed for copy: " << du
<< "\nmem bandwidth: " << 2 * sz * sizeof(float) / du << "\n\n";
// scale: read sz + write sz floats
start = std::chrono::system_clock::now();
scale<<<ceil(sz / 64.0), 64>>>(dr, d1, 3.3f, sz);
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
du = std::chrono::duration<double>(end-start).count();
std::cerr << "time elapsed for scale: " << du
<< "\nmem bandwidth: " << 2 * sz * sizeof(float) / du << "\n\n";
// add: read 2*sz + write sz floats
start = std::chrono::system_clock::now();
add<<<ceil(sz / 64.0), 64>>>(dr, d1, d2, sz);
cudaDeviceSynchronize();
end = std::chrono::system_clock::now();
du = std::chrono::duration<double>(end-start).count();
std::cerr << "time elapsed for add: " << du
<< "\nmem bandwidth: " << 3 * sz * sizeof(float) / du << "\n\n";
cudaFree(d1);
cudaFree(d2);
cudaFree(dr);
}
|
20,569 | #include <stdio.h>
// Each thread writes its own flat global index into a[].
// No bounds check: the launch configuration must cover exactly the array.
__global__ void kernel(int *a)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    a[gid] = gid;
}
// Demo: stage s_a on the device, copy it back through h_a and print it
// (the kernel launch itself is commented out, so this exercises memcpy only).
// Fixes: s_a was missing from the allocation check and was never freed.
int main()
{
    int dimx = 16;
    int num_bytes = dimx*sizeof(int);
    int *d_a=0, *h_a=0;
    int *s_a = 0;
    h_a = (int*)malloc(num_bytes);
    s_a = (int*)malloc(num_bytes);
    cudaMalloc( (void**)&d_a, num_bytes );
    if( 0==h_a || 0==s_a || 0==d_a )
    {
        printf("couldn't allocate memory\n");
        return 1;
    }
    for(int i=0; i<dimx; i++)
    {
        s_a[i] = i;
        h_a[i] = i;
    }
    cudaMemset( d_a, 0, num_bytes );
    cudaMemcpy( d_a, s_a, num_bytes, cudaMemcpyHostToDevice );
    dim3 grid, block;
    block.x = 4;
    grid.x = dimx/block.x;
    /*kernel<<<grid, block>>>(d_a);*/
    cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
    for(int i=0; i<dimx; i++)
        printf("%d ", h_a[i] );
    printf("\n");
    free( h_a );
    free( s_a );   // was leaked before
    cudaFree( d_a );
    return 0;
}
|
20,570 | //pass
//--blockDim=1024 --gridDim=1 --warp-sync=16 --no-inline
#include <cuda.h>
// Rotate values within each 32-thread warp's segment of A by one position:
// thread t reads the next lane's slot (wrapping inside the warp's 32 entries).
__global__ void shuffle (int* A)
{
    const int tid = threadIdx.x;
    const int lane = tid % 32;
    const int warpBase = tid - lane;       // first index of this warp's segment
    A[tid] = A[warpBase + (lane + 1) % 32];
}
|
20,571 | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
// Verification-tool test case (see the "//pass --blockDim=32 --gridDim=1"
// directives above): a tree-reduction-shaped access pattern with a redundant
// inner loop. The structure is intentional input for the checker — do not
// "optimize" it.
__global__ void test_Prog(int *A, int N) {
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
// Halve the active range each outer step, as in a tree reduction
for(int d = N/2; d > 0; d = d / 2)
{
int tmp=A[tid + d];   // partner element, read once per outer step
for (int i = 0; i < N; ++i)
{
int tmp2=A[tid];
int t2=tmp2;          // deliberate chain of copies
int t32=t2;
if (tid < d) {
A[tid] = tmp + t32;
}
}
}
}
20,572 | //pass
//--blockDim=32 --gridDim=1
#include <cuda.h>
// Verification-tool test case (see the "//pass --blockDim=32 --gridDim=1"
// directives above): each thread reads its right neighbour and accumulates
// into its own slot. N is unused here.
__global__ void test_Prog(int *A, int N) {
const int tid = threadIdx.x;
int tmp=A[tid+1];     // neighbour read — the pattern under verification
tmp=tmp+11;
A[tid]+=tmp;
}
20,573 | #include "includes.h"
extern "C"
// Replace each of the first n entries with its reciprocal (single block,
// one thread per element; threads past n do nothing).
// NOTE(review): a zero entry yields inf — presumably acceptable to callers.
__global__ void invertVectorElements(float* vector, int n)
{
    const int idx = threadIdx.x;
    if (idx >= n)
        return;
    vector[idx] = 1.0f / vector[idx];
}
20,574 | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
Parallel reduction kernels
*/
#ifndef _REDUCE_KERNEL_H_
#define _REDUCE_KERNEL_H_
#include <stdio.h>
/* CWJ includes */
#include <cuda.h>
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient
*/
/* Sum reduction, interleaved addressing selected via modulo.
   Launch contract: blockDim.x a power of two, dynamic shared memory of
   blockDim.x * sizeof(int), one partial sum written per block.
   Deliberately inefficient (teaching kernel): the modulo test keeps a few
   lanes of every warp active, so no warp retires early. */
__global__ void
reduce0(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts. */
/* Sum reduction with contiguous active threads (index = 2*s*tid), which
   removes the modulo of reduce0 but still uses interleaved addressing, so
   shared-memory bank conflicts remain. Same launch contract as reduce0. */
__global__ void
reduce1(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
// index is a multiple of 2*s, so index+s stays inside the block
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
/* Sum reduction with sequential addressing: the active stride halves each
   step, so a warp's accesses are contiguous — no divergence within the
   active range and no shared-memory bank conflicts.
   Launch contract: blockDim.x a power of two, dynamic shared memory of
   blockDim.x * sizeof(int); one partial sum is emitted per block. */
__global__ void
reduce2(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // stage this thread's input element into shared memory
    const unsigned int tid = threadIdx.x;
    const unsigned int gid = blockIdx.x * blockDim.x + tid;
    sdata[tid] = g_idata[gid];
    __syncthreads();
    // fold the upper half onto the lower half until one value remains
    for (unsigned int half = blockDim.x >> 1; half > 0; half >>= 1) {
        if (tid < half) {
            sdata[tid] += sdata[tid + half];
        }
        __syncthreads();   // outside the branch: all threads must reach it
    }
    // thread 0 publishes the block's partial sum
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
    }
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory
*/
/* MIN reduction (note: unlike reduce0-2 this variant computes min, not sum)
   using n/2 threads: the first combine happens while loading from global
   memory, so each block consumes 2*blockDim.x input elements.
   Caller must provide at least blockIdx limit * 2*blockDim.x elements. */
__global__ void
reduce3(int *g_idata, int *g_odata)
{
extern __shared__ int sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = min(g_idata[i], g_idata[i+blockDim.x]);
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid]= min(sdata[tid], sdata[tid + s]);
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version unrolls the last warp to avoid synchronization where it
isn't needed
*/
/* MIN reduction that unrolls the final warp, skipping __syncthreads() once
   only 32 threads remain. Each block consumes 2*blockDim.x input elements
   (first min taken during the global load); blockDim.x must be a power of
   two and at least 64.
   Fix: the warp-synchronous tail must read/write shared memory through a
   volatile pointer — without it the compiler may cache sdata values in
   registers and the unrolled steps read stale data (documented fix in
   NVIDIA's parallel reduction whitepaper). Note that on Volta+ hardware
   warp-synchronous code additionally needs __syncwarp(); volatile matches
   the vintage of this SDK-era code. */
__global__ void
reduce4(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = min(g_idata[i], g_idata[i+blockDim.x]);
    __syncthreads();
    // do reduction in shared mem down to one warp
    for(unsigned int s=blockDim.x/2; s>32; s>>=1)
    {
        if (tid < s)
            sdata[tid]= min(sdata[tid], sdata[tid + s]);
        __syncthreads();
    }
#ifndef __DEVICE_EMULATION__
    if (tid < 32)
#endif
    {
        // volatile prevents register caching between the unrolled steps
        volatile int *smem = sdata;
        smem[tid] = min(smem[tid], smem[tid + 32]); EMUSYNC;
        smem[tid] = min(smem[tid], smem[tid + 16]); EMUSYNC;
        smem[tid] = min(smem[tid], smem[tid +  8]); EMUSYNC;
        smem[tid] = min(smem[tid], smem[tid +  4]); EMUSYNC;
        smem[tid] = min(smem[tid], smem[tid +  2]); EMUSYNC;
        smem[tid] = min(smem[tid], smem[tid +  1]); EMUSYNC;
    }
    // write result for this block to global mem
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
    }
}
/*
This version is completely unrolled. It uses a template parameter to achieve
optimal code for any (power of 2) number of threads. This requires a switch
statement in the host code to handle all the different thread block sizes at
compile time.
*/
/* Fully unrolled SUM reduction: blockSize is a compile-time power of two so
   every step is resolved at compile time (host code must switch over block
   sizes). Each block consumes 2*blockSize input elements.
   Fix: the warp-synchronous tail now goes through a volatile shared-memory
   pointer; without volatile the compiler may keep sdata values in registers
   across the unrolled steps and produce wrong sums (NVIDIA reduction
   whitepaper fix; pre-Volta warp-synchronous idiom). */
template <unsigned int blockSize>
__global__ void
reduce5(int *g_idata, int *g_odata)
{
    extern __shared__ int sdata[];
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    sdata[tid] = g_idata[i] + g_idata[i+blockSize];
    __syncthreads();
    // do reduction in shared mem (dead branches removed at compile time)
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
    if (tid < 32)
#endif
    {
        // volatile prevents register caching between the unrolled steps
        volatile int *smem = sdata;
        if (blockSize >= 64) { smem[tid] += smem[tid + 32]; EMUSYNC; }
        if (blockSize >= 32) { smem[tid] += smem[tid + 16]; EMUSYNC; }
        if (blockSize >= 16) { smem[tid] += smem[tid +  8]; EMUSYNC; }
        if (blockSize >=  8) { smem[tid] += smem[tid +  4]; EMUSYNC; }
        if (blockSize >=  4) { smem[tid] += smem[tid +  2]; EMUSYNC; }
        if (blockSize >=  2) { smem[tid] += smem[tid +  1]; EMUSYNC; }
    }
    // write result for this block to global mem
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
    }
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
*/
/* SUM reduction with multiple elements per thread (Brent's Theorem
   optimization): each thread strides through the input by
   2*blockSize*gridDim.x before the in-block tree reduction.
   Fixes: (1) warp-synchronous tail now uses a volatile shared-memory
   pointer (same register-caching hazard as reduce4/5); (2) the load loop
   read g_idata[i+blockSize] whenever i < n, which runs past the end of the
   array when i + blockSize >= n — that read is now guarded, so arbitrary n
   is safe. */
template <unsigned int blockSize>
__global__ void
reduce6(int *g_idata, int *g_odata, unsigned int n)
{
    extern __shared__ int sdata[];
    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;
    // we reduce multiple elements per thread. The number is determined by the
    // number of active thread blocks (via gridSize). More blocks will result
    // in a larger gridSize and therefore fewer elements per thread
    while (i < n)
    {
        sdata[tid] += g_idata[i];
        if (i + blockSize < n)           // guard the second element's read
            sdata[tid] += g_idata[i+blockSize];
        i += gridSize;
    }
    __syncthreads();
    // do reduction in shared mem (dead branches removed at compile time)
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
    if (tid < 32)
#endif
    {
        // volatile prevents register caching between the unrolled steps
        volatile int *smem = sdata;
        if (blockSize >= 64) { smem[tid] += smem[tid + 32]; EMUSYNC; }
        if (blockSize >= 32) { smem[tid] += smem[tid + 16]; EMUSYNC; }
        if (blockSize >= 16) { smem[tid] += smem[tid +  8]; EMUSYNC; }
        if (blockSize >=  8) { smem[tid] += smem[tid +  4]; EMUSYNC; }
        if (blockSize >=  4) { smem[tid] += smem[tid +  2]; EMUSYNC; }
        if (blockSize >=  2) { smem[tid] += smem[tid +  1]; EMUSYNC; }
    }
    // write result for this block to global mem
    if (tid == 0) {
        g_odata[blockIdx.x] = sdata[0];
    }
}
#endif // #ifndef _REDUCE_KERNEL_H_
|
20,575 | #include<stdio.h>
// Device-side kernel: every launched thread prints one greeting line
// (device printf output is flushed at the next synchronizing call).
__global__ void hello_world(void)
{
printf("GPU: Hello world!\n");
}
// Host prints first, then a 1-block / 10-thread launch prints from the GPU.
int main(int argc,char **argv)
{
    printf("CPU: Hello world!\n");
    hello_world<<<1,10>>>();
    // Resetting the device flushes the device-side printf buffer; without
    // this the GPU lines are never shown.
    cudaDeviceReset();
    return 0;
}
|
20,576 | #include <stdlib.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <stdio.h>
#include <sys/time.h>
#define INPUT_SIZE 5120
#define BATCH_SIZE 720
/* cuFFT benchmark: one 1-D double-precision real-to-complex (D2Z) transform
 * over INPUT_SIZE*BATCH_SIZE samples, repeated 10 times with the H2D copy,
 * the transform, and the D2H copy timed separately.
 * Fixes: the elapsed-time expressions are signed long but were printed with
 * %llu (undefined behavior) — now cast to long long and printed with %lld;
 * the D2Z failure message wrongly said "cufftExecR2C"; the plan, device
 * buffers and host buffers were never released. */
int main(){
    struct timeval start, end;
    cudaError_t err;
    cufftResult res;
    double *idata = (double *)malloc(INPUT_SIZE * BATCH_SIZE * sizeof(double));
    for(int i=0; i<INPUT_SIZE*BATCH_SIZE; i++){
        idata[i] = rand() / (double)RAND_MAX;
    }
    /* D2Z produces N/2+1 complex values; 2*N doubles is a safe upper bound */
    double *odata = (double *)malloc(2 * INPUT_SIZE * BATCH_SIZE* sizeof(double));
    for(int i=0; i<2*INPUT_SIZE*BATCH_SIZE; i++){
        odata[i] = 0.0;
    }
    double *d_idata, *d_odata;
    err = cudaMalloc((void **)&d_idata, INPUT_SIZE*BATCH_SIZE*sizeof(double));
    if(err != cudaSuccess){
        printf("cudaMalloc failed\n");
    }
    err = cudaMalloc((void **)&d_odata, 2*INPUT_SIZE*BATCH_SIZE*sizeof(double));
    if(err != cudaSuccess){
        printf("cudaMalloc failed\n");
    }
    cufftHandle plan;
    res = cufftPlan1d(&plan, INPUT_SIZE*BATCH_SIZE, CUFFT_D2Z, 1);
    if(res != CUFFT_SUCCESS){
        printf("cufftPlan1d failed\n");
    }
    for(int i=0; i<10; i++){
        gettimeofday(&start, NULL);
        err = cudaMemcpy(d_idata, idata, INPUT_SIZE*BATCH_SIZE*sizeof(double), cudaMemcpyHostToDevice);
        if(err != cudaSuccess){
            printf("cudaMemcpyHostToDevice failed\n");
        }
        gettimeofday(&end, NULL);
        printf("cudaMemcpyHostToDevice took %lld us \n", (long long)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec)));
        gettimeofday(&start, NULL);
        res = cufftExecD2Z(plan, (cufftDoubleReal *)d_idata, (cufftDoubleComplex *)d_odata);
        if(res != CUFFT_SUCCESS){
            printf("cufftExecD2Z failed\n");
        }
        /* the exec call is async — synchronize before stopping the clock */
        err = cudaDeviceSynchronize();
        if(err != cudaSuccess){
            printf("cudaDeviceSynchronize failed\n");
        }
        gettimeofday(&end, NULL);
        printf("cufftExecD2Z took %lld us \n", (long long)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec)));
        gettimeofday(&start, NULL);
        err = cudaMemcpy(odata, d_odata, 2*INPUT_SIZE*BATCH_SIZE*sizeof(double), cudaMemcpyDeviceToHost);
        if(err != cudaSuccess){
            printf("cudaMemcpyDeviceToHost failed\n");
        }
        gettimeofday(&end, NULL);
        printf("cudaMemcpyDeviceToHost took %lld us \n", (long long)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec)));
    }
    /* release FFT plan, device buffers and host buffers (previously leaked) */
    cufftDestroy(plan);
    cudaFree(d_idata);
    cudaFree(d_odata);
    free(idata);
    free(odata);
    return 0;
}
|
20,577 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel, launched <<<1,1>>> from main.
   NOTE(review): the file header says "Do not modify" — comments only here.
   The divisions by +/-0.0f and odd literal ranges below appear deliberate
   (the generator probes inf/nan propagation); result is printed from the
   device. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13) {
if (comp == (+1.9791E7f * coshf((var_2 + +0.0f)))) {
comp += atan2f(floorf(+1.4721E-44f), -1.3111E-36f * ldexpf((+1.8350E-35f * var_3 - var_4), 2));
float tmp_1 = +1.0002E28f;
float tmp_2 = -1.2076E-35f;
comp += tmp_2 * tmp_1 / sinf(-0.0f);
if (comp == var_5 + -0.0f * asinf(+1.6377E-41f)) {
float tmp_3 = -1.2597E10f;
comp = tmp_3 / powf(-1.2530E7f, -1.8301E-44f);
}
if (comp >= atan2f(atanf((var_6 / (var_7 - (var_8 - +1.9054E35f * +1.0821E-44f - var_9)))), +0.0f)) {
comp = (var_10 - (-0.0f / cosf(+0.0f)));
}
for (int i=0; i < var_1; ++i) {
float tmp_4 = -1.7750E-18f / -1.2021E-43f;
comp = tmp_4 * (+1.3609E-19f + +0.0f);
comp += var_11 - (+0.0f * +1.1393E4f);
comp = (var_12 - var_13);
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every slot set to v.
// Caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(10 * sizeof(float));
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
/* Driver for the generated kernel: parses 14 command-line values and
 * launches compute<<<1,1>>>.
 * Fix: argv[1..14] were read without checking argc, so a missing argument
 * dereferenced NULL/out-of-bounds; a guard now reports usage and exits. */
int main(int argc, char** argv) {
    if (argc < 15) {
        fprintf(stderr, "usage: %s v1 v2 ... v14 (14 numeric arguments)\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14);
    cudaDeviceSynchronize();   /* flush device printf before exiting */
    return 0;
}
|
20,578 | #include "matrix.cuh"
/* Allocate a matrix_list_t holding `num` (initially unset) matrix pointers.
 * Caller is responsible for populating the slots and eventually releasing
 * everything via free_matrix_list(). */
matrix_list_t* matrix_list_constructor(unsigned int num)
{
    matrix_list_t* result = (matrix_list_t*) malloc(sizeof(matrix_list_t));
    result->num = num;
    result->matrix_list = (matrix_t**) malloc(num * sizeof(matrix_t*));
    return result;
}
/* Release a matrix_list_t and everything it owns: each contained matrix,
 * then the pointer array, then the struct itself.
 * Fix: the pointer array m->matrix_list (allocated in
 * matrix_list_constructor) was never freed — a leak on every list. */
void free_matrix_list(matrix_list_t* m)
{
    assert(m != NULL);
    unsigned int i;
    for(i=0; i<m->num; i++)
    {
        free_matrix(m->matrix_list[i]);
    }
    free(m->matrix_list);  /* was leaked before */
    free(m);
}
/* Elementwise list addition: returns a new list whose k-th matrix is
 * matrix_add(m1[k], m2[k]). Both lists must have the same length. */
matrix_list_t* matrix_list_add(matrix_list_t* m1, matrix_list_t* m2)
{
    assert(m1->num == m2->num);
    matrix_list_t* result = matrix_list_constructor(m1->num);
    for(unsigned int k = 0; k < m1->num; k++)
    {
        result->matrix_list[k] = matrix_add(m1->matrix_list[k], m2->matrix_list[k]);
    }
    return result;
}
/* Elementwise list subtraction: returns a new list whose k-th matrix is
 * matrix_subtract(m1[k], m2[k]). Both lists must have the same length. */
matrix_list_t* matrix_list_subtract(matrix_list_t* m1, matrix_list_t* m2)
{
    assert(m1->num == m2->num);
    matrix_list_t* result = matrix_list_constructor(m1->num);
    for(unsigned int k = 0; k < m1->num; k++)
    {
        result->matrix_list[k] = matrix_subtract(m1->matrix_list[k], m2->matrix_list[k]);
    }
    return result;
}
/* Scale every matrix in the list by `scalar`, returning a new list. */
matrix_list_t* matrix_list_scalar_multiply(matrix_list_t* m1, float scalar)
{
    matrix_list_t* result = matrix_list_constructor(m1->num);
    for(unsigned int k = 0; k < m1->num; k++)
    {
        result->matrix_list[k] = matrix_scalar_multiply(m1->matrix_list[k], scalar);
    }
    return result;
}
/* Total bytes attributed to the list: the struct itself plus the reported
 * size of every contained matrix (per matrix_memory_size). */
unsigned int matrix_list_memory_size(matrix_list_t* m)
{
    unsigned int total = sizeof(matrix_list_t);
    for(unsigned int k = 0; k < m->num; k++)
    {
        total += matrix_memory_size(m->matrix_list[k]);
    }
    return total;
}
|
20,579 | /**
* Global Memory (Linear Array)
* Demonstrates:
* - Allocation of linear array by host
* - Passing global memory pointer to device
* - Method in which host accesses global memory
*/
#include <stdio.h>
#include <stdlib.h>
/* Print (and clear) the most recent CUDA error, if any. */
void check_cuda_errors()
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf("Last CUDA error %s\n", cudaGetErrorString(status));
}
/* Single-block kernel: thread t adds one to numbers[t].
 * Launch must use exactly one block with one thread per element. */
__global__ void incrementor(int* numbers)
{
    const int slot = threadIdx.x;
    numbers[slot] = numbers[slot] + 1;
}
/* Demo: fill an array with random values, increment every element on the
 * GPU, and print before/after.
 * Fixes: scanf's result was ignored and num_elements was unvalidated —
 * a non-numeric, zero, negative, or >1024 count either crashed malloc or
 * made the single-block <<<1, num_elements>>> launch fail silently.
 * 1024 is the per-block thread limit on all current devices. */
int main(int argc, char **argv)
{
    int *start, *device_mem;
    int i, num_elements;
    cudaError_t rc;
    // Ask user for number of elements
    printf("How many elements to increment? ");
    if (scanf("%d", &num_elements) != 1 || num_elements < 1 || num_elements > 1024)
    {
        printf("Invalid element count (expected 1..1024)\n");
        return 1;
    }
    // Seed our RNG
    srand(0);
    // Malloc host memory
    start = (int*)malloc(num_elements * sizeof(int));
    if (start == NULL)
    {
        printf("Host allocation failed\n");
        return 1;
    }
    // "Malloc" device memory
    cudaMalloc((void **)&device_mem, num_elements * sizeof(int));
    printf("Incrementor input:\n");
    for (i = 0; i < num_elements; i++) {
        start[i] = rand() % 100;
        printf("start[%d] = %d\n", i, start[i]);
    }
    // Copy input values to the device
    rc = cudaMemcpy(device_mem, start, num_elements * sizeof(int), cudaMemcpyHostToDevice);
    if (rc != cudaSuccess)
    {
        printf("Could not copy to device. Reason: %s\n", cudaGetErrorString(rc));
    }
    // One block, one thread per element (count validated above)
    incrementor<<<1, num_elements>>>(device_mem);
    check_cuda_errors();
    // Retrieve data from global memory
    rc = cudaMemcpy(start, device_mem, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
    if (rc != cudaSuccess)
    {
        printf("Could not copy from device. Reason: %s\n", cudaGetErrorString(rc));
    }
    printf("Incrementor results:\n");
    for (i = 0; i < num_elements; i++) {
        printf("result[%d] = %d\n", i, start[i]);
    }
    // Free both host and device memory
    free(start);
    cudaFree(device_mem);
    return 0;
}
|
20,580 | #include "vect-relu-leaky.hh"
#include <cassert>
#include <stdexcept>
#include "ops-builder.hh"
#include "leaky-relu-grad.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Leaky-ReLU graph operation node. Output shape equals the single input's
// shape; `alpha` is the negative-slope coefficient forwarded to the runtime.
VectReluLeaky::VectReluLeaky(Op* arg, const dbl_t alpha)
: Op("vect_relu_leaky", arg->shape_get(), {arg})
, alpha(alpha)
{}
// Compile this op into the runtime graph: allocate an output buffer of the
// same total length as the (already compiled) predecessor's output and emit
// an op_relu_leaky runtime node that depends on the predecessor's node.
void VectReluLeaky::compile()
{
auto& g = Graph::instance();
auto& carg = g.compiled(preds()[0]);
std::size_t len = carg.out_shape.total();
Shape out_shape = carg.out_shape;
// tensor_alloc presumably returns owned device/host memory tracked by the
// graph (it is registered via add_compiled below) — TODO confirm ownership.
dbl_t* out_data = tensor_alloc(len);
auto out_node = rt::Node::op_relu_leaky(carg.out_data, out_data,
len, alpha,
{carg.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
// Gradient w.r.t. the single input (index must be 0; asserted).
// `dout` is the upstream gradient; a null `dout` means this op is the last
// node of the graph, where the LeakyRelu gradient is undefined — throws.
Op* VectReluLeaky::child_grad(std::size_t index, Op* dout)
{
assert(index < 1);
(void) index;
if (dout == nullptr)
throw std::runtime_error {"grad(LeakyRelu) can't be computed on last node"};
auto& builder = OpsBuilder::instance();
return builder.leaky_relu_grad(preds()[0], dout, alpha);
}
}
|
20,581 | #include <stdio.h>
#include <stdlib.h>
/* Serial reference implementation: c[i] = a[i] + b[i] for i in [0, nx). */
void vecadd(int nx, float *a, float *b, float *c) {
    for (int idx = 0; idx < nx; ++idx) {
        c[idx] = a[idx] + b[idx];
    }
}
/* GPU element-wise sum: one thread per element, with a bounds guard for
 * the partial block at the grid tail. */
__global__ void vecadd_gpu(int nx, float *a_gpu, float *b_gpu, float *c_gpu) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nx) return;
    c_gpu[idx] = a_gpu[idx] + b_gpu[idx];
}
/**
 * Runs vector addition on CPU and GPU and prints the element-wise
 * difference (expected to be 0 everywhere).
 *
 * Fixes over the original: host allocations are checked, and every host
 * and device buffer is released before exit (the original leaked all of
 * them).
 */
int main() {
    int i, nx=1000;
    // allocate arrays
    float *a, *b, *c, *c2;
    a = (float *) malloc(nx*sizeof(float));
    b = (float *) malloc(nx*sizeof(float));
    c = (float *) calloc(nx,sizeof(float));
    c2 = (float *) calloc(nx,sizeof(float));
    if (a == NULL || b == NULL || c == NULL || c2 == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // initialize a, b arrays
    for(i=0; i<nx; i++) {
        a[i] = i;
        b[i] = 2*i;
    }
    // allocate device arrays
    float *a_gpu, *b_gpu, *c_gpu;
    cudaMalloc((void**)&a_gpu, nx*sizeof(float));
    cudaMalloc((void**)&b_gpu, nx*sizeof(float));
    cudaMalloc((void**)&c_gpu, nx*sizeof(float));
    // copy arrays host to device
    cudaMemcpy(a_gpu, a, nx*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, nx*sizeof(float), cudaMemcpyHostToDevice);
    // vector add: serial reference, then GPU (ceil-div grid covers all nx)
    vecadd(nx, a, b, c);
    vecadd_gpu<<<dim3(nx/256+1,1),dim3(256,1,1)>>>(nx, a_gpu, b_gpu, c_gpu);
    // copy result device to host; the blocking memcpy also synchronizes
    // with the kernel on the default stream
    cudaMemcpy(c2, c_gpu, nx*sizeof(float), cudaMemcpyDeviceToHost);
    // print per-element difference
    for(i=0; i<nx; i++) printf("c[%d]-c_gpu[%d]= %g\n", i, i, c[i]-c2[i]);
    // release device and host memory (missing in the original)
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    free(a);
    free(b);
    free(c);
    free(c2);
    return 0;
}
|
20,582 | #include <stdio.h>
#include <cuda.h>
#define N 64
/**
 * Hillis-Steele shared-memory scan over N elements, run as one block of
 * N threads. Thread i seeds a[i] = i, and thread 0 prints the array
 * before and after the scan.
 * NOTE(review): despite the name "exscan", the doubling loop below
 * computes an INCLUSIVE prefix sum (a[i] ends up as the sum of
 * a[0..i]) — confirm the name vs. the intent.
 */
__global__ void exscan() {
__shared__ unsigned a[N]; //= {4, 3, 9, 3, 5, 7, 3, 2};
a[threadIdx.x] = threadIdx.x;
__syncthreads();
unsigned n = sizeof(a) / sizeof (*a);
__syncthreads();
if (threadIdx.x == 0) {
for (unsigned ii = 0; ii < n; ++ii)
printf("%d ", a[ii]);
printf("\n");
}
__syncthreads();
int tmp;
// Doubling step: every participating thread first reads its partner's
// value into a register, all threads barrier, then the write happens.
// The two barriers per iteration keep the reads and writes of a[] from
// racing with each other; both barriers are outside the divergent
// branches, so all N threads reach them.
for (int off = 1; off < n; off *= 2) {
if (threadIdx.x >= off) {
tmp = a[threadIdx.x - off];
}
__syncthreads();
if (threadIdx.x >= off) {
a[threadIdx.x] += tmp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
for (unsigned ii = 0; ii < n; ++ii)
printf("%d ", a[ii]);
printf("\n");
}
}
/**
 * Launches the scan demo kernel on one block of N threads and waits for
 * it to finish so the kernel's printf output is flushed before exit.
 */
int main() {
    //cudaSetDevice(5);
    exscan<<<1, N>>>();
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the modern equivalent (blocks until the kernel completes).
    cudaDeviceSynchronize();
    return 0;
}
|
20,583 | #include "math.h"
#define SMALLEST_FLOAT 1.175494351E-38
#define MAX_ELEMENTS_PER_BLOCK 2048
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
// Offset added to shared-memory indices to spread accesses across banks.
// Fully parenthesized: the unparenthesized GPU Gems 3 form parsed as
// ((n) >> (NUM_BANKS + (n))) >> (2 * LOG_NUM_BANKS) because `+` binds
// tighter than `>>` — a known erratum of the original chapter.
#define CONFLICT_FREE_OFFSET(n)\
(((n) >> NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)))
#else
// Default: one padding slot per LOG_NUM_BANKS-sized group of elements.
#define CONFLICT_FREE_OFFSET(n)(((n) >> LOG_NUM_BANKS))
#endif
/**
 * Ancestral partial-likelihood kernel under the Jukes-Cantor model: the
 * 0.25/0.75 terms are the JC69 transition probabilities (equal base
 * frequencies, single rate).
 * Launch layout (inferred from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = ancestral state.
 * For each site it multiplies the left/right branch sums, rescales each
 * site by its per-site maximum to avoid float underflow, and accumulates
 * that maximum into ufScaling.
 * NOTE(review): shared buffers assume blockDim.x * numStates <= 1024 and
 * blockDim.x <= 512 — confirm against the host-side launch configuration.
 */
extern "C"
__global__ void JukesCantorGpu(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double apRate, double* rates, double rateScaling,
float* seqNodeLeft, float* seqNodeRight, float* seqAncNode,
double blLeft, double blRight,
double* ufScaling){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int ancStateIdx = threadIdx.y;
__shared__ double ancSequenceSharedRecord[1024];
__shared__ float leftPartialLikelihoods[1024];
__shared__ float rightPartialLikelihoods[1024];
__shared__ double ufScalingSharedMem[512];
//__shared__ float parentPartialLikelihoods[1024];
// Prefetching partial likelihoods: global layout is
// [category][state][site-with-padding]; each thread stages one value.
if(siteIdx < numSitesWithPadding){
leftPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeLeft[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
rightPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeRight[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
double categoryRate = rates[categoryIdx];
// JC69 transition probabilities for the left and right branch lengths.
double expLeft = exp(-blLeft * rateScaling * categoryRate * apRate);
double diagLeft = 0.25 + (0.75 * expLeft);
double offdiagLeft = 0.25 - (0.25 * expLeft);
double expRight = exp(-blRight * rateScaling * categoryRate * apRate);
double diagRight = 0.25 + (0.75 * expRight);
double offdiagRight = 0.25 - (0.25 * expRight);
int ancSequenceIdx = categoryIdx * numStates * numSitesWithPadding + ancStateIdx * numSitesWithPadding + siteIdx;
double sumLeft = 0;
double sumRight = 0;
if(siteIdx < numSitesWithPadding){
// Sum over descendant states; the product of the two branch sums is
// this site's partial likelihood for ancStateIdx.
for(int descStateIdx = 0; descStateIdx < numStates; descStateIdx++){
sumLeft += leftPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] *
((descStateIdx == ancStateIdx) ? diagLeft : offdiagLeft);
sumRight += rightPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] *
((descStateIdx == ancStateIdx) ? diagRight : offdiagRight);
}
double prod = sumLeft * sumRight;
ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx] = prod;
}
__syncthreads();
// One thread per site (ancStateIdx == 0) finds the per-site maximum and
// folds it into the global underflow-scaling accumulator.
if(ancStateIdx == 0 && siteIdx < numSitesWithPadding){
double maxValue = 0;
for(int i = 0; i < numStates; i++){
double stateValue = ancSequenceSharedRecord[siteBlockIdx * numStates + i];
if(stateValue > maxValue){
maxValue = stateValue;
}
}
if(siteIdx < numSites){
ufScaling[categoryIdx * numSitesWithPadding + siteIdx] *= maxValue;
}
ufScalingSharedMem[siteBlockIdx] = maxValue;
}
__syncthreads();
// Write the rescaled (max-normalized) likelihood back as float; padding
// sites are zeroed.
if(siteIdx < numSitesWithPadding){
// ... = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]/ufScaling[categoryIdx * numSitesWithPadding + siteIdx]); <- this doesn't work for some reason.
//Gives NaN as a result when really small numbers are used (1E-37 or smaller).
if(siteIdx < numSites){
seqAncNode[ancSequenceIdx] = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]*(1/ufScalingSharedMem[siteBlockIdx]));
}else{
seqAncNode[ancSequenceIdx] = 0;
}
}
}
/**
 * Ancestral partial-likelihood kernel, same structure as JukesCantorGpu
 * but with 0.05/0.95 transition-probability constants (0.05 = 1/20,
 * presumably a 20-state amino-acid Poisson model — confirm).
 * Launch layout (from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = ancestral state.
 * NOTE(review): shared buffers assume blockDim.x * numStates <= 1024 and
 * blockDim.x <= 512 — confirm against the launch configuration.
 */
extern "C"
__global__ void PoissonGpu(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double apRate, double* rates, double rateScaling,
float* seqNodeLeft, float* seqNodeRight, float* seqAncNode,
double blLeft, double blRight,
double* ufScaling){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int ancStateIdx = threadIdx.y;
__shared__ double ancSequenceSharedRecord[1024];
__shared__ float leftPartialLikelihoods[1024];
__shared__ float rightPartialLikelihoods[1024];
__shared__ double ufScalingSharedMem[512];
//__shared__ float parentPartialLikelihoods[1024];
// Prefetching partial likelihoods from the [category][state][site] layout.
if(siteIdx < numSitesWithPadding){
leftPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeLeft[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
rightPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeRight[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
double categoryRate = rates[categoryIdx];
// Diagonal / off-diagonal transition probabilities for each branch.
double expLeft = exp(-blLeft * rateScaling * categoryRate * apRate);
double diagLeft = 0.05 + (0.95 * expLeft);
double offdiagLeft = 0.05 - (0.05 * expLeft);
double expRight = exp(-blRight * rateScaling * categoryRate * apRate);
double diagRight = 0.05 + (0.95 * expRight);
double offdiagRight = 0.05 - (0.05 * expRight);
int ancSequenceIdx = categoryIdx * numStates * numSitesWithPadding + ancStateIdx * numSitesWithPadding + siteIdx;
double sumLeft = 0;
double sumRight = 0;
if(siteIdx < numSitesWithPadding){
// Branch sums over descendant states; product = partial likelihood.
for(int descStateIdx = 0; descStateIdx < numStates; descStateIdx++){
sumLeft += leftPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] *
((descStateIdx == ancStateIdx) ? diagLeft : offdiagLeft);
sumRight += rightPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] *
((descStateIdx == ancStateIdx) ? diagRight : offdiagRight);
}
double prod = sumLeft * sumRight;
ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx] = prod;
}
__syncthreads();
// Per-site maximum -> underflow-scaling accumulator (one thread per site).
if(ancStateIdx == 0 && siteIdx < numSitesWithPadding){
double maxValue = 0;
for(int i = 0; i < numStates; i++){
double stateValue = ancSequenceSharedRecord[siteBlockIdx * numStates + i];
if(stateValue > maxValue){
maxValue = stateValue;
}
}
if(siteIdx < numSites) ufScaling[categoryIdx * numSitesWithPadding + siteIdx] *= maxValue;
ufScalingSharedMem[siteBlockIdx] = maxValue;
}
__syncthreads();
// Write back max-normalized likelihoods; padding sites are zeroed.
if(siteIdx < numSitesWithPadding){
// ... = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]/ufScaling[categoryIdx * numSitesWithPadding + siteIdx]); <- this doesn't work for some reason.
//Gives NaN as a result when really small numbers are used (1E-37 or smaller).
if(siteIdx < numSites){
seqAncNode[ancSequenceIdx] = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]*(1/ufScalingSharedMem[siteBlockIdx]));
}else{
seqAncNode[ancSequenceIdx] = 0;
}
}
}
/**
 * Ancestral partial-likelihood kernel for a Kimura-style two-parameter
 * nucleotide model: transitions (A<->G, C<->T) use the ti* terms,
 * transversions use the tv* terms, with ratio parameter `kappa`.
 * Launch layout (from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = ancestral state
 * (0=A, 1=C, 2=G, 3=T).
 * NOTE(review): expLeftA/expLeftB etc. are declared float but assigned
 * double-precision expressions — confirm the narrowing is intentional.
 * Shared buffers assume blockDim.x * numStates <= 1024, blockDim.x <= 512.
 */
extern "C"
__global__ void KimuraGpu(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double kappa, double apRate, double* rates, double rateScaling,
float* seqNodeLeft, float* seqNodeRight, float* seqAncNode,
double blLeft, double blRight,
double* ufScaling){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int ancStateIdx = threadIdx.y;
// State indexes
int A = 0;
int C = 1;
int G = 2;
int T = 3;
//Shared memory initialization
__shared__ double ancSequenceSharedRecord[1024];
__shared__ float leftPartialLikelihoods[1024];
__shared__ float rightPartialLikelihoods[1024];
__shared__ double ufScalingSharedMem[512];
//__shared__ float parentPartialLikelihoods[1024];
// Prefetching partial likelihoods from the [category][state][site] layout.
if(siteIdx < numSitesWithPadding){
leftPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeLeft[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
rightPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeRight[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
double categoryRate = rates[categoryIdx];
// Per-branch transition probabilities: diag = no change, ti = transition,
// tv = transversion.
float expLeftA = exp(-(blLeft) * rateScaling * categoryRate * apRate);
float expLeftB = exp(-(blLeft) * rateScaling * categoryRate * apRate * ((kappa + 1) / 2));
float diagLeft = 0.25 + (0.25 * expLeftA) + (0.5 * expLeftB);
float tiLeft = 0.25 + (0.25 * expLeftA) - (0.5 * expLeftB);
float tvLeft = 0.25 - (0.25 * expLeftA);
float expRightA = exp(-(blRight) * rateScaling * categoryRate * apRate);
float expRightB = exp(-(blRight) * rateScaling * categoryRate * apRate * ((kappa + 1) / 2));
float diagRight = 0.25 + (0.25 * expRightA) + (0.5 * expRightB);
float tiRight = 0.25 + (0.25 * expRightA) - (0.5 * expRightB);
float tvRight = 0.25 - (0.25 * expRightA);
int ancSequenceIdx = categoryIdx * numStates * numSitesWithPadding + ancStateIdx * numSitesWithPadding + siteIdx;
double sumLeft = 0;
double sumRight = 0;
// Per-ancestral-state branch sums; the branch coefficients depend on
// whether each descendant state is identical, a transition, or a
// transversion relative to ancStateIdx.
if(siteIdx < numSitesWithPadding){
if(ancStateIdx == A){
/*sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + A * numSites + siteIdx] * diagLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + A * numSites + siteIdx] * diagRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tiLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tiRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tvRight;
//============================================================================================*/
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * diagLeft;
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * diagRight;
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tiLeft;
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tiRight;
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tvRight;
}else if(ancStateIdx == C){
/*sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + C * numSites + siteIdx] * diagLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + C * numSites + siteIdx] * diagRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tiLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tiRight;
//==============================================================================================*/
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * diagLeft;
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * diagRight;
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tiLeft;
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tiRight;
}else if(ancStateIdx == G){
/*sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tiLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tiRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + G * numSites + siteIdx] * diagLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + G * numSites + siteIdx] * diagRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + T * numSites + siteIdx] * tvRight;
//===============================================================================================*/
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tiLeft;
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tiRight;
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * diagLeft;
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * diagRight;
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tvRight;
}else if(ancStateIdx == T){
/*sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + A * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tiLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + C * numSites + siteIdx] * tiRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tvLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + G * numSites + siteIdx] * tvRight;
sumLeft += seqNodeLeft[categoryIdx * numStates * numSites + T * numSites + siteIdx] * diagLeft;
sumRight += seqNodeRight[categoryIdx * numStates * numSites + T * numSites + siteIdx] * diagRight;
//===============================================================================================*/
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tiLeft;
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tiRight;
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tvLeft;
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tvRight;
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * diagLeft;
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * diagRight;
}
double prod = sumLeft * sumRight;
ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx] = prod;
}
__syncthreads();
// Per-site maximum -> underflow-scaling accumulator (one thread per site).
if(ancStateIdx == 0 && siteIdx < numSitesWithPadding){
double maxValue = 0;
for(int i = 0; i < numStates; i++){
double stateValue = ancSequenceSharedRecord[siteBlockIdx * numStates + i];
if(stateValue > maxValue){
maxValue = stateValue;
}
}
if(siteIdx < numSites) ufScaling[categoryIdx * numSitesWithPadding + siteIdx] *= maxValue;
ufScalingSharedMem[siteBlockIdx] = maxValue;
}
__syncthreads();
// Write back max-normalized likelihoods; padding sites are zeroed.
if(siteIdx < numSitesWithPadding){
// ... = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]/ufScaling[categoryIdx * numSitesWithPadding + siteIdx]); <- this doesn't work for some reason.
//Gives NaN as a result when really small numbers are used (1E-37 or smaller).
if(siteIdx < numSites){
seqAncNode[ancSequenceIdx] = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]*(1/ufScalingSharedMem[siteBlockIdx]));
}else{
seqAncNode[ancSequenceIdx] = 0;
}
}
}
/**
 * Ancestral partial-likelihood kernel for a Hasegawa (HKY-style) model
 * with unequal equilibrium frequencies (`equiFreq`), purine/pyrimidine
 * frequency sums (`PIj`) and transition/transversion ratio `kappa`.
 * Launch layout (from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = ancestral state
 * (0=A, 1=C, 2=G, 3=T).
 * The transition-probability pieces (diag/ti/tv per state, per branch)
 * are computed once per block by the siteBlockIdx==0 column of threads
 * into shared arrays indexed [branch * numStates + state], branch 0 =
 * left, branch 1 = right (branchLengths[0..1]).
 * NOTE(review): shared buffers assume blockDim.x * numStates <= 1024,
 * blockDim.x <= 512, and numStates == 4 (expA..tv hold 2*4 entries).
 */
extern "C"
__global__ void HasegawaGPU(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double kappa, double apRate, double* rates, double rateScaling,
float* seqNodeLeft, float* seqNodeRight, float* seqAncNode,
double* PIj, double* equiFreq,
double* branchLengths,
double* ufScaling){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int ancStateIdx = threadIdx.y;
// numerical representations of states A, C, G and T.
int A = 0;
int C = 1;
int G = 2;
int T = 3;
//Shared memory initialization
__shared__ double ancSequenceSharedRecord[1024];
__shared__ float leftPartialLikelihoods[1024];
__shared__ float rightPartialLikelihoods[1024];
__shared__ double ufScalingSharedMem[512];
__shared__ double expA[8];
__shared__ double expB[8];
__shared__ double diag[8];
__shared__ double ti[8];
__shared__ double tv[8];
// Prefetching partial likelihoods from the [category][state][site] layout.
if(siteIdx < numSitesWithPadding){
leftPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeLeft[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
rightPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeRight[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
double categoryRate = rates[categoryIdx];
// One thread per (branch, state) pair fills the shared probability tables;
// the barrier below publishes them to the whole block.
if(siteBlockIdx == 0){
for(int branch = 0; branch < 2; branch++){
expA[branch * numStates + ancStateIdx] = exp(-(branchLengths[branch]) * rateScaling * categoryRate * apRate);
expB[branch * numStates + ancStateIdx] = exp(-(branchLengths[branch]) * rateScaling * categoryRate * apRate * (1.0 + PIj[ancStateIdx] * (kappa - 1.0)));
diag[branch * numStates + ancStateIdx] = equiFreq[ancStateIdx] + equiFreq[ancStateIdx]
* ((1.0/PIj[ancStateIdx])-1.0) * expA[branch * numStates + ancStateIdx] +((PIj[ancStateIdx]-equiFreq[ancStateIdx])/PIj[ancStateIdx])
* expB[branch * numStates + ancStateIdx];
ti[branch * numStates + ancStateIdx] = equiFreq[ancStateIdx] + equiFreq[ancStateIdx]
* ((1.0/PIj[ancStateIdx])-1.0) * expA[branch * numStates + ancStateIdx] - equiFreq[ancStateIdx]/PIj[ancStateIdx]
* expB[branch * numStates + ancStateIdx];
tv[branch * numStates + ancStateIdx] = equiFreq[ancStateIdx] * (1.0 - expA[branch * numStates + ancStateIdx]);
}
}
__syncthreads();
int ancSequenceIdx = categoryIdx * numStates * numSitesWithPadding + ancStateIdx * numSitesWithPadding + siteIdx;
double sumLeft = 0;
double sumRight = 0;
// Branch sums per ancestral state: the coefficient table index is the
// DESCENDANT state (note diag[A], tv[C], ... are column entries for that
// destination state; [numStates + x] selects the right branch).
if(siteIdx < numSitesWithPadding){
if(ancStateIdx == A){
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * diag[A];
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * diag[numStates + A];
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tv[C];
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tv[numStates + C];
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * ti[G];
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * ti[numStates + G];
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tv[T];
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tv[numStates + T];
}else if(ancStateIdx == C){
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tv[A];
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tv[numStates + A];
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * diag[C];
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * diag[numStates + C];
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tv[G];
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tv[numStates + G];
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * ti[T];
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * ti[numStates + T];
}else if(ancStateIdx == G){
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * ti[A];
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * ti[numStates + A];
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tv[C];
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * tv[numStates + C];
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * diag[G];
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * diag[numStates + G];
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tv[T];
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * tv[numStates + T];
}else if(ancStateIdx == T){
sumLeft += leftPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tv[A];
sumRight += rightPartialLikelihoods[A * blockDim.x + siteBlockIdx] * tv[numStates + A];
sumLeft += leftPartialLikelihoods[C * blockDim.x + siteBlockIdx] * ti[C];
sumRight += rightPartialLikelihoods[C * blockDim.x + siteBlockIdx] * ti[numStates + C];
sumLeft += leftPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tv[G];
sumRight += rightPartialLikelihoods[G * blockDim.x + siteBlockIdx] * tv[numStates + G];
sumLeft += leftPartialLikelihoods[T * blockDim.x + siteBlockIdx] * diag[T];
sumRight += rightPartialLikelihoods[T * blockDim.x + siteBlockIdx] * diag[numStates + T];
}
double prod = sumLeft * sumRight;
ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx] = prod;
//debugging[ancSequenceIdx] = prod;
// debugging[A] = ti[A];
// debugging[numStates + A] = ti[numStates + A];
// debugging[C] = ti[C];
// debugging[numStates + C] = ti[numStates + C];
// debugging[G] = ti[G];
// debugging[numStates + G] = ti[numStates + G];
// debugging[T] = ti[T];
// debugging[numStates + T] = ti[numStates + T];
}
__syncthreads();
// Per-site maximum -> underflow-scaling accumulator (one thread per site).
if(ancStateIdx == 0 && siteIdx < numSitesWithPadding){
double maxValue = 0;
for(int i = 0; i < numStates; i++){
double stateValue = ancSequenceSharedRecord[siteBlockIdx * numStates + i];
if(stateValue > maxValue){
maxValue = stateValue;
}
}
if(siteIdx < numSites) ufScaling[categoryIdx * numSitesWithPadding + siteIdx] *= maxValue;
ufScalingSharedMem[siteBlockIdx] = maxValue;
}
__syncthreads();
// Write back max-normalized likelihoods; padding sites are zeroed.
if(siteIdx < numSitesWithPadding){
// ... = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]/ufScaling[categoryIdx * numSitesWithPadding + siteIdx]); <- this doesn't work for some reason.
//Gives NaN as a result when really small numbers are used (1E-37 or smaller).
if(siteIdx < numSites){
seqAncNode[ancSequenceIdx] = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]*(1/ufScalingSharedMem[siteBlockIdx]));
}else{
seqAncNode[ancSequenceIdx] = 0;
}
}
}
/**
 * Ancestral partial-likelihood kernel for a general model whose transition
 * probability matrices are precomputed per category (TPMleft/TPMright,
 * laid out [category][ancState][descState]).
 * Launch layout (from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = ancestral state.
 * `sequence_split_offset` shifts the ufScaling writes so several data
 * partitions can share one scaling buffer.
 * NOTE(review): unlike the other kernels in this file, the shared-memory
 * write of `prod` below is NOT guarded by siteIdx < numSitesWithPadding,
 * and the branch sums are only accumulated for siteIdx < numSites —
 * padding rows therefore store a 0 product; confirm this is intended.
 * Shared buffers assume blockDim.x * numStates <= 1024, blockDim.x <= 512.
 */
extern "C"
__global__ void GTRGPU(int numCategories, int numStates, int numSitesWithPadding, int numSites,
float* seqNodeLeft, float* seqNodeRight, float* seqAncNode,
double* TPMleft, double* TPMright,
double* ufScaling, int sequence_split_offset){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int ancStateIdx = threadIdx.y;
__shared__ double ancSequenceSharedRecord[1024];
__shared__ float leftPartialLikelihoods[1024];
__shared__ float rightPartialLikelihoods[1024];
__shared__ double ufScalingSharedMem[512];
__shared__ double sMatrixColumnLeft[64];
__shared__ double sMatrixColumnRight[64];
// Prefetching partial likelihoods from the [category][state][site] layout.
if(siteIdx < numSitesWithPadding){
leftPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeLeft[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
rightPartialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = seqNodeRight[siteIdx + ancStateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
int ancSequenceIdx = categoryIdx * numStates * numSitesWithPadding + ancStateIdx * numSitesWithPadding + siteIdx;
double sumLeft = 0;
double sumRight = 0;
// Per descendant state: threads x==0 / x==1 stage one TPM column each into
// shared memory, the block barriers, then all sites consume that column.
for(int descStateIdx = 0; descStateIdx < numStates; descStateIdx++){
if(threadIdx.x == 0){
sMatrixColumnLeft[ancStateIdx] = TPMleft[categoryIdx * numStates * numStates + ancStateIdx * numStates + descStateIdx];
}else if(threadIdx.x == 1){
sMatrixColumnRight[ancStateIdx] = TPMright[categoryIdx * numStates * numStates + ancStateIdx * numStates + descStateIdx];
}
__syncthreads();
if(siteIdx < numSites){
sumLeft += leftPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] * sMatrixColumnLeft[ancStateIdx];
sumRight += rightPartialLikelihoods[descStateIdx * blockDim.x + siteBlockIdx] * sMatrixColumnRight[ancStateIdx];
}
__syncthreads();
}
double prod = sumLeft * sumRight;
ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx] = prod;
__syncthreads();
// Per-site maximum -> underflow-scaling accumulator (one thread per site).
if(ancStateIdx == 0 && siteIdx < numSitesWithPadding){
double maxValue = 0;
for(int i = 0; i < numStates; i++){
double stateValue = ancSequenceSharedRecord[siteBlockIdx * numStates + i];
if(stateValue > maxValue){
maxValue = stateValue;
}
}
int scaling_split_offset = sequence_split_offset * numCategories;
if(siteIdx < numSites) ufScaling[categoryIdx * numSitesWithPadding + siteIdx + scaling_split_offset] *= maxValue;
ufScalingSharedMem[siteBlockIdx] = maxValue;
}
__syncthreads();
// Write back max-normalized likelihoods; padding sites are zeroed.
if(siteIdx < numSitesWithPadding){
// ... = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]/ufScaling[categoryIdx * numSitesWithPadding + siteIdx]); <- this doesn't work for some reason.
//Gives NaN as a result when really small numbers are used (1E-37 or smaller).
if(siteIdx < numSites){
seqAncNode[ancSequenceIdx] = (float)(ancSequenceSharedRecord[siteBlockIdx * numStates + ancStateIdx]*(1/ufScalingSharedMem[siteBlockIdx]));
}else{
seqAncNode[ancSequenceIdx] = 0;
}
}
}
/**
 * Tiled (32x32) computation of two transition-probability matrices at once:
 * TPM = A * diag(exp(bl * catRate * apRate * e)) * B, where `ad` holds A
 * (n x n, row-major), `bd` holds B, and `ed` the eigenvalue vector e.
 * vLeft uses blLeft, vRight uses blRight; results land in the `cat`-th
 * n x n slab of TPMleft/TPMright.
 * Launch layout: 2-D blocks of 32x32 threads tiling the n x n output.
 * NOTE(review): the tile loads (as/bs/es) are not bounds-checked, so when
 * n is not a multiple of 32 (or y >= n) the kernel reads past the input
 * arrays even though the final store is guarded — confirm the host pads
 * ad/bd/ed to multiples of 32.
 */
extern "C"
__global__ void transMatrixCalc(int n, double* ad, double* bd, double* ed, double* TPMleft, double* TPMright,
double blLeft, double blRight, double catRate, double apRate, int cat) {
//MAKE IT FOR LEFT AND RIGHT
__shared__ double as[32][32];
__shared__ double bs[32][32];
__shared__ double es[32];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = (blockIdx.x * blockDim.x) + tx;
int y = (blockIdx.y * blockDim.y) + ty;
double vLeft = 0.0;
double vRight = 0.0;
int yn = y * n;
// number of 32-wide tiles needed to cover the inner dimension
int s = (n + 31) / 32;
//int wholeBlocks = n/32;
for(int m=0; m<s; m++) {
int m32 = m * 32;
as[ty][tx] = ad[yn + (m32 + tx)];
bs[ty][tx] = bd[(m32 + ty) * n + x];
es[tx] = ed[m32 + tx];
__syncthreads();
// inner product over the tile, with the per-eigenvalue exponential
// applied in the middle; the i+m32<n bound trims the last partial tile
for(int i=0; i+m32<n && i<32; i++) {
double eigenExponent = exp(blLeft * catRate * apRate * es[i]);
vLeft += as[ty][i] * eigenExponent * bs[i][tx];
eigenExponent = exp(blRight * catRate * apRate * es[i]);
vRight += as[ty][i] * eigenExponent * bs[i][tx];
}
__syncthreads();
}
if(x < n && y < n){
TPMleft[cat*n*n + yn + x] = vLeft;
TPMright[cat*n*n + yn + x] = vRight;
//TPMleft[cat*n*n + yn + x] = cat;
//TPMright[cat*n*n + yn + x] = catRate;
}
}
/**
 * Initializes every element of the underflow-scaling buffer to `value`.
 * One thread per element; tail threads beyond numElements do nothing.
 * Fix: the original ignored the `value` parameter and always wrote 1.0,
 * which defeats the point of passing an initial value.
 */
extern "C"
__global__ void initUnderflowScaling(double* ufScaling, double value, int numElements){
int elementIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(elementIdx < numElements){
ufScaling[elementIdx] = value;
}
}
/**
 * Collapses the per-state axis of a node's partial likelihoods into a
 * per-(category, site) likelihood, weighting each state by its
 * equilibrium frequency and by (1 - pInv)/numCategories, and folding in
 * the accumulated underflow scaling. The result OVERWRITES the
 * corresponding ufScaling entry.
 * Launch layout (from the indexing): blockIdx.y = rate category,
 * threadIdx.x = site within block, threadIdx.y = state.
 * NOTE(review): shared buffers assume blockDim.x * numStates <= 1024 —
 * confirm against the launch configuration.
 */
extern "C"
__global__ void reduceStates(int numCategories, int numStates, int numSitesWithPadding, int numSites,
double pInv, double* equiFreq,
float* sequence, double* ufScaling, int ufScalingOffset){
//among-site rate heterogeneity category index
int categoryIdx = blockIdx.y;
// sequence site index
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
// site index within current block
int siteBlockIdx = threadIdx.x;
// index of the ancestral state at the current node
int stateIdx = threadIdx.y;
__shared__ float partialLikelihoods[1024];
__shared__ double sharedDoubleBuffer[1024];
// Prefetching partial likelihoods from the [category][state][site] layout.
if(siteIdx < numSitesWithPadding){
partialLikelihoods[threadIdx.x + blockDim.x * threadIdx.y] = sequence[siteIdx + stateIdx*numSitesWithPadding + categoryIdx*numStates*numSitesWithPadding];
}
// Weight each state's likelihood by its equilibrium frequency, the
// category weight, and the accumulated underflow scaling.
if(siteIdx < numSites){
sharedDoubleBuffer[stateIdx * blockDim.x + siteBlockIdx] = partialLikelihoods[stateIdx * blockDim.x + siteBlockIdx] * equiFreq[stateIdx]
* ((1.0 - pInv)/(double)(numCategories)) * ufScaling[categoryIdx * numSitesWithPadding + siteIdx + ufScalingOffset];
}
__syncthreads();
// Reduce states here: one thread per site sums the weighted states and
// stores the per-(category, site) likelihood back into ufScaling.
if(stateIdx == 0 && siteIdx < numSites){
double cellLikelihoodAccumulator = 0;
for(int st = 0; st < numStates; st++){
cellLikelihoodAccumulator += sharedDoubleBuffer[st * blockDim.x + siteBlockIdx];
}
ufScaling[categoryIdx * numSitesWithPadding + siteIdx + ufScalingOffset] = cellLikelihoodAccumulator;
}
}
/**
 * Sums the per-category site likelihoods produced by reduceStates, adds
 * the proportion-of-invariant-sites term (invSites * pInv), and stores the
 * weighted per-site log-likelihood back into the start of ufScaling.
 * Launch layout (from the indexing): threadIdx.y = category,
 * threadIdx.x = site within block; a single block row covers all
 * categories, so blockDim.y must equal numCategories (assumed — confirm).
 * NOTE(review): the shared buffer assumes blockDim.x * numCategories
 * <= 1024 — confirm against the launch configuration.
 */
extern "C"
__global__ void reduceCategories(int numCategories, int numSites, int numSitesWithPadding, double pInv, double* ufScaling, double* invSites, int* weights, int split_offset){
int categoryIdx = threadIdx.y;
int siteIdx = threadIdx.x + blockDim.x * blockIdx.x;
int siteBlockIdx = threadIdx.x;
__shared__ double sharedDoubleBuffer[1024];
// Stage this partition's per-(category, site) likelihoods.
if(siteIdx < numSites){
sharedDoubleBuffer[categoryIdx * blockDim.x + siteBlockIdx] = ufScaling[categoryIdx * numSitesWithPadding + siteIdx + (split_offset*numCategories)];
}
__syncthreads();
// One thread per site: sum over categories, add the invariant-sites term,
// then take log and apply the site pattern weight.
if(categoryIdx == 0 && siteIdx < numSites){
for(int cat = 1; cat < numCategories; cat++){
sharedDoubleBuffer[siteBlockIdx] += sharedDoubleBuffer[cat * blockDim.x + siteBlockIdx];
}
double siteLikelihoodInv = invSites[siteIdx + split_offset] * pInv;
ufScaling[siteIdx + split_offset] = log(sharedDoubleBuffer[siteBlockIdx] + siteLikelihoodInv) * weights[siteIdx + split_offset];
}
}
extern "C"
// Block-wise sum reduction over g_idata using the up-sweep phase of a
// work-efficient (Blelloch-style) scan in shared memory. Each block
// reduces MAX_ELEMENTS_PER_BLOCK inputs (2 per thread) and writes its
// partial sum back into g_idata[blockIdx.x]; when the grid has a single
// block the final total is also copied to g_odata[0].
// MAX_ELEMENTS_PER_BLOCK and CONFLICT_FREE_OFFSET are defined elsewhere
// in this file; temp[2115] presumably equals MAX_ELEMENTS_PER_BLOCK plus
// its bank-conflict padding — TODO confirm the macro values.
__global__ void reduceSites(double* g_odata, double* g_idata, int n){
    __shared__ double temp[2115];
    int thid = threadIdx.x;
    int offset = 1;
    // each thread loads two elements, MAX_ELEMENTS_PER_BLOCK/2 apart
    int ai = thid;
    int bi = thid + (MAX_ELEMENTS_PER_BLOCK/2);
    int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
    int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
    //Copy data from global memory to shared memory and apply
    // padding for sizes that are not exponents of 2
    int blockOffset = MAX_ELEMENTS_PER_BLOCK * blockIdx.x;
    if((blockOffset + ai) < n){
        temp[ai + bankOffsetA] = g_idata[blockOffset + ai];
    }else{
        temp[ai + bankOffsetA] = 0;
    }
    if((blockOffset + bi) < n){
        temp[bi + bankOffsetB] = g_idata[blockOffset + bi];
    }else{
        temp[bi + bankOffsetB] = 0;
    }
    // up-sweep: pairwise sums with doubling stride; the block total ends
    // up in the last padded slot
    for(int d = MAX_ELEMENTS_PER_BLOCK >> 1; d > 0; d >>= 1){
        __syncthreads();
        if(thid < d){
            int ai = offset * (2 * thid + 1) - 1;
            int bi = offset * (2 * thid + 2) - 1;
            ai += CONFLICT_FREE_OFFSET(ai);
            bi += CONFLICT_FREE_OFFSET(bi);
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    // publish this block's partial sum in place (destructive on g_idata;
    // the host presumably re-launches until one block remains)
    if(thid == 0){
        g_idata[blockIdx.x] = temp[MAX_ELEMENTS_PER_BLOCK - 1 + CONFLICT_FREE_OFFSET(MAX_ELEMENTS_PER_BLOCK - 1)];
    }
    __syncthreads();
    // single-block launch: the partial sum is already the grand total
    if(thid == 0 && blockIdx.x == 0 && gridDim.x == 1) g_odata[0] = g_idata[0];
}
20,584 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated compiler-fuzzing kernel (see header note: do not modify
// the logic). Deliberately chains float math intrinsics over extreme
// subnormal/overflow-range constants and prints the accumulated result.
// Intended for a single-thread <<<1,1>>> launch from main below.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
    comp += cosf((-1.7576E-30f + -1.9558E-37f * (-1.0797E-44f - var_2 * -1.5780E5f)));
    for (int i=0; i < var_1; ++i) {
        // note: division by +0.0f is intentional fuzz input (yields inf/nan)
        comp = var_3 + (-1.5776E-42f / +0.0f + acosf(asinf(-1.4089E36f)));
        comp += asinf(sinhf((-0.0f * +0.0f - (var_4 + -1.2916E-43f - var_5))));
        comp = +1.2099E-35f + var_6 / (var_7 - -1.1441E-42f * sinhf(+1.2249E1f));
    }
    if (comp <= (-0.0f - var_8)) {
        comp = (var_9 / (var_10 + var_11 + var_12));
        float tmp_1 = powf(var_13 - -1.3566E15f * var_14, ldexpf(+0.0f, 2));
        comp += tmp_1 * fmodf((-1.2780E-29f + -1.3688E-35f / (var_15 + -1.3110E34f + -1.8922E-35f)), (-1.3659E-41f + var_16));
    }
    if (comp == atan2f((-1.3190E34f + -1.3193E9f + var_17 - (+1.3294E-35f / var_18 + +1.8826E-42f)), (-1.9068E34f + (+1.5156E36f + -1.7177E35f + var_19)))) {
        comp += +1.6347E-43f * var_20 - -1.7625E-37f + powf((-1.9462E-13f + var_21 - +1.8158E-42f / (-1.5474E34f + -1.0966E-42f + var_22)), var_23 / var_24 * +1.4382E-36f + sinhf((+0.0f + +1.4582E-44f)));
    }
    // device-side printf: the single observable output of the test
    printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot set to v.
// The caller owns the returned memory and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int idx = 0;
    while (idx < 10) {
        buf[idx] = v;
        ++idx;
    }
    return buf;
}
// Auto-generated driver: parses 25 positional arguments and forwards
// them to the single-thread fuzz kernel above.
// NOTE(review): argv[1..25] are read without checking argc >= 26 —
// running with fewer arguments is undefined behaviour. Left as-is
// because the file is marked "do not modify".
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    // single-thread launch; synchronize so the device printf flushes
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
    cudaDeviceSynchronize();
    return 0;
}
|
20,585 | #include <vector>
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <string>
#include <fstream>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#define E_SIZE 100
#define H_SIZE 99
#define BLOCK 1024
//this program will assume a 98x98x98 grid with 2 cells of zero padding for the E fields
//the padded zeros act as PEC boundaries
//The H fields will be 99x99x99 (offset by half cell, inside the PEC boundary)
//This version is a very naive version without matrix versions of the calculations
using namespace std;
// Fill kernel: every launched thread writes `init` into one slot of the
// buffer. NOTE(review): no bounds guard — the launch configuration must
// cover exactly the buffer length. The only call site (in main) is
// currently commented out; add an explicit size check before reviving it.
__global__ void InitWall(double* ey, double init) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    ey[tid] = init;
}
//calculate ex_{i,j,k} for the next time step
//depends on: ex_{i,j,k} for the current time step, hz of adjacent cells, hy of adj. cells,
//the time step, epsilon, and the cell steps
//ended up using this as the general calculation for all E and H components
// Generic Yee-cell field update used for every E and H component:
// returns the previous field value plus dt/perm times the discrete
// curl term ((hzp - hzn)/d1 - (hyp - hyn)/d2).
__device__ double Calc(double exn, double hzp, double hzn, double hyp, double hyn, double d1, double d2, double perm, double dt) {
    const double curl = (hzp - hzn) / d1 - (hyp - hyn) / d2;
    return dt * curl / perm + exn;
}
// Re-map a flat index from the E grid (E_SIZE^3 layout) onto the H grid
// (H_SIZE^3 layout) by decomposing into (i,j,k) and re-flattening.
__device__ int E2H(int index) {
    const int plane = E_SIZE * E_SIZE;
    const int i = index / plane;
    const int rem = index % plane;
    const int j = rem / E_SIZE;
    const int k = rem % E_SIZE;
    return (i * H_SIZE + j) * H_SIZE + k;
}
// Re-map a flat index from the H grid (H_SIZE^3 layout) onto the E grid
// (E_SIZE^3 layout) by decomposing into (i,j,k) and re-flattening.
__device__ int H2E(int index) {
    const int plane = H_SIZE * H_SIZE;
    const int i = index / plane;
    const int rem = index % plane;
    const int j = rem / H_SIZE;
    const int k = rem % H_SIZE;
    return (i * E_SIZE + j) * E_SIZE + k;
}
// Update Hx for one time step: hx += dt/mu * (dEy/dz - dEz/dy), one
// thread per H-grid node (size = H_SIZE^3). Uses unit cell spacing.
// Fix: the row stride was hard-coded as 100; use E_SIZE so the kernel
// stays consistent with the grid macros (same value, named constant).
__global__ void Set_H_X(double* hx, double* ey, double* ez, double mu, int size, double dt) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // don't do anything for any thread ids that are greater
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        const int y_offset = E_SIZE;   // stride between adjacent j rows in the E grid
        old = hx[tid];
        int edex = H2E(tid);           // E-grid index co-located with this H node
        t1p = ey[edex + 1];            // Ey one step in +z
        t1n = ey[edex];
        t2p = ez[edex + y_offset];     // Ez one step in +y
        t2n = ez[edex];
        hx[tid] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, mu, dt);
    }
}
// Update Hy for one time step: hy += dt/mu * (dEz/dx - dEx/dz), one
// thread per H-grid node (size = H_SIZE^3). Uses unit cell spacing.
// Fix: the plane stride was hard-coded as 10000; use E_SIZE*E_SIZE so
// the kernel stays consistent with the grid macros (same value).
__global__ void Set_H_Y(double* hy, double* ez, double* ex, double mu, int size, double dt) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // don't do anything for any thread ids that are greater
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        const int x_offset = E_SIZE * E_SIZE;  // stride between adjacent i planes in the E grid
        old = hy[tid];
        int edex = H2E(tid);                   // E-grid index co-located with this H node
        t1p = ez[edex + x_offset];             // Ez one step in +x
        t1n = ez[edex];
        t2p = ex[edex + 1];                    // Ex one step in +z
        t2n = ex[edex];
        hy[tid] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, mu, dt);
    }
}
// Update Hz for one time step: hz += dt/mu * (dEx/dy - dEy/dx), one
// thread per H-grid node (size = H_SIZE^3). Uses unit cell spacing.
// Fix: strides were hard-coded as 10000/100; use E_SIZE-derived
// constants so the kernel stays consistent with the grid macros.
__global__ void Set_H_Z(double* hz, double* ex, double* ey, double mu, int size, double dt) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // don't do anything for any thread ids that are greater
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        const int x_offset = E_SIZE * E_SIZE;  // i-plane stride in the E grid
        const int y_offset = E_SIZE;           // j-row stride in the E grid
        old = hz[tid];
        int edex = H2E(tid);                   // E-grid index co-located with this H node
        t1p = ex[edex + y_offset];             // Ex one step in +y
        t1n = ex[edex];
        t2p = ey[edex + x_offset];             // Ey one step in +x
        t2n = ey[edex];
        hz[tid] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, mu, dt);
    }
}
// __global__ void Set_E_X(double* ex, double* hz, double* hy, double eps, int size, double dt) {
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// // respect the border
// int n = 100;
// int n2 = 10000;
// int n3 = 1000000;
// if (tid >= size ||((tid < n2) || (tid > (n3-n2))) || (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))) || ((tid%n == 0) || (tid%n == n-1))){}
// // if ((tid < n2) || (tid > (n3-n2))){}
// // else if (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))){}
// // else if ((tid%n == 0) || (tid%n == n-1)){}
// else {
// // printf("%d\n", tid);
// double old, t1p, t1n, t2p, t2n;
//
// old = ex[tid];
// t1p = hz[tid];
// t1n = hz[tid-H_SIZE];
// t2p = hy[tid];
// t2n = hy[tid-1];
// ex[tid] = Calc(old,t1p,t1n,t2p,t2n,1.0,1.0,eps,dt);
// }
// }
//
// __global__ void Set_E_Y(double* ey, double* hx, double* hz, double eps, int size, double dt) {
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// // respect the border
// // int n = E_SIZE;
// // int n2 = E_SIZE*E_SIZE;
// // int n3 = n2 * E_SIZE;
//
// int n = 100;
// int n2 = 10000;
// int n3 = 1000000;
// if (tid >= size ||((tid < n2) || (tid > (n3-n2))) || (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))) || ((tid%n == 0) || (tid%n == n-1))){}
// //
// // if ((tid < n2) || (tid > (n3-n2))){}
// // else if (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))){}
// // else if ((tid%n == 0) || (tid%n == n-1)){}
// else {
// double old, t1p, t1n, t2p, t2n;
//
// old = ey[tid];
// t1p = hx[tid];
// t1n = hx[tid-1];
// t2p = hz[tid];
// t2n = hz[tid-H_SIZE*H_SIZE];
// ey[tid] = Calc(old,t1p,t1n,t2p,t2n,1.0,1.0,eps,dt);
// }
// }
//
// __global__ void Set_E_Z(double* ez, double* hy, double* hx, double eps, int size, double dt) {
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// // respect the border
// // int n = E_SIZE;
// // int n2 = E_SIZE*E_SIZE;
// // int n3 = n2 * E_SIZE;
//
// int n = 100;
// int n2 = 10000;
// int n3 = 1000000;
// if (tid >= size ||((tid < n2) || (tid > (n3-n2))) || (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))) || ((tid%n == 0) || (tid%n == n-1))){}
// //
// // if ((tid < n2) || (tid > (n3-n2))){}
// // else if (((0 < tid%n2) && (tid%n2 < n)) || ((n2-n < tid%n2) && (tid%n2 < n2-1))){}
// // else if ((tid%n == 0) || (tid%n == n-1)){}
// else {
// double old, t1p, t1n, t2p, t2n;
//
// old = ez[tid];
// t1p = hy[tid];
// t1n = hy[tid-H_SIZE*H_SIZE];
// t2p = hx[tid];
// t2n = hx[tid-H_SIZE];
// ez[tid] = Calc(old,t1p,t1n,t2p,t2n,1.0,1.0,eps,dt);
// }
// }
// Update Ex at interior E-grid nodes: ex += dt/eps * (dHz/dy - dHy/dz).
// One thread per interior node; inner_indices maps tid -> flat E index
// (size = 98^3), which keeps the PEC boundary cells untouched.
// Fix: the H-grid row stride was hard-coded as 99; use H_SIZE so the
// kernel stays consistent with the grid macros (same value).
__global__ void Set_E_X(double* ex, double* hz, double* hy, double eps, int size, double dt, int* inner_indices) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // respect the border
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        int index = inner_indices[tid];
        old = ex[index];
        int hdex = E2H(index);       // co-located H-grid index
        t1p = hz[hdex];
        t1n = hz[hdex - H_SIZE];     // Hz one step back in y
        t2p = hy[hdex];
        t2n = hy[hdex - 1];          // Hy one step back in z
        ex[index] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, eps, dt);
    }
}
// Update Ey at interior E-grid nodes: ey += dt/eps * (dHx/dz - dHz/dx).
// One thread per interior node; inner_indices maps tid -> flat E index.
// Fix: the H-grid plane stride was hard-coded as 99*99; use
// H_SIZE*H_SIZE so the kernel stays consistent with the grid macros.
__global__ void Set_E_Y(double* ey, double* hx, double* hz, double eps, int size, double dt, int* inner_indices) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // respect the border
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        int index = inner_indices[tid];
        int hdex = E2H(index);              // co-located H-grid index
        old = ey[index];
        t1p = hx[hdex];
        t1n = hx[hdex - 1];                 // Hx one step back in z
        t2p = hz[hdex];
        t2n = hz[hdex - H_SIZE * H_SIZE];   // Hz one step back in x
        ey[index] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, eps, dt);
    }
}
// Update Ez at interior E-grid nodes: ez += dt/eps * (dHy/dx - dHx/dy).
// One thread per interior node; inner_indices maps tid -> flat E index.
// Fix: H-grid strides were hard-coded as 99*99 and 99; use H_SIZE-derived
// constants so the kernel stays consistent with the grid macros.
__global__ void Set_E_Z(double* ez, double* hy, double* hx, double eps, int size, double dt, int* inner_indices) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // respect the border
    if (tid < size) {
        double old, t1p, t1n, t2p, t2n;
        int index = inner_indices[tid];
        int hdex = E2H(index);              // co-located H-grid index
        old = ez[index];
        t1p = hy[hdex];
        t1n = hy[hdex - H_SIZE * H_SIZE];   // Hy one step back in x
        t2p = hx[hdex];
        t2n = hx[hdex - H_SIZE];            // Hx one step back in y
        ez[index] = Calc(old, t1p, t1n, t2p, t2n, 1.0, 1.0, eps, dt);
    }
}
// Used for time keeping independent of the clock
// Current wall-clock time in seconds (via gettimeofday).
// Returns 0 if the clock read fails, matching the original behaviour.
double get_wall_time(){
    struct timeval now;
    if (gettimeofday(&now, NULL) != 0) {
        // Handle error
        return 0;
    }
    return (double)now.tv_sec + (double)now.tv_usec * .000001;
}
int nx, ny, nz;
double dx, dy, dz;
double dt;
// double ex[E_SIZE][E_SIZE][E_SIZE] = {0};
// double ey[E_SIZE][E_SIZE][E_SIZE] = {0};
// double ez[E_SIZE][E_SIZE][E_SIZE] = {0};
// double hx[H_SIZE][H_SIZE][H_SIZE] = {0};
// double hy[H_SIZE][H_SIZE][H_SIZE] = {0};
// double hz[H_SIZE][H_SIZE][H_SIZE] = {0};
// enum Field {e_x, e_y, e_z, h_x, h_y, h_z};
// This is source term
// the argument is time value
//
// Gaussian source pulse centred at t = 5e-7 s (width parameter 1e-15);
// peaks at 1.0 and decays symmetrically on either side.
double source(double t) {
    const double shifted = t - 5e-7;
    return exp(-(pow(shifted, 2.0)) / 1e-15);
};
//use existing procedures for all calculations
//the various calc_enijk procedures are the exact same math, we will just use one
//depends on: desired quantity, Ex array, Ey array, Ez array, Hx array, Hy array,
//Hz array (all of which are pointers), dx, dy, dz, i, j, k
//type values: 0, 1, 2, 3, 4, 5 = ex, ey, ez, hx, hy, hz
// double calc_int(int type, double ex[][ny][nz], double ey[][ny][nz], double ez[][ny][nz],
// double hx[][ny-1][nz-1], double hy[][ny-1][nz-1], double hz[][ny-1][nz-1],
// double dx, double dy, double dz,
// double dt, int i, int j, int k)
//function to calculate the magnitude of a 3-vector
//used to write out results, not part of simulation
// Euclidean magnitude of the 3-vector (x, y, z).
// Used only when writing results to file, not in the simulation itself.
double magn(double x, double y, double z) {
    return sqrt(x * x + y * y + z * z);
}
// frunction to write out the magnitude of the E-field to a file
// Write one output row to stream f: the time t, then |E| sampled along
// the global x axis (step `stride`) in the plane i = ind, z = 49, then
// the probe coordinates as trailing columns. Reads the global nx.
// NOTE(review): hard-codes the 100x100 E-plane pitch and mid-plane
// z index 49 — keep in sync with E_SIZE if the grid changes.
int write_to(ofstream& f, double t, int ind, int stride, double* ex, double* ey, double* ez) {
    f << t;
    int i;
    for (i = 0; i < nx; i+=stride) {
        int index = ind*100*100+i*100+49; // middle index for 100
        f << "\t" << magn(ex[index],ey[index],ez[index]);
    }
    // record where the samples came from (plane, last x, z)
    f << "\t" << ind << "\t" << i << "\t" << 49;
    f << endl;
    return 0;
}
// primary simulation chunk
// FDTD driver: simulates a plane wave entering a PEC-bounded 100^3 box.
// Allocates E (100^3) and H (99^3) component grids on host and device,
// precomputes the interior E-node index list, then time-steps the Yee
// update kernels until tf, periodically copying fields back for output.
// NOTE(review): no CUDA call is error-checked and neither host nor
// device buffers are freed before return; the per-kernel
// cudaDeviceSynchronize() calls are redundant (same-stream launches are
// already ordered). Acceptable for a one-shot experiment.
int main() {
    double eps = 8.85e-12;   // vacuum permittivity
    double mu = 1.257e-6;    // vacuum permeability
    double *ex, *ey, *ez, *hx, *hy, *hz;
    int *inner_indices;
    int e_size = E_SIZE*E_SIZE*E_SIZE;   // 100^3 E-field nodes per component
    int h_size = H_SIZE*H_SIZE*H_SIZE;   // 99^3 H-field nodes per component
    int i_size = 98*98*98;               // interior E nodes (PEC shell excluded)
    ex = (double *)malloc((e_size)*sizeof(double));
    ey = (double *)malloc((e_size)*sizeof(double));
    ez = (double *)malloc((e_size)*sizeof(double));
    hx = (double *)malloc((h_size)*sizeof(double));
    hy = (double *)malloc((h_size)*sizeof(double));
    hz = (double *)malloc((h_size)*sizeof(double));
    inner_indices = (int *)malloc((i_size)*sizeof(int));
    // initialize to zero
    for (int i = 0; i < e_size; i++) {
        ex[i] = 0.0;
        ey[i] = 0.0;
        ez[i] = 0.0;
    }
    for (int i = 0; i < h_size; i++) {
        hx[i] = 0.0;
        hy[i] = 0.0;
        hz[i] = 0.0;
    }
    // cuda variables
    double *d_ex, *d_ey, *d_ez, *d_hx, *d_hy, *d_hz;
    int *d_inner;
    cudaMalloc((void **)&d_ex, sizeof(double) * (e_size));
    cudaMalloc((void **)&d_ey, sizeof(double) * (e_size));
    cudaMalloc((void **)&d_ez, sizeof(double) * (e_size));
    cudaMalloc((void **)&d_hx, sizeof(double) * (h_size));
    cudaMalloc((void **)&d_hy, sizeof(double) * (h_size));
    cudaMalloc((void **)&d_hz, sizeof(double) * (h_size));
    cudaMalloc((void **)&d_inner, sizeof(int) * (i_size));
    // seed the device with the zeroed host fields
    cudaMemcpy(d_ex, ex, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ey, ey, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ez, ez, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
    cudaMemcpy(d_hx, hx, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
    cudaMemcpy(d_hy, hy, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
    cudaMemcpy(d_hz, hz, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
    // grid dimensions and unit cell spacing (1 m cells)
    nx = E_SIZE;
    ny = E_SIZE;
    nz = E_SIZE;
    dx = 1;
    dy = 1;
    dz = 1;
    dt = 1e-9;   // time step; below the ~1.9e-9 s Courant limit for 1 m cells
    // cout << "middle element is: " << ex[49][49][49] << endl;
    //the courant condition for 1 meter is 1.9e-9
    //final time be 1e-6 (for 1000 time steps)
    double tf = 1e-6;
    double t = 0.0;
    int a = 0;   // step counter, drives the every-10th-step output
    // one output file per x-slice sampled every 10 planes
    ofstream outFiles[11];
    stringstream fname;
    for (int it = 0; it < 11; it++) {
        fname.str("");
        fname << "paraOut/output" << it << ".txt";
        outFiles[it].open(fname.str());
    };
    int outind;
    ofstream probef;
    probef.open("paraOut/test.txt");
    ofstream probef2;
    probef2.open("paraOut/test_h.txt");
    double difference, w_start, w_finish;
    w_start = get_wall_time();
    int numBlocksH = h_size/BLOCK+1;   // ceil-div style launch sizing
    int numBlocksI = i_size/BLOCK+1;
    dim3 threadsPerBlock(BLOCK, 1); // Max one dimensional block
    // Build the list of interior E-node flat indices (i,j,k in 1..98),
    // which the E-update kernels use to skip the PEC boundary cells.
    int count = 0;
    for (int i = 1; i < H_SIZE; i++) {
        for (int j = 1; j < H_SIZE; j++) {
            for (int k = 1; k < H_SIZE; k++) {
                inner_indices[count] = i*E_SIZE*E_SIZE+j*E_SIZE+k;
                count++;
            }
        }
    }
    cudaMemcpy(d_inner, inner_indices, sizeof(int) * (i_size), cudaMemcpyHostToDevice);
    while (t<tf) {
        cout << "t = " <<t <<endl;
        // set the source value for the incoming plane wave at x boundary
        double ey_init = source(t);
        // InitWall<<<10,1000>>>(d_ey, ey_init);
        // drive the whole i = 0 plane of Ey on the host (index g*100 + h)
        for (int g = 0; g < ny; g++) {
            for (int h = 0; h < nz; h++) {
                int index = g*100 + h;
                ey[index] = ey_init;
            }
        }
        // Every tenth time step, write out slices of e-field values to a set of files
        if (!(a%10)) {
            cudaMemcpy(ex, d_ex, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
            cudaMemcpy(ez, d_ez, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
            cudaMemcpy(hy, d_hy, sizeof(double) * h_size, cudaMemcpyDeviceToHost);
            for (int fn = 0; fn < 11; fn++) {
                outind = fn*10;
                if (outind > 99) {
                    outind = 99;   // clamp the last slice to the grid edge
                }
                write_to(outFiles[fn], t, outind, 10, ex, ey, ez);
            }
            probef << t;
            // write to a couple of debug probes placed in the center of the box
            for (int y = 45; y < 55; y+=1) {
                int ex_index = 49*100*100+49*100+y;
                int hy_index = y*99*99+49*99+49;
                probef << "\t" << ex[ex_index];
                probef2 << "\t" << hy[hy_index];
            };
            probef << endl;
            probef2 << endl;
        };
        // after wall, might be better to do wall in CUDA
        cudaMemcpy(d_ey, ey, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
        // half-step: update H from the current E fields
        Set_H_X<<<numBlocksH, threadsPerBlock>>>(d_hx, d_ey, d_ez, mu, h_size, dt);
        cudaDeviceSynchronize();
        Set_H_Y<<<numBlocksH, threadsPerBlock>>>(d_hy, d_ez, d_ex, mu, h_size, dt);
        cudaDeviceSynchronize();
        Set_H_Z<<<numBlocksH, threadsPerBlock>>>(d_hz, d_ex, d_ey, mu, h_size, dt);
        cudaDeviceSynchronize();
        // half-step: update interior E from the new H fields
        Set_E_X<<<numBlocksI, threadsPerBlock>>>(d_ex, d_hz, d_hy, eps, i_size, dt, d_inner);
        cudaDeviceSynchronize();
        Set_E_Y<<<numBlocksI, threadsPerBlock>>>(d_ey, d_hx, d_hz, eps, i_size, dt, d_inner);
        cudaDeviceSynchronize();
        Set_E_Z<<<numBlocksI, threadsPerBlock>>>(d_ez, d_hy, d_hx, eps, i_size, dt, d_inner);
        cudaDeviceSynchronize();
        // bring back for wall
        cudaMemcpy(ey, d_ey, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
        t += dt; // time step counter
        a += 1; // printing counter
    }
    w_finish = get_wall_time();
    difference = w_finish - w_start;
    cout << "Parallel: " << difference << " seconds\n";
    probef.flush();
    probef.close();
    probef2.flush();
    probef2.close();
    for (int it = 0; it < 11; it++) {
        outFiles[it].flush();
        outFiles[it].close();
    };
    return 0;
}
|
// ---------------------------------------------------------------------
// Machine-generated elementwise kernels. Pattern for each pair: the
// `_op_{32,64}_01` kernel computes z[i] = xi OP y[i] over n elements
// with a grid-stride while loop; the extern "C" host wrapper launches
// it with a fixed <<<128,128>>> configuration (grid-stride makes any n
// correct). 32 = float, 64 = double; "01" = scalar-lhs, vector-rhs.
// ---------------------------------------------------------------------
// z[i] = xi + y[i] (float)
__global__ void _add_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi+yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void add_32_01(int n, float xi, float *y, float *z) {
    _add_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi + y[i] (double)
__global__ void _add_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi+yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void add_64_01(int n, double xi, double *y, double *z) {
    _add_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi - y[i] (float)
__global__ void _sub_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi-yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void sub_32_01(int n, float xi, float *y, float *z) {
    _sub_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi - y[i] (double)
__global__ void _sub_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi-yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void sub_64_01(int n, double xi, double *y, double *z) {
    _sub_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi * y[i] (float)
__global__ void _mul_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi*yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void mul_32_01(int n, float xi, float *y, float *z) {
    _mul_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi * y[i] (double)
__global__ void _mul_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi*yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void mul_64_01(int n, double xi, double *y, double *z) {
    _mul_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi / y[i] (float)
__global__ void _div_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi/yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void div_32_01(int n, float xi, float *y, float *z) {
    _div_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi / y[i] (double)
__global__ void _div_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi/yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void div_64_01(int n, double xi, double *y, double *z) {
    _div_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = xi ** y[i] (float), grid-stride loop; wrapper launches <<<128,128>>>.
// Fix: use powf instead of pow — the double overload silently promoted
// every element to double precision in a float kernel.
__global__ void _pow_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = powf(xi,yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void pow_32_01(int n, float xi, float *y, float *z) {
    _pow_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// ---------------------------------------------------------------------
// Machine-generated elementwise kernels (continued): power, min/max,
// comparisons (results stored as 0.0/1.0 in z), and activation-gradient
// helpers. Same template throughout: grid-stride while loop plus an
// extern "C" wrapper with a fixed <<<128,128>>> launch.
// ---------------------------------------------------------------------
// z[i] = xi ** y[i] (double)
__global__ void _pow_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = pow(xi,yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void pow_64_01(int n, double xi, double *y, double *z) {
    _pow_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = max(xi, y[i]) (float)
__global__ void _max_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (xi>yi?xi:yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void max_32_01(int n, float xi, float *y, float *z) {
    _max_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = max(xi, y[i]) (double)
__global__ void _max_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (xi>yi?xi:yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void max_64_01(int n, double xi, double *y, double *z) {
    _max_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = min(xi, y[i]) (float)
__global__ void _min_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (xi<yi?xi:yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void min_32_01(int n, float xi, float *y, float *z) {
    _min_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = min(xi, y[i]) (double)
__global__ void _min_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (xi<yi?xi:yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void min_64_01(int n, double xi, double *y, double *z) {
    _min_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi == y[i]) as 0/1 (float)
__global__ void _eq_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi==yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void eq_32_01(int n, float xi, float *y, float *z) {
    _eq_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi == y[i]) as 0/1 (double)
__global__ void _eq_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi==yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void eq_64_01(int n, double xi, double *y, double *z) {
    _eq_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi != y[i]) as 0/1 (float)
__global__ void _ne_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi!=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void ne_32_01(int n, float xi, float *y, float *z) {
    _ne_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi != y[i]) as 0/1 (double)
__global__ void _ne_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi!=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void ne_64_01(int n, double xi, double *y, double *z) {
    _ne_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi > y[i]) as 0/1 (float)
__global__ void _gt_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi>yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void gt_32_01(int n, float xi, float *y, float *z) {
    _gt_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi > y[i]) as 0/1 (double)
__global__ void _gt_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi>yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void gt_64_01(int n, double xi, double *y, double *z) {
    _gt_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi >= y[i]) as 0/1 (float)
__global__ void _ge_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi>=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void ge_32_01(int n, float xi, float *y, float *z) {
    _ge_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi >= y[i]) as 0/1 (double)
__global__ void _ge_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi>=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void ge_64_01(int n, double xi, double *y, double *z) {
    _ge_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi < y[i]) as 0/1 (float)
__global__ void _lt_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi<yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void lt_32_01(int n, float xi, float *y, float *z) {
    _lt_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi < y[i]) as 0/1 (double)
__global__ void _lt_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi<yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void lt_64_01(int n, double xi, double *y, double *z) {
    _lt_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi <= y[i]) as 0/1 (float)
__global__ void _le_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = xi<=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void le_32_01(int n, float xi, float *y, float *z) {
    _le_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = (xi <= y[i]) as 0/1 (double)
__global__ void _le_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = xi<=yi;
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void le_64_01(int n, double xi, double *y, double *z) {
    _le_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// gradient helper: z[i] = -xi * y[i]^2 (float); presumably the backward
// pass of 1/x where y holds the forward output — TODO confirm at callers
__global__ void _invxback_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (-xi*yi*yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void invxback_32_01(int n, float xi, float *y, float *z) {
    _invxback_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// gradient helper: z[i] = -xi * y[i]^2 (double)
__global__ void _invxback_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (-xi*yi*yi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void invxback_64_01(int n, double xi, double *y, double *z) {
    _invxback_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// ReLU backward: z[i] = xi if y[i] > 0 else 0 (float)
__global__ void _reluback_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (yi>0?xi:0);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void reluback_32_01(int n, float xi, float *y, float *z) {
    _reluback_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// ReLU backward: z[i] = xi if y[i] > 0 else 0 (double)
__global__ void _reluback_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (yi>0?xi:0);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void reluback_64_01(int n, double xi, double *y, double *z) {
    _reluback_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// sigmoid backward: z[i] = xi * y[i] * (1 - y[i]) (float); y holds the
// forward sigmoid output
__global__ void _sigmback_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (xi*yi*(1-yi));
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void sigmback_32_01(int n, float xi, float *y, float *z) {
    _sigmback_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// sigmoid backward: z[i] = xi * y[i] * (1 - y[i]) (double)
__global__ void _sigmback_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (xi*yi*(1-yi));
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void sigmback_64_01(int n, double xi, double *y, double *z) {
    _sigmback_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// tanh backward: z[i] = xi * (1 - y[i]^2) (float); y holds the forward
// tanh output
__global__ void _tanhback_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = (xi*(1-yi*yi));
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void tanhback_32_01(int n, float xi, float *y, float *z) {
    _tanhback_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// tanh backward: z[i] = xi * (1 - y[i]^2) (double)
__global__ void _tanhback_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = (xi*(1-yi*yi));
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void tanhback_64_01(int n, double xi, double *y, double *z) {
    _tanhback_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = y[i] ** xi (float, reversed-operand pow), grid-stride loop;
// wrapper launches <<<128,128>>>.
// Fix: use powf instead of pow — the double overload silently promoted
// every element to double precision in a float kernel.
__global__ void _rpow_32_01(int n, float xi, float *y, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        float yi = y[i];
        z[i] = powf(yi,xi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void rpow_32_01(int n, float xi, float *y, float *z) {
    _rpow_32_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// z[i] = y[i] ** xi (double, reversed-operand pow), grid-stride loop;
// wrapper launches <<<128,128>>>. pow is correct here: double kernel.
__global__ void _rpow_64_01(int n, double xi, double *y, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        double yi = y[i];
        z[i] = pow(yi,xi);
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void rpow_64_01(int n, double xi, double *y, double *z) {
    _rpow_64_01<<<128,128>>>(n,xi,y,z);
}
#ifdef __cplusplus
}
#endif
// Elementwise float kernel with broadcasting (sx/nx, sy/ny describe the
// operands' strides/lengths relative to n). Evaluates a numerically
// piecewise-stabilised expression involving exp, erfc and expm1; the
// sign-folding on negative xi exploits the expression's odd symmetry.
// Fix: the original called the double-precision exp/erfc/log on float
// operands (and used double literals), forcing a double round-trip per
// element in a float kernel; switched to the single-precision variants.
__global__ void _BGH_32_01(int n, float *x, int sx, int nx, float *y, int sy, int ny, float *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        // broadcast reads: full-length, unit-stride, scalar, or strided
        float xi = (nx==n ? x[i] : sx==1 ? x[i%nx] : nx==1 ? x[0] : x[(i/sx)%nx]);
        float yi = (ny==n ? y[i] : sy==1 ? y[i%ny] : ny==1 ? y[0] : y[(i/sy)%ny]);
        float sign = 1.0f;
        if (xi < 0.0f)
        {
            xi = -xi;
            yi = -yi;
            sign = -1.0f;
        }
        if (xi > 1e6f) /*like say infty: the sigmoid-like factor is 1*/
        {
            z[i] = sign * 0.7978846f * (expf(-yi*yi*0.5f)) / erfcf(yi*0.70710677f);
        }
        else if (xi > 15.0f)
        {
            // log-domain form: avoids overflow of expm1 for large xi
            z[i] = sign * 0.7978846f * (expf(-yi*yi*0.5f)) / erfcf(yi*0.70710677f) / (1.0f + expf(-2.0f*xi - logf(0.5f * erfcf(yi*0.70710677f))));
        }
        else
        {
            // moderate xi: direct evaluation via expm1f
            z[i] = sign * (expf(-yi*yi*0.5f)) / (2.5066283f * (1.0f/expm1f(2.0f*xi) + 0.5f*erfcf(yi*0.70710677f)));
        }
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
void BGH_32_01(int n, float *x, int sx, int nx, float *y, int sy, int ny, float *z) {
    _BGH_32_01<<<128,128>>>(n,x,sx,nx,y,sy,ny,z);
}
#ifdef __cplusplus
}
#endif
// Generated broadcasting elementwise kernel (double precision).
// Evaluates a sign-symmetric ratio of a Gaussian term exp(-y^2/2) to an
// erfc/expm1 denominator, with separate large-|x| regimes guarding against
// overflow/underflow. x and y may be broadcast against length n via their
// strides (sx, sy) and lengths (nx, ny).
__global__ void _BGH_64_01(int n, double *x, int sx, int nx, double *y, int sy, int ny, double *z) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    while (i < n) {
        // Broadcast selection: full-length, repeat, scalar, or strided repeat.
        double xi = (nx==n ? x[i] : sx==1 ? x[i%nx] : nx==1 ? x[0] : x[(i/sx)%nx]);
        double yi = (ny==n ? y[i] : sy==1 ? y[i%ny] : ny==1 ? y[0] : y[(i/sy)%ny]);
        double sign = 1.0;
        if (xi < 0.0)
        {
            // Exploit odd symmetry so the guarded branches below only see xi >= 0.
            xi = -xi;
            yi = -yi;
            sign = -1.0;
        }
        if (xi > 1e6) /*like say infty*/
        {
            // Limit form: the 1/expm1(2*xi) term vanishes entirely.
            z[i] = sign * 0.7978845608028654 * (exp(-yi*yi*0.5)) / erfc(yi*0.7071067811865475);
        }
        else if (xi > 15)
        {
            // Large-x form with a logistic correction evaluated in log space.
            z[i] = sign * 0.7978845608028654 * (exp(-yi*yi*0.5)) / erfc(yi*0.7071067811865475) / (1.0 + exp(-2.0*xi - log(0.5 * erfc(yi*0.7071067811865475))));
        }
        else
        {
            // BUG FIX: use the double-precision expm1 here; the previous code
            // called the float intrinsic expm1f, silently truncating this
            // 64-bit kernel's accuracy to ~7 significant digits (the 32-bit
            // sibling kernel correctly uses expm1f).
            z[i] = sign * (exp(-yi*yi*0.5)) / (2.5066282746310002 * (1.0/expm1(2.0*xi) + 0.5*erfc(yi*0.7071067811865475)));
        }
        i += blockDim.x * gridDim.x;
    }
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: asynchronous launch on the default stream with a fixed
// 128x128 configuration (the kernel's grid-stride loop covers any n).
void BGH_64_01(int n, double *x, int sx, int nx, double *y, int sy, int ny, double *z) {
_BGH_64_01<<<128,128>>>(n,x,sx,nx,y,sy,ny,z);
}
#ifdef __cplusplus
}
#endif
|
20,587 | #include<stdio.h>
#include<stdlib.h>
typedef struct {
unsigned char red,green,blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
#define CREATOR "COMP3231"
#define RGB_COMPONENT_COLOR 255
#define thread_x 10
#define thread_y 10
#define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__))
// Print a diagnostic (error string, file, line) and abort the process when a
// CUDA API call did not return cudaSuccess. Invoked via the CUDA_CHECK macro.
static void cuda_checker(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
// Load a binary ("P6") PPM image from disk into a freshly allocated PPMImage.
// Exits the process with a message on any malformed header, unsupported depth,
// allocation failure, or short read. Caller owns the returned image and its
// pixel buffer.
static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    //check the image format: magic number "P6" = binary RGB
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    //alloc memory for the image descriptor
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //skip comment lines ('#' up to end of line)
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);
    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    //read rgb component depth
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    //only 8-bit components are supported
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n') ;
    //memory allocation for pixel data
    img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel));
    // BUG FIX: previously re-tested 'img' here; it is the freshly allocated
    // pixel buffer that must be validated.
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //read pixel data from file: img->y rows of 3*img->x bytes each
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}
// Write img to disk as a binary ("P6") PPM file; exits on open failure.
void writePPM(const char *filename, PPMImage *img)
{
    FILE *out = fopen(filename, "wb");
    if (!out) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    // Header: magic number, creator comment, dimensions, component depth.
    fprintf(out, "P6\n");
    fprintf(out, "# Created by %s\n", CREATOR);
    fprintf(out, "%d %d\n", img->x, img->y);
    fprintf(out, "%d\n", RGB_COMPONENT_COLOR);
    // Raw pixel payload: img->y rows of 3*img->x bytes each.
    fwrite(img->data, 3 * img->x, img->y, out);
    fclose(out);
}
__constant__ float filter[9] = {0.05, 0.1, 0.05, 0.1, 0.4, 0.1, 0.05, 0.1, 0.05}; // ConstantVar filter
// Apply the 3x3 filter held in constant memory to one pixel per thread.
// Border pixels use a truncated kernel (out-of-bounds taps are skipped, the
// remaining weights are NOT renormalised). The '&' operators below are
// bitwise but safe: both operands are 0/1 comparison results.
// NOTE(review): idx is computed from the grid width (gridDim.x * blockDim.x)
// while every neighbour offset uses the image width dev_img->x; these agree
// only when the grid width equals the image width exactly (img->x a multiple
// of blockDim.x) -- confirm against the launch configuration.
__global__ void blur_kernel(PPMImage *dev_img, PPMPixel *out_data) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = x + gridDim.x * blockDim.x * y; // index in whole image
if (idx < dev_img->x * dev_img->y) { // avoid illegal memory access
// top-left corner: self, right, below, below-right
if (x == 0 & y == 0) {
out_data[idx].red = dev_img->data[idx].red * filter[4] + dev_img->data[idx + 1].red * filter[5] +
dev_img->data[idx + dev_img->x].red * filter[7] + dev_img->data[idx + dev_img->x + 1].red * filter[8];
out_data[idx].green = dev_img->data[idx].green * filter[4] + dev_img->data[idx + 1].green * filter[5] +
dev_img->data[idx + dev_img->x].green * filter[7] + dev_img->data[idx + dev_img->x + 1].green * filter[8];
out_data[idx].blue = dev_img->data[idx].blue * filter[4] + dev_img->data[idx + 1].blue * filter[5] +
dev_img->data[idx + dev_img->x].blue * filter[7] + dev_img->data[idx + dev_img->x + 1].blue * filter[8];
// bottom-left corner: above, above-right, self, right
} else if (x == 0 & y == dev_img->y - 1) {
out_data[idx].red = dev_img->data[idx - dev_img->x].red * filter[1] + dev_img->data[idx - dev_img->x + 1].red * filter[2] +
dev_img->data[idx].red * filter[4] + dev_img->data[idx + 1].red * filter[5];
out_data[idx].green = dev_img->data[idx - dev_img->x].green * filter[1] + dev_img->data[idx - dev_img->x + 1].green * filter[2] +
dev_img->data[idx].green * filter[4] + dev_img->data[idx + 1].green * filter[5];
out_data[idx].blue = dev_img->data[idx - dev_img->x].blue * filter[1] + dev_img->data[idx - dev_img->x + 1].blue * filter[2] +
dev_img->data[idx].blue * filter[4] + dev_img->data[idx + 1].blue * filter[5];
// top-right corner: left, self, below-left, below
} else if (x == dev_img->x - 1 & y == 0) {
out_data[idx].red = dev_img->data[idx - 1].red * filter[3] + dev_img->data[idx].red * filter[4] +
dev_img->data[idx + dev_img->x - 1].red * filter[6] + dev_img->data[idx + dev_img->x].red * filter[7];
out_data[idx].green = dev_img->data[idx - 1].green * filter[3] + dev_img->data[idx].green * filter[4] +
dev_img->data[idx + dev_img->x - 1].green * filter[6] + dev_img->data[idx + dev_img->x].green * filter[7];
out_data[idx].blue = dev_img->data[idx - 1].blue * filter[3] + dev_img->data[idx].blue * filter[4] +
dev_img->data[idx + dev_img->x - 1].blue * filter[6] + dev_img->data[idx + dev_img->x].blue * filter[7];
// bottom-right corner: above-left, above, left, self
} else if (x == dev_img->x - 1 & y == dev_img->y - 1) {
out_data[idx].red = dev_img->data[idx - dev_img->x - 1].red * filter[0] + dev_img->data[idx - dev_img->x].red * filter[1] +
dev_img->data[idx - 1].red * filter[3] + dev_img->data[idx].red * filter[4];
out_data[idx].green = dev_img->data[idx - dev_img->x - 1].green * filter[0] + dev_img->data[idx - dev_img->x].green * filter[1] +
dev_img->data[idx - 1].green * filter[3] + dev_img->data[idx].green * filter[4];
out_data[idx].blue = dev_img->data[idx - dev_img->x - 1].blue * filter[0] + dev_img->data[idx - dev_img->x].blue * filter[1] +
dev_img->data[idx - 1].blue * filter[3] + dev_img->data[idx].blue * filter[4];
// left edge: skip the three left-column taps
} else if (x == 0) {
out_data[idx].red = dev_img->data[idx - dev_img->x].red * filter[1] + dev_img->data[idx - dev_img->x + 1].red * filter[2] +
dev_img->data[idx].red * filter[4] + dev_img->data[idx + 1].red * filter[5] +
dev_img->data[idx + dev_img->x].red * filter[7] + dev_img->data[idx + dev_img->x + 1].red * filter[8];
out_data[idx].green = dev_img->data[idx - dev_img->x].green * filter[1] + dev_img->data[idx - dev_img->x + 1].green * filter[2] +
dev_img->data[idx].green * filter[4] + dev_img->data[idx + 1].green * filter[5] +
dev_img->data[idx + dev_img->x].green * filter[7] + dev_img->data[idx + dev_img->x + 1].green * filter[8];
out_data[idx].blue = dev_img->data[idx - dev_img->x].blue * filter[1] + dev_img->data[idx - dev_img->x + 1].blue * filter[2] +
dev_img->data[idx].blue * filter[4] + dev_img->data[idx + 1].blue * filter[5] +
dev_img->data[idx + dev_img->x].blue * filter[7] + dev_img->data[idx + dev_img->x + 1].blue * filter[8];
// right edge: skip the three right-column taps
} else if (x == dev_img->x - 1) {
out_data[idx].red = dev_img->data[idx - dev_img->x - 1].red * filter[0] + dev_img->data[idx - dev_img->x].red * filter[1] +
dev_img->data[idx - 1].red * filter[3] + dev_img->data[idx].red * filter[4] +
dev_img->data[idx + dev_img->x - 1].red * filter[6] + dev_img->data[idx + dev_img->x].red * filter[7];
out_data[idx].green = dev_img->data[idx - dev_img->x - 1].green * filter[0] + dev_img->data[idx - dev_img->x].green * filter[1] +
dev_img->data[idx - 1].green * filter[3] + dev_img->data[idx].green * filter[4] +
dev_img->data[idx + dev_img->x - 1].green * filter[6] + dev_img->data[idx + dev_img->x].green * filter[7];
out_data[idx].blue = dev_img->data[idx - dev_img->x - 1].blue * filter[0] + dev_img->data[idx - dev_img->x].blue * filter[1] +
dev_img->data[idx - 1].blue * filter[3] + dev_img->data[idx].blue * filter[4] +
dev_img->data[idx + dev_img->x - 1].blue * filter[6] + dev_img->data[idx + dev_img->x].blue * filter[7];
// top edge: skip the three above-row taps
} else if (y == 0) {
out_data[idx].red = dev_img->data[idx - 1].red * filter[3] + dev_img->data[idx].red * filter[4] +
dev_img->data[idx + 1].red * filter[5] + dev_img->data[idx + dev_img->x - 1].red * filter[6] +
dev_img->data[idx + dev_img->x].red * filter[7] + dev_img->data[idx + dev_img->x + 1].red * filter[8];
out_data[idx].green = dev_img->data[idx - 1].green * filter[3] + dev_img->data[idx].green * filter[4] +
dev_img->data[idx + 1].green * filter[5] + dev_img->data[idx + dev_img->x - 1].green * filter[6] +
dev_img->data[idx + dev_img->x].green * filter[7] + dev_img->data[idx + dev_img->x + 1].green * filter[8];
out_data[idx].blue = dev_img->data[idx - 1].blue * filter[3] + dev_img->data[idx].blue * filter[4] +
dev_img->data[idx + 1].blue * filter[5] + dev_img->data[idx + dev_img->x - 1].blue * filter[6] +
dev_img->data[idx + dev_img->x].blue * filter[7] + dev_img->data[idx + dev_img->x + 1].blue * filter[8];
// bottom edge: skip the three below-row taps
} else if (y == dev_img->y - 1) {
out_data[idx].red = dev_img->data[idx - dev_img->x - 1].red * filter[0] + dev_img->data[idx - dev_img->x].red * filter[1] +
dev_img->data[idx - dev_img->x + 1].red * filter[2] + dev_img->data[idx - 1].red * filter[3] +
dev_img->data[idx].red * filter[4] + dev_img->data[idx + 1].red * filter[5];
out_data[idx].green = dev_img->data[idx - dev_img->x - 1].green * filter[0] + dev_img->data[idx - dev_img->x].green * filter[1] +
dev_img->data[idx - dev_img->x + 1].green * filter[2] + dev_img->data[idx - 1].green * filter[3] +
dev_img->data[idx].green * filter[4] + dev_img->data[idx + 1].green * filter[5];
out_data[idx].blue = dev_img->data[idx - dev_img->x - 1].blue * filter[0] + dev_img->data[idx - dev_img->x].blue * filter[1] +
dev_img->data[idx - dev_img->x + 1].blue * filter[2] + dev_img->data[idx - 1].blue * filter[3] +
dev_img->data[idx].blue * filter[4] + dev_img->data[idx + 1].blue * filter[5];
// interior pixel: full 3x3 neighbourhood
} else {
out_data[idx].red = dev_img->data[idx - dev_img->x - 1].red * filter[0] + dev_img->data[idx - dev_img->x].red * filter[1] +
dev_img->data[idx - dev_img->x + 1].red * filter[2] + dev_img->data[idx - 1].red * filter[3] +
dev_img->data[idx].red * filter[4] + dev_img->data[idx + 1].red * filter[5] +
dev_img->data[idx + dev_img->x - 1].red * filter[6] + dev_img->data[idx + dev_img->x].red * filter[7] +
dev_img->data[idx + dev_img->x + 1].red * filter[8];
out_data[idx].green = dev_img->data[idx - dev_img->x - 1].green * filter[0] + dev_img->data[idx - dev_img->x].green * filter[1] +
dev_img->data[idx - dev_img->x + 1].green * filter[2] + dev_img->data[idx - 1].green * filter[3] +
dev_img->data[idx].green * filter[4] + dev_img->data[idx + 1].green * filter[5] +
dev_img->data[idx + dev_img->x - 1].green * filter[6] + dev_img->data[idx + dev_img->x].green * filter[7] +
dev_img->data[idx + dev_img->x + 1].green * filter[8];
out_data[idx].blue = dev_img->data[idx - dev_img->x - 1].blue * filter[0] + dev_img->data[idx - dev_img->x].blue * filter[1] +
dev_img->data[idx - dev_img->x + 1].blue * filter[2] + dev_img->data[idx - 1].blue * filter[3] +
dev_img->data[idx].blue * filter[4] + dev_img->data[idx + 1].blue * filter[5] +
dev_img->data[idx + dev_img->x - 1].blue * filter[6] + dev_img->data[idx + dev_img->x].blue * filter[7] +
dev_img->data[idx + dev_img->x + 1].blue * filter[8];
}
}
}
// Run the 3x3 blur kernel over img on the GPU, overwriting img->data in place.
// A shallow host copy of the descriptor is built whose .data field is
// repointed at device memory, so the whole PPMImage struct can be shipped to
// the GPU; a separate output buffer is used because the kernel reads
// neighbouring pixels and cannot blur in place.
void your_gaussian_blur_func(PPMImage *img) {
PPMImage *host_img; // for assigning PPMPixel pointer on device
host_img = (PPMImage *) malloc(sizeof(PPMImage));
memcpy(host_img, img, sizeof(PPMImage));
CUDA_CHECK(cudaMalloc((void**)&(host_img->data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device
CUDA_CHECK(cudaMemcpy(host_img->data, img->data, img->x * img->y * sizeof(PPMPixel), cudaMemcpyHostToDevice)); // copy PPMPixel data to device
// PPMPixel data is now on the gpu, now copy the "meta" data to gpu
PPMImage *dev_img; // for assigning PPMImage on device
CUDA_CHECK(cudaMalloc((void**)&dev_img, sizeof(PPMImage))); // allocate memory on device
CUDA_CHECK(cudaMemcpy(dev_img, host_img, sizeof(PPMImage), cudaMemcpyHostToDevice)); // copy memory to device
PPMPixel *out_data;
CUDA_CHECK(cudaMalloc((void**)&(out_data), img->x * img->y * sizeof(PPMPixel))); // allocate PPMPixel pointer on device
dim3 threadsPerBlock = dim3(thread_x, thread_y);
dim3 blocksPerGrid = dim3((img->x + thread_x - 1) / thread_x, (img->y + thread_y - 1) / thread_y);
blur_kernel<<<blocksPerGrid, threadsPerBlock>>>(dev_img, out_data);
// Kernel launches do not return errors; query explicitly so a bad launch
// configuration is reported here instead of surfacing at the next API call.
CUDA_CHECK(cudaGetLastError());
// The blocking copy below also synchronizes with the kernel.
CUDA_CHECK(cudaMemcpy(img->data, out_data, img->x * img->y * sizeof(PPMPixel), cudaMemcpyDeviceToHost)); // copy memory to host
CUDA_CHECK(cudaFree(out_data));
CUDA_CHECK(cudaFree(host_img->data));
CUDA_CHECK(cudaFree(dev_img));
free(host_img);
}
// Read input.ppm, blur it on the GPU (timed with CUDA events), write output.ppm.
int main(){
// read
PPMImage *image;
image = readPPM("input.ppm");
// record execution time
float time;
cudaEvent_t start, stop;
CUDA_CHECK(cudaEventCreate(&start));
CUDA_CHECK(cudaEventCreate(&stop));
CUDA_CHECK(cudaEventRecord(start, 0));
your_gaussian_blur_func(image);
CUDA_CHECK(cudaEventRecord(stop, 0));
CUDA_CHECK(cudaEventSynchronize(stop));
CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
printf("Time to generate: %3.1f ms \n", time);
// write
writePPM("output.ppm",image);
// cleanup (previously the events and the image were leaked)
CUDA_CHECK(cudaEventDestroy(start));
CUDA_CHECK(cudaEventDestroy(stop));
free(image->data);
free(image);
return 0;
}
20,588 | //adding two arrays and storing the results in a third array using CUDA
//(Unified Memory Construct)
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Elementwise vector addition c = a + b over Num ints.
// Grid-stride loop: correct for any launch configuration.
__global__ void add(int *a, int *b, int *c, int Num) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < Num; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}
// Add two 100-element vectors using unified (managed) memory and print the sums.
int main() {
    // number of elements
    int Num = 100;
    // size of the arrays in bytes
    size_t bytes = Num*sizeof(int);
    // managed pointers, accessible from both host and device
    int *d_a, *d_b, *d_c;
    cudaMallocManaged(&d_a, bytes);
    cudaMallocManaged(&d_b, bytes);
    cudaMallocManaged(&d_c, bytes);
    // initialize inputs: a[i] = b[i] = i + 1
    for(int i = 1; i <= Num; ++i){
        d_a[i-1] = i;
        d_b[i-1] = i;
    }
    // number of threads per block
    int blockSize = 10;
    // number of blocks in the grid (ceiling division)
    int gridSize = (Num + blockSize - 1) / blockSize;
    // launch kernel
    add<<<gridSize,blockSize>>>(d_a,d_b,d_c,Num);
    // managed memory must not be read by the host until the kernel finishes
    cudaDeviceSynchronize();
    // print the results
    // BUG FIX: previously printed d_c[0] on every iteration instead of d_c[i].
    for(int i = 0;i < Num; i++){
        printf("%d\n", d_c[i]);
    }
    // free unified memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
20,589 | /*
============================================================================
Name : last.cu
Author : christopher
Version :
Copyright : @ copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
#define threads_per_block 256
#define threads_per_warp 32
//#include "/home/chris/Downloads/cuPrintf.cu"
//#include "/home/chris/Downloads/cuPrintf.cuh"
// In-place elementwise product over one shared-memory tile: sh_b[i] *= sh_a[i].
// Each thread owns 'multiplier' consecutive entries; entries at i >= rows are
// forced to zero via the (i < rows) mask. Result is left in sh_b.
__device__ void hadamard_product_small(double* sh_a, double* sh_b,
		int multiplier, int rows) {
	const int tid = threadIdx.y * blockDim.x + threadIdx.x;
	const int first = tid * multiplier;
	for (int i = first; i < first + multiplier; i++) {
		sh_b[i] = sh_b[i] * sh_a[i] * (i < rows);
	}
}
// Warp-granular pairwise reduction of a threads_per_block-sized shared slice.
// Even-numbered groups of i threads add their neighbour group's entries, odd
// groups then zero themselves; on exit thread 0 accumulates the surviving
// partial sums in sha[0:threads_per_warp] into 'result' (added, not assigned).
// The (start + thread_id + i < rows) mask discards contributions past the
// logical end of the data.
// NOTE(review): assumes the block supplies exactly threads_per_block threads
// and that all of them reach the __syncthreads() calls -- confirm at call sites.
__device__ void array_sum_small(double* sha, double& result, int multiplier,
int rows, int start) {
int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
// start the computations
for (int i = threads_per_warp; i < threads_per_block; i = i * 2) {
// switch 1 : even groups add their neighbour group's contents
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// thread_id % i == even
// add the "more next vector"
sha[thread_id] = sha[thread_id]
+ sha[i + thread_id] * (start + thread_id + i < rows);
break;
default:
// thread_id % i == odd
// do nothing
break;
}
__syncthreads();
// switch2 : odd groups clean up their content
switch ((int) floor(thread_id / (double) i) % 2) {
case 0:
// thread_id % i == even
// do nothing
break;
default:
// thread_id % i == odd
// clean up
sha[thread_id] = 0;
//__syncthreads();
break;
}
__syncthreads();
}
// loop ended, sha[0:threads_per_warp] got the sum
if (thread_id == 0) {
for (int i = 0; i < threads_per_warp; i++) {
result = result + sha[i];
}
}
}
// One block per group of 'cols_per_block' matrix columns. For each column c
// the block computes dot(column, vector-slice) * sigm[...] and writes it to
// result[blockIdx.x * cols_per_block + c].
// Dynamic shared memory layout (launcher must supply
// 3 * threads_per_block * multiplier doubles):
//   sh_m : tile of the matrix column, zero-padded past 'rows'
//   sh_v : matching tile of the vector
//   res  : per-sub-slice partial sums, folded by thread 0
__global__ void array_mult(double* matrix, double* vector, double* result,
int rows, int cols_per_block, int multiplier, double* sigm) {
double* a = &matrix[blockIdx.x * rows * cols_per_block];
//result[0] = 0;
extern __shared__ double shared[];
double* sh_m = shared;
double* sh_v = &sh_m[threads_per_block * multiplier];
double* res = &sh_v[threads_per_block * multiplier];
// each thread owns indices thread_id*multiplier .. thread_id*multiplier+multiplier-1
int thread_id = threadIdx.x;
for (int c = 0; c < cols_per_block; c++) {
// for each col that every block must deal with , do the following :
// load from global to shared mem (zero-masked past 'rows')
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
sh_m[i] = a[i + c * rows] * (i < rows);
}
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
sh_v[i] = vector[i + c * rows] * (i < rows);
}
__syncthreads();
// find the hadamard product (result left in sh_v)
hadamard_product_small(sh_m, sh_v, multiplier, rows);
__syncthreads();
// initialize shared vector res with zeros
for (int i = thread_id * multiplier;
i < thread_id * multiplier + multiplier; i++) {
res[i] = 0;
}
__syncthreads();
// reduce each threads_per_block-sized sub-slice of sh_v into res[i]
for (int i = 0; i < multiplier; i++) {
array_sum_small(&sh_v[i * threads_per_block], res[i], multiplier,
rows, i * threads_per_block);
}
__syncthreads();
// thread 0 folds the per-slice sums and applies the sigmoid-derivative factor
if (thread_id == 0) {
for (int i = 1; i < multiplier; i++) {
res[0] += res[i];
}
result[blockIdx.x * cols_per_block + c] = res[0]
* sigm[blockIdx.x * cols_per_block + c];
}
}
}
using namespace std;
double getRandom(int min, int max);
double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector);
int get_threads_per_cols(int cols);
int get_wSize_on_layer(int l, int* sizes);
int get_dSize_on_layer(int l, int* sizes);
double* hadamard_product(int size, double* a, double* b);
void backpropagate(double** delta, double** sigm_derivative,double** w, int* sizeOfLayers, int numOfLayers) ;
// Benchmark driver: runs the backpropagation delta computation once on the
// GPU (array_mult kernels on a private stream, timed with gettimeofday) and
// once on the CPU (backpropagate), prints the first deltas of layer 0 from
// each for eyeballing, and reports the relative acceleration.
int main(void) {
	struct timeval t1, t2;
	double time, time_c, time_h;
	int num_of_layers = 4;
	int* sizes = new int[num_of_layers];
	cudaStream_t default_stream;
	cudaStreamCreate(&default_stream);
	sizes[0] = 9000;
	sizes[1] = 90;
	sizes[2] = 90;
	sizes[3] = 10;
	// serial (host) arrays
	double** w = new double*[num_of_layers - 1];
	double** delta = new double*[num_of_layers];
	double** sigm_der = new double*[num_of_layers];
	// cuda arrays
	double *w_c, *delta_c, *sigm_der_c;
	int w_length = 0, d_length = 0;
	w_length = get_wSize_on_layer(num_of_layers - 1, sizes);
	// BUG FIX: d_length is the total neuron count, so it must come from
	// get_dSize_on_layer; the previous get_wSize_on_layer(num_of_layers, ...)
	// call read past the end of 'sizes' and mis-sized delta_c / sigm_der_c.
	d_length = get_dSize_on_layer(num_of_layers, sizes);
	// gpu mem allocation
	cudaMalloc((void**) &w_c, sizeof(double) * w_length);
	cudaMalloc((void**) &delta_c, sizeof(double) * d_length);
	cudaMalloc((void**) &sigm_der_c, sizeof(double) * d_length);
	// host mem allocation
	for (int i = 0; i < num_of_layers - 1; i++) {
		w[i] = new double[sizes[i] * sizes[i + 1]];
		for (int j = 0; j < sizes[i] * sizes[i + 1]; j++) {
			w[i][j] = 1;
		}
	}
	for (int i = 0; i < num_of_layers; i++) {
		delta[i] = new double[sizes[i]];
		for (int j = 0; j < sizes[i]; j++) {
			delta[i][j] = 1;
		}
	}
	for (int i = 0; i < num_of_layers; i++) {
		sigm_der[i] = new double[sizes[i]];
		for (int j = 0; j < sizes[i]; j++) {
			sigm_der[i][j] = 0.5;
		}
	}
	// backpropagate requires only the delta[sizes[num_of_layers-1]], so
	// we are not going to count the cudaMemcpy's of the rest of the data,
	// simply because they can happen after the cpu (host) updates the w's
	// and we get enough time from that point until we reach the
	// backpropagate function call.
	// copy w to the device
	for (int i = 0; i < num_of_layers - 1; i++) {
		cudaMemcpyAsync(&w_c[get_wSize_on_layer(i, sizes)], w[i],
				sizeof(double) * sizes[i] * sizes[i + 1],
				cudaMemcpyHostToDevice, default_stream);
	}
	// copy sigm_der to the device
	for (int i = 0; i < num_of_layers; i++) {
		cudaMemcpyAsync(&sigm_der_c[get_dSize_on_layer(i, sizes)], sigm_der[i],
				sizeof(double) * sizes[i], cudaMemcpyHostToDevice,
				default_stream);
	}
	// copies issued; wait for the stream to finish them (not counted in timing)
	cudaStreamSynchronize(default_stream);
	// now we may proceed to the backpropagation algorithm
	int multiplier = 0;
	gettimeofday(&t1, 0);
	// step 1 : copy the delta of the last layer into the gpu
	// cpu equivalent : delta[numOfLayers - 1] = d_L;
	cudaMemcpyAsync(&delta_c[get_dSize_on_layer(num_of_layers - 1, sizes)],
			delta[num_of_layers - 1], sizeof(double) * sizes[num_of_layers - 1],
			cudaMemcpyHostToDevice, default_stream);
	// step 2 : walk the layers backwards, one kernel launch per layer
	int bl = 0;
	for (int i = num_of_layers - 2; i >= 0; i--) {
		// w_d = matrix_vector_mull(sizeOfLayers[i + 1], sizeOfLayers[i + 2], w[i], delta[i + 1]);
		if(i>0){
			multiplier = get_threads_per_cols(sizes[i]);// multiplier = get_threads_per_cols(cols);
			bl = sizes[i];
		}else{
			multiplier = get_threads_per_cols(sizes[i+1]);
			bl = sizes[i+1];
		}
		array_mult<<<bl, threads_per_block,
				sizeof(double) * (3 * threads_per_block * multiplier),
				default_stream>>>(&w_c[get_wSize_on_layer(i, sizes)],
				&delta_c[get_dSize_on_layer(i + 1, sizes)],
				&delta_c[get_dSize_on_layer(i, sizes)], sizes[i + 1], 1,
				multiplier, &sigm_der_c[get_dSize_on_layer(i, sizes)]);
		// delta[i] = hadamard_product(sizeOfLayers[i + 1], w_d, sigm_derivative[i]);
		cudaStreamSynchronize(default_stream);
		cudaMemcpyAsync(delta[i], &delta_c[get_dSize_on_layer(i, sizes)],
				sizeof(double) * sizes[i], cudaMemcpyDeviceToHost,
				default_stream);
	}
	// wait until the last copy is completed
	cudaStreamSynchronize(default_stream);
	// done
	gettimeofday(&t2, 0);
	time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
			/ 1000.0;
	cout << "Parallel time is " << time << " millisec \n";
	time_c = time;
	// print a few results for debugging purposes
	cout<< "cuda results : \n";
	for (int i = 0; i < 1; i++) {
		for (int j = 0; j < 3; j++) {
			cout<< delta[i][j] << " ";
		}
	}
	cout<< "\n";
	// now the serial code
	gettimeofday(&t1, 0);
	backpropagate(delta, sigm_der,w,sizes,num_of_layers);
	gettimeofday(&t2, 0);
	time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec)
			/ 1000.0;
	cout << "serial time is " << time << " millisec \n";
	time_h = time;
	cout<< "cpu results : \n";
	for (int i = 0; i < 1; i++) {
		for (int j = 0; j < 3; j++) {
			cout<< delta[i][j] << " ";
		}
	}
	cout<< "\n";
	cout << "accelaration is " << (time_h-time_c)*100 << " % \n";
	cout << "SUCCESS epitelous";
	return 0;
}
// CPU reference backward pass: for each layer i from numOfLayers-2 down to 0,
// delta[i] = (w[i] * delta[i+1]) .* sigm_derivative[i].
// Each delta[i] is replaced with a freshly allocated array.
void backpropagate(double** delta, double** sigm_derivative,
		double** w, int* sizeOfLayers, int numOfLayers) {
	for (int layer = numOfLayers - 2; layer >= 0; layer--) {
		double* wd = matrix_vector_mull(sizeOfLayers[layer],
				sizeOfLayers[layer + 1], w[layer], delta[layer + 1]);
		delta[layer] = hadamard_product(sizeOfLayers[layer], wd,
				sigm_derivative[layer]);
		delete[] wd;
	}
}
// Elementwise (Hadamard) product of two length-'size' vectors.
// Returns a newly allocated array (a .* b in matlab notation); caller frees.
double* hadamard_product(int size,double* a, double* b) {
	double* out = new double[size];
	for (int idx = 0; idx < size; ++idx)
		out[idx] = a[idx] * b[idx];
	return out;
}
// Multiply the transpose view of a rows x cols matrix (stored row-major as
// matrix[i*cols + j]) by a length-'rows' vector: res[j] = sum_i m[i][j]*v[i].
// Returns a newly allocated "cols x 1" vector; caller frees with delete[].
// BUG FIX: the previous implementation built a temporary column, then
// reassigned the pointer with hadamard_product's fresh allocation, leaking
// one rows-sized array per column. The accumulation is now done directly.
double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector) {
	double* res = new double[cols];
	for (int j = 0; j < cols; j++) {
		double acc = 0.0;
		for (int i = 0; i < rows; i++) {
			acc += matrix[i * cols + j] * vector[i];
		}
		res[j] = acc;
	}
	return res;
}
// Pseudo-random double uniformly drawn from [min, max]
// (resolution limited by rand(); the *100/100 dance is kept from the original).
double getRandom(int min, int max) {
	const double unit = (double) rand() / (double) RAND_MAX;
	return (((max - min) * unit + min) * 100) / 100;
}
// Elements each thread must cover so one threads_per_block-thread block spans
// 'cols' entries: ceil(cols / threads_per_block), with a minimum of 1.
int get_threads_per_cols(int cols) {
	if (cols < threads_per_block)
		return 1;
	const double ratio = cols / (double) threads_per_block;
	int per_thread = (int) floor(ratio);
	if (ratio - floor(ratio))
		per_thread++;  // round up when cols is not an exact multiple
	return per_thread;
}
// Offset (in elements) of layer l's weight matrix inside the flat weight
// buffer: sum of sizes[i]*sizes[i+1] for all layers before l.
int get_wSize_on_layer(int l, int* sizes) {
	int offset = 0;
	for (int layer = 0; layer < l; layer++)
		offset += sizes[layer] * sizes[layer + 1];
	return offset;
}
// Offset (in elements) of layer l's delta vector inside the flat delta
// buffer: sum of the sizes of all layers before l.
int get_dSize_on_layer(int l, int* sizes) {
	int offset = 0;
	for (int layer = 0; layer < l; layer++)
		offset += sizes[layer];
	return offset;
}
|
20,590 | #include <stdio.h>
// Each thread computes one C(i, j); each thread's inner loop runs K times.
// e.g. C(0, 0) = row 0 of A times column 0 of B
// Naive GEMM: C[M x N] = A[M x K] * B[K x N], one thread per output element.
// Thread y-coordinate selects the row, x-coordinate the column.
__global__ void matrixMultiply(float *A, float *B, float *C,
int M, int K, int N) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N)
        return; // grid may overhang the matrix edges
    float acc = 0.0f;
    for (int k = 0; k < K; ++k)
        acc += A[row * K + k] * B[k * N + col];
    C[row * N + col] = acc;
}
// Host driver: multiplies a fixed 2x3 matrix by a 3x2 matrix on the GPU and
// prints the 2x2 product.
int main(int argc, char **argv) {
    float A[] = {2, 3, -1, 6, 1, -2}; // M x K = 2 x 3
    float B[] = {4, -5, -3, 0, 1, 2}; // K x N = 3 x 2
    float C[4] = {0};
    const int M = 2;
    const int K = 3;
    const int N = 2;
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc((void**)&d_a, M * K * sizeof(float));
    cudaMalloc((void**)&d_b, K * N * sizeof(float));
    cudaMalloc((void**)&d_c, M * N * sizeof(float));
    cudaMemcpy(d_a, A, sizeof(float) * M * K, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, sizeof(float) * K * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, C, sizeof(float) * M * N, cudaMemcpyHostToDevice);
    // 16x16 threads per block; grid sized by ceiling division.
    dim3 dim_block(16, 16, 1);
    dim3 dim_grid((N - 1) / 16 + 1, (M - 1) / 16 + 1, 1);
    matrixMultiply<<<dim_grid, dim_block>>>(d_a, d_b, d_c, M, K, N);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(C, d_c, sizeof(float) * M * N, cudaMemcpyDeviceToHost);
    for (int i = 0; i < M * N; i++) {
        printf("%f ", C[i]);
    }
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
20,591 | #include <cuda.h>
#include <cuda_runtime_api.h>
// Tunables for the synthetic arithmetic workload below.
#define N_FLOPS_PER_THREAD 784
#define N_LOOPS 1
// FLOPS_BLOCK expands to a chain of 16 dependent single-precision
// multiply/multiply-add statements over registers reg0..reg7; the data
// dependencies keep the compiler from collapsing repeated expansions.
#define FLOPS_BLOCK \
reg0 = reg1 * reg2 + reg3; \
reg5 = reg6 * reg6; \
reg1 = reg2 * reg3 + reg4; \
reg6 = reg7 * reg7; \
reg2 = reg3 * reg4 + reg5; \
reg7 = reg0 * reg0; \
reg3 = reg4 * reg5 + reg6; \
reg0 = reg1 * reg1; \
reg4 = reg5 * reg6 + reg7; \
reg1 = reg2 * reg2; \
reg5 = reg6 * reg7 + reg0; \
reg2 = reg3 * reg3; \
reg6 = reg7 * reg0 + reg1; \
reg3 = reg4 * reg4; \
reg7 = reg0 * reg1 + reg2; \
reg4 = reg5 * reg5;
// Synthetic per-thread arithmetic workload: seeds eight registers from the
// thread index (so the chain cannot be constant-folded), runs N_LOOPS
// iterations of 32 FLOPS_BLOCK expansions, and folds every register into the
// returned float so no statement is dead code. The argument i is mixed into
// the result purely to keep different call sites distinguishable.
__device__ float flops(int i){
// Declare a bunch of registers and init from the thread index
float reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
// 1 FLOP per assignment = 8 FLOPs total
reg0 = reg1 = reg2 = reg3 = 9.765625e-10f * threadIdx.x;
reg4 = reg5 = reg6 = reg7 = 9.765625e-10f * threadIdx.y;
for(int it = 0; it < N_LOOPS; ++it){
FLOPS_BLOCK // 1
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK // 8
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK // 16
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK // 24
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK
FLOPS_BLOCK // 32
}
return reg0 + reg1 + reg2 + reg3 + reg4 + reg5 + reg6 + reg7 + i;
}
//-----------------------------------------------------------------------------
// Simple test kernel template for flops test
// @param d_counters - Counters to hold how many FLOPs a kernel does.
// @param n_threads - Total number of threads per block
//-----------------------------------------------------------------------------
__global__
void branch_perf_kernel(float* d_counters, int num_branches) {
// Deliberately divergent dispatch: lanes within a warp select up to
// 'num_branches' different switch cases, so the warp serializes one flops()
// call per distinct case. Do NOT "simplify" the switch to a single
// flops(threadIdx.x % num_branches + 1) call -- that would remove exactly
// the divergence this micro-benchmark measures.
int i = blockIdx.x * blockDim.x + threadIdx.x;
float temp;
switch((int)threadIdx.x%num_branches){
case 0: temp = flops(1); break;
case 1: temp = flops(2); break;
case 2: temp = flops(3); break;
case 3: temp = flops(4); break;
case 4: temp = flops(5); break;
case 5: temp = flops(6); break;
case 6: temp = flops(7); break;
case 7: temp = flops(8); break;
case 8: temp = flops(9); break;
case 9: temp = flops(10); break;
case 10: temp = flops(11); break;
case 11: temp = flops(12); break;
case 12: temp = flops(13); break;
case 13: temp = flops(14); break;
case 14: temp = flops(15); break;
case 15: temp = flops(16); break;
case 16: temp = flops(17); break;
case 17: temp = flops(18); break;
case 18: temp = flops(19); break;
case 19: temp = flops(20); break;
case 20: temp = flops(21); break;
case 21: temp = flops(22); break;
case 22: temp = flops(23); break;
case 23: temp = flops(24); break;
case 24: temp = flops(25); break;
case 25: temp = flops(26); break;
case 26: temp = flops(27); break;
case 27: temp = flops(28); break;
case 28: temp = flops(29); break;
case 29: temp = flops(30); break;
case 30: temp = flops(31); break;
case 31: temp = flops(32); break;
default: temp = 0.0f;
}
// Store the result so the arithmetic cannot be optimized away.
d_counters[i] = temp;
}
|
20,592 | #include "gol_gpu.cuh"
|
20,593 | /*
Test Programm nach:
https://www.thomas-krenn.com/de/wiki/CUDA_Programmierung
*/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
// Vars
// Host-Vars
int* h_A;
int* h_B;
int* h_C;
// Device-Vars
int* d_A;
int* d_B;
int* d_C;
// Prototypes
void RandomInit(int* data, int n);
int CheckResults(int* A, int* B, int* C, int n);
// Kernel
// Elementwise C = A + B over N ints; one thread per element, guarded
// against the grid overhanging the array.
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= N)
		return;
	C[idx] = A[idx] + B[idx];
}
// Host driver: adds two 100-million-element random int vectors on the GPU
// and verifies the result against a CPU check.
int main(void) {
printf("Vector addtion\n");
//int i;
int N = 100000 * 1000;
size_t size = N * sizeof(int);
// Allocate memory on the host
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Fill the inputs with random values
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate memory on the device
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
// Copy the input vectors to the device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Kernel launch
// The Nvidia GTX 1080 TI supports 1024 threads per block
int threadsPerBlock = 1024;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
printf("BlocksPerGrid = %i, ThreadsPerBlock = %i\n\n", blocksPerGrid, threadsPerBlock);
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Wait for the device to finish
cudaDeviceSynchronize();
// Copy the result back to the host
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Check the results
if (CheckResults(h_A, h_B, h_C, N) == 0)
printf("Alles ok!\n");
else
printf("Fehler\n");
// Free memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
// Vector mit Zufallszahlen füllen
// Fill an n-element array with pseudo-random values in [0, 99].
void RandomInit(int* data, int n) {
	for (int idx = 0; idx < n; ++idx)
		data[idx] = rand() % (int) 100;
}
// Ergebnis Prüfen
// Verify C[i] == A[i] + B[i] for all n elements.
// Returns 0 on success, -1 at the first mismatch.
int CheckResults(int* A, int* B, int* C, int n) {
	for (int idx = 0; idx < n; ++idx) {
		if (C[idx] != A[idx] + B[idx])
			return -1;
	}
	return 0;
}
|
20,594 | #include "includes.h"
// Copy numElem single-precision values into the double-precision value array
// of a COO matrix (implicit float -> double widening per element).
// 'dim' is accepted for interface compatibility but unused here.
__global__ void FullToCOO(int numElem, float* H_vals, double* hamilValues, int dim)
{
	const int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= numElem)
		return;
	hamilValues[idx] = H_vals[idx];
}
20,595 | #define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 32
#define TILE_WIDTH 30
#define TILE_HEIGHT 30
#define NODATA -9999
#define FILTER_RADIUS 1
// -------------------------------------------------Neighbours access order is // 1 2 3
__constant__ int off_x[8] = {-1, 0, 1,1,1,0,-1,-1}; // 8 4
__constant__ int off_y[8] = {-1,-1,-1,0,1,1, 1, 0}; // 7 6 5
// NDVI kernel: computes the Normalised Difference Vegetation Index from the
// two band images m_l and m_r (w x h shorts, row-major) and writes a scaled
// integer NDVI into m_wi. Blocks cover TILE_WIDTH x TILE_HEIGHT tiles with a
// FILTER_RADIUS halo, matching kernel_sobel's launch geometry.
__global__ void kernel_ndvi( short *m_l,short *m_r,short *m_wi, const int w, const int h)
{
// Image coordinates of this thread (shifted back by the halo radius).
int x = blockIdx.x * TILE_WIDTH + threadIdx.x - FILTER_RADIUS;
int y = blockIdx.y * TILE_HEIGHT + threadIdx.y - FILTER_RADIUS;
//Clamp to the center so no thread ever indexes outside the image
x = max(FILTER_RADIUS, x);
x = min(x, w - FILTER_RADIUS - 1);
y = max(FILTER_RADIUS, y);
y = min(y, h - FILTER_RADIUS - 1);
// Flat index into the row-major image.
unsigned int i_img = y * w + x;
short v_l, v_r;
float vf_ndvi;
v_l = m_l[i_img];
v_r = m_r[i_img];
if (v_l != NODATA && v_r != NODATA)
{
// NDVI = (L - R) / (L + R). NOTE(review): v_l + v_r == 0 would divide by
// zero — presumably the input bands exclude that case; verify upstream.
vf_ndvi = __int2float_rn ( v_l - v_r ) / __int2float_rn ( v_l + v_r );
// Rescale [-1, 1] to [0, 2000], truncating toward zero.
m_wi[i_img] = __float2int_rz (vf_ndvi * 1000.0) + 1000;
}else{
// Propagate the no-data marker.
m_wi[i_img] = NODATA;
}
}
// Sobel gradient magnitude over the NDVI image m_wi (w x h shorts).
// Launch: BLOCK_WIDTH x BLOCK_HEIGHT threads per block. Each block stages a
// halo'd tile into shared memory; only the interior threads write an output
// pixel, border threads exist solely to load the halo.
__global__ void kernel_sobel( short *m_wi, float *m_sobel, const int w, const int h)
{
    __shared__ short sh_wi[BLOCK_WIDTH * BLOCK_HEIGHT];
    int x = blockIdx.x * TILE_WIDTH + threadIdx.x - FILTER_RADIUS;
    int y = blockIdx.y * TILE_HEIGHT + threadIdx.y - FILTER_RADIUS;
    // Clamp to the valid interior so halo reads never leave the image.
    x = max(FILTER_RADIUS, x);
    x = min(x, w - FILTER_RADIUS - 1);
    y = max(FILTER_RADIUS, y);
    y = min(y, h - FILTER_RADIUS - 1);
    unsigned int i_img = y * w + x;
    // Shared-memory index: the row stride of the tile is the block WIDTH.
    // (The original used blockDim.y here, which only worked because the
    // block happens to be square.)
    unsigned int i_sh = threadIdx.y * blockDim.x + threadIdx.x;
    sh_wi[i_sh] = m_wi[i_img];
    __syncthreads();   // whole tile staged before any neighbour reads
    // Halo threads stop here; interior threads apply the 3x3 stencil.
    if ( threadIdx.x == 0 || threadIdx.x == BLOCK_WIDTH -1 || threadIdx.y == 0 || threadIdx.y == BLOCK_HEIGHT - 1)
    {}else{
        // Horizontal Sobel response.
        float sobel_x = __int2float_rn (
            -sh_wi[i_sh - blockDim.x - 1] + sh_wi[i_sh - blockDim.x + 1]
            -sh_wi[i_sh - 1] * 2 + sh_wi[i_sh + 1] * 2
            -sh_wi[i_sh + blockDim.x - 1] + sh_wi[i_sh + blockDim.x + 1] );
        // Vertical Sobel response.
        float sobel_y = __int2float_rn (
            sh_wi[i_sh - blockDim.x - 1] + sh_wi[i_sh - blockDim.x] * 2 + sh_wi[i_sh - blockDim.x + 1]
            -sh_wi[i_sh + blockDim.x - 1] - sh_wi[i_sh + blockDim.x] * 2 - sh_wi[i_sh + blockDim.x + 1] );
        // Gradient magnitude.
        m_sobel[i_img] = sqrtf ( sobel_x * sobel_x + sobel_y * sobel_y ) ;
    }
}
|
20,596 | /* This is a demonstration file that shows how the prime test functions work. */
#include "primetest.cuh"
#include <iostream>
#include <random>
#include <chrono>
// Demonstration driver: compares the naive and Miller-Rabin primality tests
// (k = 1 and k = 4 rounds) and demonstrates/benchmarks naive factorization.
int main() {
    // Initialize RNG.
    auto seed = std::chrono::system_clock::now().time_since_epoch().count();
    std::mt19937 generator(seed);
    std::uniform_int_distribution<unsigned int> dist(0, 100);
    // The known number of primes less than 10,100,1000,10000,100000,1000000
    unsigned int kPrimes[6] = {4,25,168,1229,9592,78498};
    unsigned int pCountN[6]; // Number of primes counted for naive test.
    unsigned int pCountM[6]; // Number of primes counted for Miller-Rabin test.
    float msN[6]; // Time of execution for naive test in ms.
    float msM[6]; // Time of execution for Miller-Rabin test in ms.

    // Compare the naive and Miller-Rabin primality test. k = 1.
    int size = 10;
    unsigned int *out, *in;
    for (int i = 0; i < 6; i++) {
        out = new unsigned int[size];
        in = new unsigned int[size];
        for (int j = 0; j < size; j++)
            in[j] = j;
        // Naive
        msN[i] = primetest_naive(out, in, size, 1);
        pCountN[i] = 0;
        for (int j = 0; j < size; j++)
            pCountN[i] += out[j];
        // Miller Rabin
        msM[i] = primetest_miller(out, in, size, 1, 0, 1);
        pCountM[i] = 0;
        for (int j = 0; j < size; j++)
            pCountM[i] += out[j];
        // The original leaked both buffers on every iteration.
        delete[] out;
        delete[] in;
        size *= 10;
    }
    // Print results
    for (int i = 0; i < 6; i++) {
        std::printf("%5u, %5u, %5u, %5f, %5f\n", kPrimes[i], pCountN[i], pCountM[i], msN[i], msM[i]);
    }
    std::printf("\n");

    // Compare the naive and Miller-Rabin primality test. k = 4.
    size = 10;
    for (int i = 0; i < 6; i++) {
        out = new unsigned int[size];
        in = new unsigned int[size];
        for (int j = 0; j < size; j++)
            in[j] = j;
        // Naive
        msN[i] = primetest_naive(out, in, size, 1);
        pCountN[i] = 0;
        for (int j = 0; j < size; j++)
            pCountN[i] += out[j];
        // Miller Rabin, 4 rounds with a random witness offset.
        msM[i] = primetest_miller(out, in, size, 4, dist(generator), 1);
        pCountM[i] = 0;
        for (int j = 0; j < size; j++)
            pCountM[i] += out[j];
        delete[] out;
        delete[] in;
        size *= 10;
    }
    // Print results
    for (int i = 0; i < 6; i++) {
        std::printf("%5u, %5u, %5u, %5f, %5f\n", kPrimes[i], pCountN[i], pCountM[i], msN[i], msM[i]);
    }
    std::printf("\n");

    // Demonstrate factorization.
    size = 10;
    in = new unsigned int[size];
    unsigned int **fout = new unsigned int*[size];
    for (int i = 0; i < size; i++) {
        in[i] = dist(generator);
    }
    float msF = factor_naive(fout, in, size);
    (void)msF;  // timing not reported in the demo section
    // Print results: fout[i][0] = factor count, then (prime, exponent) pairs.
    for (int i = 0; i < size; i++) {
        std::printf("Value: %u\n", in[i]);
        unsigned int nFact = fout[i][0];
        for (unsigned int j = 0; j < nFact; j++) {
            std::printf(" %u, %u\n", fout[i][2*j+1], fout[i][2*j+2]);
        }
        std::printf("--------------\n");
    }
    std::printf("\n");
    // Free the demo buffers. NOTE(review): assumes factor_naive allocates
    // each fout[i] with new[] — confirm against primetest.cuh.
    for (int i = 0; i < size; i++)
        delete[] fout[i];
    delete[] fout;   // the original used delete() on new[]-allocated arrays
    delete[] in;

    // Scaling analysis of factorization.
    float msFarr[3];
    for (int i = 0; i < 3; i++) {
        fout = new unsigned int*[size];
        in = new unsigned int[size];
        for (int j = 0; j < size; j++)
            in[j] = j;
        msFarr[i] = factor_naive(fout, in, size);
        // Release this round's buffers (leaked by the original).
        for (int j = 0; j < size; j++)
            delete[] fout[j];
        delete[] fout;
        delete[] in;
        size *= 10;
    }
    // Print results
    for (int i = 0; i < 3; i++) {
        std::printf("%f\n", msFarr[i]);
    }
    return 0;
}
20,597 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Computes one element of the product slice handled by `stream`:
// matR[nPr + idy*ncol + idx] = (row idy of matA) dot (column idx of matB),
// where matA already points at this stream's row slice. There is NO bounds
// guard, so the launch grid must cover ncol x nRows exactly — TODO confirm
// all callers. Assumes matB is square with side ncol (row stride == ncol).
__global__ void mult_mat(float *matA, float *matB, float *matR, int ncol, int nRows, int stream) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;   // output column
int idy = blockIdx.y * blockDim.y + threadIdx.y;   // output row within slice
float res=0;
int nPr = stream*ncol*nRows;   // element offset of this stream's result slice
for(int i=0; i<ncol; i++){
res += matA[ncol*idy+i] * matB[i*ncol+idx];
}
matR[nPr+idy*ncol+idx] = res;
// Device-side printf: debugging only — serialises execution and floods stdout.
if(stream==0)
printf("[%d] %.2f ", nPr+idy*ncol+idx, matR[nPr+idy*ncol+idx]);
}
float *matA, *matR, *matB;
int numColA, numRowA, numColB, numRowB, nCore, nRows;
FILE *arq;
// Reads a matrix from the already-open global FILE* `arq`: two leading ints
// (rows, cols) followed by rows*cols floats. Closes the file and returns a
// malloc'd row-major buffer the caller must free().
// NOTE(review): fscanf return values are unchecked — malformed input leaves
// the buffer partially uninitialised. Also overwrites the global numColB on
// EVERY call, so after reading A then B it holds B's column count.
float* read(){
int numCol, numRow;
fscanf(arq,"%d",&numRow);
fscanf(arq,"%d",&numCol);
numColB = numCol;
float *mat = (float*)malloc(sizeof(float)*numCol*numRow);
for(int i=0;i<numCol*numRow;i++){
fscanf(arq,"%f",&mat[i]);
}
fclose(arq);
return mat;
}
// Print the first `rows` rows of the global result matrix matR, one matrix
// row per output line (column count taken from the global numColB).
void print(int rows){
    int total = numColB * rows;
    for (int idx = 0; idx < total; ++idx) {
        if (idx % numColB == 0)
            printf("\n");
        printf("%.2f ", matR[idx]);
    }
}
/* Multiply matA (numRowA x numColA) by matB (numRowB x numColB) on the GPU,
 * splitting matA row-wise across nCore CUDA streams so the H2D copy, kernel
 * and D2H copy of different slices can overlap. Result lands in matR. */
void multiply(){
    /* Device buffers */
    float *matA_d;
    cudaMalloc((void **) &matA_d, numColA*numRowA*sizeof(float));
    float *matR_d;
    cudaMalloc((void **) &matR_d, numColB*numRowA*sizeof(float));
    float *matB_d;
    /* B needs numRowB rows (the original sized it with numRowA; the two are
     * forced equal in main(), but this is the correct extent). */
    cudaMalloc((void **) &matB_d, numColB*numRowB*sizeof(float));
    cudaStream_t vectorOfStreams[nCore];
    for(int stream_id=0; stream_id<nCore; stream_id++)
        cudaStreamCreate(&vectorOfStreams[stream_id]);
    /* Stage each row slice of A asynchronously in its own stream. */
    for(int stream_id=0; stream_id<nCore; stream_id++)
        cudaMemcpyAsync(&matA_d[nRows*stream_id*numColA],&matA[nRows*stream_id*numColA],nRows*numColA*sizeof(float),
            cudaMemcpyHostToDevice,vectorOfStreams[stream_id]);
    cudaMemcpy(matB_d, matB, numColB*numRowB*sizeof(float),cudaMemcpyHostToDevice);
    dim3 block(32,32);
    dim3 grid(numColB/32, nRows/32);   /* nRows == numRowA/nCore */
    for(int stream_id=0; stream_id<nCore; stream_id++){
        /* A slice starts nRows*stream_id ROWS in, i.e. nRows*stream_id*numColA
         * floats. The original advanced the pointer by elements instead of
         * rows, so every stream after the first read the wrong part of A. */
        mult_mat<<<grid,block,0,vectorOfStreams[stream_id]>>>
            (&matA_d[nRows*stream_id*numColA],matB_d,matR_d,numColB,nRows,stream_id);
    }
    cudaDeviceSynchronize();
    /* Copy each result slice back to the host. */
    for(int stream_id=0; stream_id<nCore; stream_id++){
        cudaMemcpyAsync(&matR[nRows*stream_id*numColB], &matR_d[nRows*stream_id*numColB],
            numColB*nRows*sizeof(float),cudaMemcpyDeviceToHost, vectorOfStreams[stream_id]);
    }
    cudaDeviceSynchronize();
    /* Release streams (leaked by the original) and device buffers. */
    for(int stream_id=0; stream_id<nCore; stream_id++)
        cudaStreamDestroy(vectorOfStreams[stream_id]);
    cudaFree(matA_d);
    cudaFree(matB_d);
    cudaFree(matR_d);
}
// Entry point: argv[1]/argv[2] name the input files for matrices A and B.
// Reads both, multiplies them with 4 CUDA streams, prints the first 128 rows
// of the result and appends the elapsed wall time to time.txt.
// NOTE(review): argv[1]/argv[2] are dereferenced without an argc check, and
// all dimensions are forced to numColB — the column count of the LAST file
// read — so both matrices are assumed square and equally sized.
int main(int argc, char *argv[]) {
printf("Aplicação -/- -/- -/- %s / %s",argv[1],argv[2]);
arq = fopen(argv[1],"r");
if (arq == NULL) {
printf ("Houve um erro ao abrir o arquivo.\n");
return 1;
}
matA = read();
arq = fopen(argv[2],"r");
if (arq == NULL) {
printf ("Houve um erro ao abrir o arquivo.\n");
return 1;
}
matB = read();
// Force square dimensions from B's column count (see read()).
numColA = numRowA = numRowB = numColB;
arq = fopen("time.txt","a");
struct timeval utime;
double tstart, tend;
gettimeofday(&utime, NULL);
tstart = utime.tv_sec + ( utime.tv_usec / 1000000.0 );
nCore = 4;              // number of CUDA streams / row slices
nRows = numRowA/nCore;  // rows handled per stream
matR = (float*) malloc(sizeof(float)*numRowA*numColB);
multiply();
print(128);
free(matA);
free(matB);
free(matR);
gettimeofday(&utime, NULL);
tend = utime.tv_sec + ( utime.tv_usec / 1000000.0 );
printf("\n\nExecution time: %.4lf\n",tend-tstart);
fprintf(arq, "%.4lf\n",tend-tstart);
fclose(arq);
return 0;
}
|
20,598 | #include <stdio.h>
#include <curand_kernel.h>
#include <unistd.h>
#include <curand.h>
#define M 512
#define CUDART_PI_F 3.141592654f
// the CUDA kernel for vector addition
__global__ void sum(double *a, double *b, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = a[idx] + b[idx];
}
}
// the CUDA kernel for vector subtract
__global__ void subtract(double *a, double *b, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = a[idx] - b[idx];
}
}
// the CUDA kernel for vector multiply
__global__ void multi(double *a, double *b, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = a[idx] * b[idx];
}
}
// the CUDA kernel for element-wise vector divide
__global__ void divide(double *a, double *b, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = __ddiv_rn(a[idx], b[idx]);
}
}
// the CUDA kernel for vector exp
__global__ void cudaexp(double *a, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = exp(a[idx]);
}
}
// the CUDA kernel for vector log
__global__ void cudalog(double *a, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = log(a[idx]);
}
}
// the CUDA kernel for vector square root
__global__ void cudasqrt(double *a, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = sqrt(a[idx]);
}
}
// the CUDA kernel for gamma function
__global__ void cudagamma(double *a, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = tgamma(a[idx]);
}
}
// the CUDA kernel for gamma pdf
__global__ void cudagammapdf(double *a, double *out, int n, double k, double theta)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = (1 / (tgamma(k) * pow(theta, k))) * pow(a[idx],
(k - 1)) * exp(-a[idx] / theta);
}
}
// the CUDA kernel for beta pdf
__global__ void cudabetapdf(double *a, double *out, int n, double k, double theta)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = pow(a[idx], (k-1)) * pow((1 - a[idx]), (theta - 1))
/ (tgamma(k) * tgamma(theta)
/ tgamma(k + theta));
}
}
// the CUDA kernel for beta function
__global__ void cudabeta(double *a, double *b, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = tgamma(a[idx]) * tgamma(b[idx])
/ tgamma(a[idx] + b[idx]);
}
}
// the CUDA kernel for vector power
__global__ void cudapower(double *a, double *out, int n, double alpha)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = pow(a[idx], alpha);
}
}
// the CUDA kernel for vector plus constant
__global__ void cudavecincre(double *a, double *out, int n, double incre)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = a[idx] + incre;
}
}
// the CUDA kernel for normal pdf
// Evaluates the N(mean, sd^2) density at each a[idx].
// Fix: uses a double-precision 2*pi constant — the original folded the float
// macro CUDART_PI_F into this all-double expression, losing precision — and
// replaces pow(x, 2) with a plain multiply.
__global__ void cudanormdensity(double *a, double *out, int n,
double mean, double sd)
{
    const double TWO_PI = 6.283185307179586;   // 2*pi, double precision
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < n) {
        double dev = a[idx] - mean;            // deviation from the mean
        out[idx] = (1.0 / (sd * sqrt(TWO_PI)))
                 * exp(-(dev * dev) / (2.0 * sd * sd));
    }
}
// the CUDA kernel for normal CDF
__global__ void cudanormCDF(double *a, double *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = normcdf(a[idx]);
}
}
//the CUDA kernel for sample variance
__global__ void cuda_var(double *input, double *out, int n, double mean)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = pow(input[idx] - mean, 2);
}
}
// the kernel for vector reduction summation
// Each block sums a contiguous segment of 2*M inputs (two loads per thread)
// with a shared-memory tree reduction and emits ONE partial sum per block.
// Expected launch: gridDim.x = ceil(len / (2*M)), blockDim.x = M.
__global__ void cudareduction(double * input, double * output, int len)
{
// Load a segment of the input vector into shared memory
__shared__ double partialSum[2 * M];
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Out-of-range lanes contribute the additive identity (0.0).
if ((start + t) < len)
{
partialSum[t] = input[start + t];
}
else
{
partialSum[t] = 0.0;
}
if ((start + blockDim.x + t) < len)
{
partialSum[blockDim.x + t] = input[start + blockDim.x + t];
}
else
{
partialSum[blockDim.x + t] = 0.0;
}
// Traverse reduction tree. The barrier sits OUTSIDE the divergent branch, so
// every thread in the block reaches it on every iteration.
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
__syncthreads();
// Write the computed sum of the block to the output vector at correct index
if (t == 0 && (globalThreadId * 2) < len)
{
output[blockIdx.x] = partialSum[t];
}
}
// the CUDA kernel for vector subset copying
__global__ void vectorsubset(double *a, double *out, int n, int *index)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
out[idx] = a[index[idx] - 1];
}
}
/*
Following part contains the CUDA kernels invocation functions called
from the host side (CPU)
*/
// CUDA vector reduction summation kernel invocation function
extern "C" void cuda_sum(double *a, double *b, double *c, int n)
{
sum<<<(n + M - 1) / M, M>>>(a, b, c, n);
return;
}
// CUDA vector subtraction kernel invocation function
extern "C" void cuda_subtract(double *a, double *b, double *c, int n)
{
subtract<<<(n + M - 1) / M , M>>>(a, b, c, n);
return;
}
// CUDA vector multiplication kernel invocation function
extern "C" void cuda_multi(double *a, double *b, double *c, int n)
{
multi<<<(n + M - 1) / M, M>>>(a, b, c, n);
return;
}
// CUDA vector division kernel invocation function
extern "C" void cuda_divide(double *a, double *b, double *c, int n)
{
divide<<<(n + M - 1) / M, M>>>(a, b, c, n);
return;
}
// CUDA vector exponential kernel invocation function
extern "C" void cuda_exp(double *a, double *c, int n)
{
cudaexp<<<(n + M - 1) / M, M>>>(a, c, n);
return;
}
// CUDA vector logarithm kernel invocation function
extern "C" void cuda_log(double *a, double *c, int n)
{
cudalog<<<(n + M - 1) / M, M>>>(a, c, n);
return;
}
// CUDA vector square root kernel invocation function
extern "C" void cuda_sqrt(double *a, double *c, int n)
{
cudasqrt<<<(n + M - 1) / M, M>>>(a, c, n);
return;
}
// CUDA vector gamma function kernel invocation function
extern "C" void cuda_gamma(double *a, double *c, int n)
{
cudagamma<<<(n + M - 1) / M, M>>>(a, c, n);
return;
}
// CUDA vector beta function kernel invocation function
extern "C" void cuda_beta(double *a, double *b, double *c, int n)
{
cudabeta<<<(n + M - 1) / M, M>>>(a, b, c, n);
return;
}
// CUDA vector gamma pdf function kernel invocation function
extern "C" void cuda_gammapdf(double *a, double *c, int n, double k,
double theta)
{
cudagammapdf<<<(n + M - 1) / M, M>>>(a, c, n, k, theta);
return;
}
// CUDA vector beta pdf function kernel invocation function
extern "C" void cuda_betapdf(double *a, double *c, int n,
double k, double theta)
{
cudabetapdf<<<(n + M - 1) / M, M>>>(a, c, n, k, theta);
return;
}
// CUDA vector power kernel invocation function
extern "C" void cuda_power(double *a, double *c, int n, double alpha)
{
cudapower<<<(n + M - 1) / M, M>>>(a, c, n, alpha);
return;
}
// CUDA vector plus constant invocation function
extern "C" void cuda_vecincre(double *a, double *c, int n, double alpha)
{
cudavecincre<<<(n + M - 1) / M, M>>>(a, c, n, alpha);
return;
}
// CUDA vector normal density function kernel invocation function
extern "C" void cuda_normal_density(double *a, double *c, int n,
double mean, double sd)
{
cudanormdensity<<<(n + M - 1) / M, M>>>(a, c, n, mean, sd);
return;
}
// CUDA vector normal cumulative density function kernel invocation function
extern "C" void cuda_normal_CDF(double *a, double *c, int n)
{
cudanormCDF<<<(n + M - 1) / M, M>>>(a, c, n);
return;
}
// CUDA vector variance kernel invocation function
extern "C" void cudavariance(double *a, double *c, int n, double mean)
{
cuda_var<<<(n + M - 1) / M, M>>>(a, c, n, mean);
return;
}
// CUDA vector subset copy kernel invocation function
extern "C" void vector_subset(double *a, double *c, int n, int *index)
{
vectorsubset<<<(n + M - 1) / M, M>>>(a, c, n, index);
return;
}
// CUDA vector summation kernel invocation function
// Sums the n doubles in device array a and returns the total. Each block
// reduces 2*M elements; the per-block partials are finished on the host.
extern "C" double cuda_reduction(double *a, int n)
{
    if (n <= 0)
    {
        return 0.0;   // guard: original indexed hostOutput[0] even for n == 0
    }
    int numOutputElements = n / (M<<1);
    if (n % (M<<1))
    {
        numOutputElements++;   // ceil-divide
    }
    double * hostOutput = (double*) malloc(numOutputElements
        * sizeof(double));
    double * deviceOutput;
    cudaMalloc((void **)&deviceOutput, numOutputElements * sizeof(double));
    dim3 DimGrid( numOutputElements, 1, 1);
    dim3 DimBlock(M, 1, 1);
    cudareduction<<<DimGrid, DimBlock>>>(a, deviceOutput, n);
    // Blocking copy — also synchronizes with the kernel above.
    cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(double),
        cudaMemcpyDeviceToHost);
    // Finish the reduction over the per-block partial sums on the host.
    double total = hostOutput[0];
    for (int ii = 1; ii < numOutputElements; ii++)
    {
        total += hostOutput[ii];
    }
    cudaFree(deviceOutput);
    free(hostOutput);   // the original leaked this buffer on every call
    return total;
}
// Print GPU device information
// Fix: the size_t fields (totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem, textureAlignment) are printed with %zu — the original used
// %u, which misprints them on LP64 platforms where size_t is 64-bit.
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Name: %s\n", devProp.name);
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n",
            i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i,
            devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ?
        "Yes" : "No"));
    printf("Number of multiprocessors: %d\n",
        devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n",
        (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
// Enumerate every CUDA device on the system and dump its properties.
extern "C" void gpuquery()
{
    // Number of CUDA devices present.
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    // Query and print each device in turn.
    for (int dev = 0; dev < devCount; ++dev)
    {
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDevProp(props);
    }
}
//kernel to initialize curandState
// Gives each of the first `length` threads its own RNG stream: identical
// seed, per-thread subsequence id, zero offset.
__global__ void init(double seed, curandState_t* states, int length) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
// curand_init(seed, subsequence, offset, state)
if (id < length) curand_init(seed, id, 0, &states[id]);
}
//kernel to generate gamma random variable by using George Marsaglia and Wai Wan Tsang's method
// One Gamma(shape a, scale b) variate per thread into numbers[id], consuming
// the per-thread RNG state prepared by init(). For a > 1 the Marsaglia-Tsang
// squeeze/accept loop is used directly; for a <= 1 a Gamma(a+1) draw is
// converted down via the up^(1/a) boost.
__global__ void gammarand(curandState* states, double a, double b, double* numbers, int length)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < length){
if (a > 1)
{
double x, v, u;
double d = a - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
while (1){
// v = (1 + c*x)^3 from a squashed normal draw; reject non-positive v.
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
// Cheap squeeze test first, then the exact log acceptance test.
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
numbers[id] = b * d * v;   // scale by b
}
else
{
// Shape boost: sample Gamma(a + 1), then multiply by up^(1/a).
const double ap = a + 1.0;
double x, v, u, up;
double d = ap - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
up = curand_uniform_double(&states[id]);
while (1){
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
numbers[id] = b * d * v;
numbers[id] = numbers[id] * pow (up, 1.0 / a);
}
}
}
// CUDA gamma RNG kernel invocation function
// Draws n Gamma(shape a, scale b) variates into device array `number`.
extern "C" void gammarng(double a, double b, int n, double seed, double* number)
{
    curandState* states;
    cudaMalloc((void**) &states, n * sizeof(curandState));
    // Ceil-divide for the block count. The original launched n + M - 1
    // BLOCKS of M threads — roughly M times too many, enough to exceed the
    // grid limit for large n.
    init<<<(n + M - 1) / M, M>>>(seed, states, n);
    gammarand<<<(n + M - 1) / M, M>>>(states, a , b, number, n);
    cudaDeviceSynchronize();   // finish before releasing the RNG states
    cudaFree(states);          // the original leaked this allocation
}
//kernel to generate beta random variable by using beta-gamma relationship
__global__ void betarand(curandState* states, double a, double b, double* numbers, int length)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < length){
if (a > 1)
{
double x, v, u;
double d = a - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
while (1){
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
numbers[id] = 1 * d * v;
}
else
{
const double ap = a + 1;
double x, v, u, up;
double d = ap - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
up = curand_uniform_double(&states[id]);
while (1){
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
numbers[id] = 1 * d * v;
numbers[id] = numbers[id] * pow (up, 1.0 / a);
}
double y;
if (b > 1)
{
double x, v, u;
double d = b - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
while (1){
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
y = 1 * d * v;
}
else
{
const double bp = b + 1;
double x, v, u, up;
double d = bp - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
up = curand_uniform_double(&states[id]);
while (1){
do{
x = curand_normal_double(&states[id]);
v = 1.0 + c * x;
} while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&states[id]);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
y = 1 * d * v;
y = y * pow (up, 1.0 / b);
}
numbers[id] = numbers[id] / (numbers[id] + y);
}
}
// CUDA beta RNG kernel invocation function
// Draws n Beta(a, b) variates into device array `numbers`.
extern "C" void betarng(double a, double b, int n, double seed, double* numbers)
{
    curandState* states;
    cudaMalloc((void**) &states, n * sizeof(curandState));
    // Ceil-divide for the block count (the original launched n + M - 1
    // blocks of M threads each — ~M times too many).
    init<<<(n + M - 1) / M, M>>>(seed, states, n);
    betarand<<<(n + M - 1) / M, M>>>(states, a , b, numbers, n);
    cudaDeviceSynchronize();   // finish before releasing the RNG states
    cudaFree(states);          // the original leaked this allocation
}
// CUDA dirichlet kernel invocation function
// INCOMPLETE: only initializes the per-thread RNG states — no sampling kernel
// is ever launched, so `numbers`, `a` and `K` are currently unused.
// TODO: draw K independent Gamma(a, 1) variates per sample and normalize.
extern "C" void dirichletrng(double a, int K, int n, double seed, double* numbers)
{
    curandState* states;
    cudaMalloc((void**) &states, n * sizeof(curandState));
    // Ceil-divide for the block count (same launch-config fix as the other
    // *rng wrappers; the original launched n + M - 1 blocks).
    init<<<(n + M - 1) / M, M>>>(seed, states, n);
    cudaDeviceSynchronize();
    cudaFree(states);   // the original leaked this allocation
}
|
20,599 | #include <iostream>
#include <chrono>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include <algorithm>
#define BLOCKSIZE 128
// Binary combiner for the scan — MUST BE ASSOCIATIVE (here: integer sum).
__device__ inline int f(int a, int b){
return a + b;
}
/**
 * One Hillis-Steele prefix-scan step.
 * Since Hillis-Steele assumes as many concurrent processors as data lines,
 * the host double-buffers: nnew[i] = f(old[i - jump], old[i]) when the
 * partner exists, otherwise a plain copy, swapping the two arrays between
 * the log2(n) passes. Two DRAM accesses per thread make this a slow scan,
 * but it still beats a CPU loop for large n.
 **/
__global__ void prefix_scan(const int n, const int jump, int* old, int* nnew){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)   // tail guard: `n` was previously accepted but never used
        return;
    if (i - jump >= 0) {
        nnew[i] = f(old[i], old[i - jump]);
    } else {
        nnew[i] = old[i];
    }
}
// Driver: prefix-scans an array of 2^28 ones on the GPU and checks that
// result[i] == i + 1 (inclusive scan of all-ones).
int main(){
    const int n = (1 << 28);
    const int block_size = BLOCKSIZE;
    assert(n % block_size == 0);
    int *x = (int *) malloc(n * sizeof(int));
    assert(x != NULL);
    for(int i = 0; i < n; i++){
        x[i] = 1;
    }
    int *d_x, *d_y;
    cudaMalloc(&d_x, n * sizeof(int));
    cudaMalloc(&d_y, n * sizeof(int));
    // Copy the full int array. The original copied n * sizeof(float), which
    // only worked because sizeof(float) == sizeof(int).
    cudaMemcpy(d_x, x, n * sizeof(int), cudaMemcpyHostToDevice);
    int block_count = n / block_size;
    // log2(n) Hillis-Steele passes, swapping the double buffers each pass.
    for(int jump = 1; jump < n; jump *= 2){
        prefix_scan<<<block_count, block_size>>>(n, jump, d_x, d_y);
        std::swap(d_x, d_y);
    }
    int *result = (int *) malloc(n * sizeof(int));
    cudaMemcpy(result, d_x, n * sizeof(int), cudaMemcpyDeviceToHost);
    // Test to make sure prefix scan is correct.
    for(int i = 0; i < n; i++){
        if(result[i] != i + 1){
            std::cerr << i << ' ' << i + 1 << ' ' << result[i] << '\n';
            return -1;
        }
    }
    std::cout << "memory usage: " << n * sizeof(int) << " bytes" << std::endl;
    // Release host and device buffers (the original leaked all four).
    free(x);
    free(result);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}
20,600 | #include <iostream>
#include <math.h>
//#include <cuda_runtime.h>
// Copies x -> y -> z -> w, decrementing every component by 1.0f at each hop
// so the compiler cannot elide the intermediate stores (bandwidth-test style
// kernel). Grid-stride loop: correct for any launch configuration.
__global__
void copyKernel(int n, float4* x, float4* y, float4* z, float4* w){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride){
        // Float literals: the original used double literals (1.0), forcing a
        // float->double->float round trip on every component.
        y[i].x = x[i].x - 1.0f;
        y[i].y = x[i].y - 1.0f;
        y[i].z = x[i].z - 1.0f;
        y[i].w = x[i].w - 1.0f;
        z[i].x = y[i].x - 1.0f;
        z[i].y = y[i].y - 1.0f;
        z[i].z = y[i].z - 1.0f;
        z[i].w = y[i].w - 1.0f;
        w[i].x = z[i].x - 1.0f;
        w[i].y = z[i].y - 1.0f;
        w[i].z = z[i].z - 1.0f;
        w[i].w = z[i].w - 1.0f;
    }
}
// Set every component of each float4 in x[0..n) to `val` (grid-stride loop,
// valid for any launch configuration).
__global__
void init(int n, float4* x, float val){
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride){
        x[i].x = val;
        x[i].y = val;
        x[i].z = val;
        x[i].w = val;
    }
}
// Driver: initialises x to 3.0f, runs copyKernel, and checks y/z/w hold
// 2.0f / 1.0f / 0.0f respectively. Optional argv: [1] blockSize, [2] numBlocks.
int main(int argc,char* argv[]){
    int N = 1<<20;
    int blockSize = 256;
    if (argc >= 2)
        blockSize = atoi(argv[1]);
    // Derive the grid AFTER any blockSize override — the original computed
    // numBlocks from the default 256 before parsing argv, so "./a.out 512"
    // launched a grid sized for the wrong block size.
    int numBlocks = (N + blockSize - 1) / blockSize;
    if (argc >= 3)
        numBlocks = atoi(argv[2]);
    std::cout<<"blockSize= "<<blockSize<<std::endl;
    std::cout<<"numBlocks= "<<numBlocks<<std::endl;
    float4 *x, *y, *z, *w;
    // Unified-memory allocations, reachable from host and device.
    cudaMallocManaged (&x, N*sizeof(float4));
    cudaMallocManaged (&y, N*sizeof(float4));
    cudaMallocManaged (&z, N*sizeof(float4));
    cudaMallocManaged (&w, N*sizeof(float4));
    // initialize x on the device
    float val = 3.0f;
    init<<<numBlocks, blockSize>>>(N, x, val);
    // Run kernel on 1M parallel elements on the GPU
    copyKernel<<<numBlocks, blockSize>>>(N, x, y, z, w);
    // wait for the GPU to finish before the host touches managed memory
    cudaDeviceSynchronize();
    // Check for errors (expected: y == 2.0f, z == 1.0f, w == 0.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++){
        maxError = fmax(maxError, fabs(y[i].x - 2.0f));
        maxError = fmax(maxError, fabs(y[i].y - 2.0f));
        maxError = fmax(maxError, fabs(y[i].z - 2.0f));
        maxError = fmax(maxError, fabs(y[i].w - 2.0f));
        maxError = fmax(maxError, fabs(z[i].x - 1.0f));
        maxError = fmax(maxError, fabs(z[i].y - 1.0f));
        maxError = fmax(maxError, fabs(z[i].z - 1.0f));
        maxError = fmax(maxError, fabs(z[i].w - 1.0f));
        maxError = fmax(maxError, fabs(w[i].x - 0.0f));
        maxError = fmax(maxError, fabs(w[i].y - 0.0f));
        maxError = fmax(maxError, fabs(w[i].z - 0.0f));
        maxError = fmax(maxError, fabs(w[i].w - 0.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;
    // Free GPU memory
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    cudaFree(w);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.