serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
1,601
|
/*!
* \brief A helper class for {@link MultiStageMeanfieldLayer} class, which is the Caffe layer that implements the
* CRF-RNN described in the paper: Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* This class itself is not a proper Caffe layer although it behaves like one to some degree.
*
* \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su.
* \version 1.0
* \date 2015
* \copyright Torr Vision Group, University of Oxford.
* \details If you use this code, please consider citing the paper:
* Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du,
* Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
*/
namespace caffe {

// Read element (c, h, w) of a C x H x W tensor; out-of-range coordinates yield 0.
template <typename Dtype>
__device__ Dtype get_gpu_pixel(const Dtype* data, int C, int H, int W, int c, int h, int w)
{
  const bool inside = (c >= 0 && c < C) && (h >= 0 && h < H) && (w >= 0 && w < W);
  return inside ? data[(c * H + h) * W + w] : Dtype(0);
}

// Read element (n, c, h, w) of an N x C x H x W tensor; out-of-range coordinates yield 0.
template <typename Dtype>
__device__ Dtype get_gpu_pixel(const Dtype* data, int N, int C, int H, int W, int n, int c, int h, int w)
{
  const bool inside = (n >= 0 && n < N) && (c >= 0 && c < C)
                   && (h >= 0 && h < H) && (w >= 0 && w < W);
  return inside ? data[((n * C + c) * H + h) * W + w] : Dtype(0);
}

// Write element (c, h, w) of a C x H x W tensor; out-of-range writes are ignored.
template <typename Dtype>
__device__ void set_gpu_pixel(Dtype* data, int C, int H, int W, int c, int h, int w, Dtype value)
{
  const bool inside = (c >= 0 && c < C) && (h >= 0 && h < H) && (w >= 0 && w < W);
  if (inside)
  {
    data[(c * H + h) * W + w] = value;
  }
}

// Write element (n, c, h, w) of an N x C x H x W tensor; out-of-range writes are ignored.
template <typename Dtype>
__device__ void set_gpu_pixel(Dtype* data, int N, int C, int H, int W, int n, int c, int h, int w, Dtype value)
{
  const bool inside = (n >= 0 && n < N) && (c >= 0 && c < C)
                   && (h >= 0 && h < H) && (w >= 0 && w < W);
  if (inside)
  {
    data[((n * C + c) * H + h) * W + w] = value;
  }
}

} // namespace caffe
|
1,602
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define BLOCK_SIZE 64
#define WA 64 * BLOCK_SIZE
#define HA 16 * BLOCK_SIZE
#define WB 16 * BLOCK_SIZE
#define HB WA
#define WC WB
#define HC HA
// Tiled matrix multiply C = A * B using shared memory; one thread computes
// one C element. A tiles are cached as-is; B tiles are stored transposed
// (Bs[tx][ty]) so the global load B[b + wB * ty + tx] is contiguous in tx.
// NOTE(review): assumes wA is a multiple of BLOCK_SIZE and
// blockDim == (BLOCK_SIZE, BLOCK_SIZE); there is no bounds guard.
__global__ void
matrixMul_coalescing( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// One BLOCK_SIZE x BLOCK_SIZE tile of A, and of B held transposed.
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// First/last A-tile offsets for this block row, and the per-iteration steps
// through A (along a row) and B (down a column).
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
As[ty][tx] = A[a + wA * ty + tx];
// Transposed store: Bs[tx][ty] holds tile element (row ty, col tx).
Bs[tx][ty] = B[b + wB * ty + tx];
__syncthreads();
// Bs[tx][k] is B element (row k, col tx) of the current tile.
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[tx][k];
__syncthreads();
}
// Write the accumulated dot product to C (row by*BS+ty, col bx*BS+tx).
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// Tiled matrix multiply C = A * B using shared memory, classic layout:
// Bs[tx][ty] is loaded from B[b + wB * tx + ty], i.e. tile element
// (row tx, col ty), and the inner product reads Bs[k][tx].
// NOTE(review): for fixed ty, adjacent tx values load addresses wB apart,
// so this B load is strided (not contiguous across threadIdx.x) — compare
// matrixMul_coalescing above.
// NOTE(review): assumes wA is a multiple of BLOCK_SIZE and
// blockDim == (BLOCK_SIZE, BLOCK_SIZE); there is no bounds guard.
__global__ void
matrixMul_tiling( float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// First/last A-tile offsets for this block row and the per-tile steps.
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
As[ty][tx] = A[a + wA * ty + tx];
Bs[tx][ty] = B[b + wB * tx + ty];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
__syncthreads();
}
// Write the accumulated dot product to C (row by*BS+ty, col bx*BS+tx).
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// Naive matrix multiply C = A * B: one thread per output element, reading
// A and B straight from global memory (no shared-memory tiling).
// NOTE(review): no bounds guard — the launch grid must cover the output
// matrix exactly (true for the WC/HC-derived grid built in main).
__global__ void
matrixMul_naive( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output row i and column j handled by this thread.
int i = by * blockDim.y + ty;
int j = bx * blockDim.x + tx;
float accu = 0.0;
for(int k=0; k<wA; k++){
accu = accu + A[ i * wA + k ] * B[ k * wB + j ];
}
C[ i * wB + j ] = accu;
}
// Fill data[0..size) with deterministic sin(i) test values.
void Init(float* data, int size)
{
    int i = 0;
    while (i < size) {
        data[i] = sin(i);
        ++i;
    }
}
// Print each element of matrix on its own line (leading newline, %f format).
void display(float* matrix, int size)
{
    int idx = 0;
    while (idx < size) {
        printf("\n%f", matrix[idx]);
        ++idx;
    }
}
// Driver: allocate HA x WA and HB x WB matrices, fill them with sin(i)
// values, copy to the device, and (if uncommented) run one of the three
// matmul kernels, copying the WC x HC result back.
// NOTE(review): with BLOCK_SIZE == 64, threads = dim3(64, 64) requests 4096
// threads per block, which exceeds CUDA's 1024-threads-per-block limit, so
// any of the commented-out launches would fail. BLOCK_SIZE must be <= 32
// for a square 2D block.
// NOTE(review): no cudaError_t checking anywhere, and the result h_C is
// never validated or displayed.
int main()
{
// allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
// initialize host memory
Init(h_A, size_A);
Init(h_B, size_B);
// allocate device memory
float* d_A;
cudaMalloc((void**) &d_A, mem_size_A);
float* d_B;
cudaMalloc((void**) &d_B, mem_size_B);
// allocate device memory for result
unsigned int size_C = WC * HC;
unsigned int mem_size_C = sizeof(float) * size_C;
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
// Launch configuration: one thread per C element, tiled by BLOCK_SIZE.
dim3 threads,grid;
threads = dim3(BLOCK_SIZE, BLOCK_SIZE);
grid = dim3(WC / threads.x, HC / threads.y);
// copy host memory to device
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
//matrixMul_naive<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
//matrixMul_coalescing<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
//matrixMul_tiling<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
// copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
|
1,603
|
#include "includes.h"
__global__ void jacobiFirstLocal(float *x, const float *diagonal_values , const float *non_diagonal_values, const int *indeces ,const float *y, const int size)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x;
float error = 1 ;
float current_value = 1 ;
if (index < size)
{
float local_diagonal_value ;
float local_non_diagonal_values[2];
int local_indeces[2];
float local_y;
local_diagonal_value = diagonal_values[index];
local_non_diagonal_values[0] = non_diagonal_values[2*index];
local_non_diagonal_values[1] = non_diagonal_values[2*index+1];
local_indeces[0] = indeces[2*index];
local_indeces[1] = indeces[2*index+1];
local_y = y[index];
float sum = 0 ;
while(fabsf(error) > 0.00001)
{
for (int i = 0 ; i<2 ; i++)
{
sum += local_non_diagonal_values[i] * x[local_indeces[i]] ;
}
current_value = (local_y - sum )/local_diagonal_value;
error = current_value - x[index] ;
x[index] = current_value ;
sum = 0 ;
__syncthreads();
}
}
}
|
1,604
|
#include "includes.h"
// Flattened global thread id for a 3D grid of 3D blocks.
// Blocks are linearized x-fastest (x, then y, then z); within a block,
// threads are linearized x-fastest as well (tx + ty*bx + tz*bx*by) and
// added on top of the block's base offset.
__device__ unsigned int getGid3d3d(){
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.y * blockDim.x)
+ (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
return threadId;
}
// Build the external potential V for a torus-shaped trap, one value per
// flattened global thread id. x/y/z are 1D coordinate axes indexed by the
// per-axis thread ids; Ax/Ay/Az are gauge-field components indexed by gid.
// NOTE(review): the semantics of items[0..12] (center offsets at [6]/[7]/[8],
// radius scale at [0], frequencies at [3]/[4]/[5], overall factor at [9],
// shear at [12]) are inferred from usage only — confirm against the host
// code that fills `items`.
// NOTE(review): no bounds guard; the launch must cover the V array exactly.
__global__ void ktorus_V(double *x, double *y, double *z, double* items, double *Ax, double *Ay, double *Az, double *V){
int gid = getGid3d3d();
int xid = blockDim.x*blockIdx.x + threadIdx.x;
int yid = blockDim.y*blockIdx.y + threadIdx.y;
int zid = blockDim.z*blockIdx.z + threadIdx.z;
// Radial distance from the torus ring (circle of radius 0.5*items[0]).
double rad = sqrt((x[xid] - items[6]) * (x[xid] - items[6])
+ (y[yid] - items[7]) * (y[yid] - items[7]))
- 0.5*items[0];
double omegaR = (items[3]*items[3] + items[4]*items[4]);
double V_tot = (2*items[5]*items[5]*(z[zid] - items[8])*(z[zid] - items[8])
+ omegaR*(rad*rad + items[12]*rad*z[zid]));
// Add the squared gauge-field magnitude and scale by 0.5*items[9].
V[gid] = 0.5*items[9]*(V_tot
+ Ax[gid]*Ax[gid]
+ Ay[gid]*Ay[gid]
+ Az[gid]*Az[gid]);
}
|
1,605
|
//xfail:BOOGIE_ERROR
//--blockDim=64 --gridDim=64 --no-inline
//
#include "cuda.h"
#define N dim*dim
#define dim 2
// Every thread in the block writes its own threadIdx.x to the single shared
// word `a` — a deliberate data race. This kernel is a verifier test case;
// the file header marks it xfail (BOOGIE_ERROR) for the checker.
__global__ void foo() {
__shared__ int a;
a = threadIdx.x;
}
|
1,606
|
#include <cuda_runtime.h>
#include <stdio.h>
// Print grid/block pairings for a fixed element count while halving the
// block width: 1024, 512, 256, 128. Grid size is the usual ceil-division.
int main(void) {
    const int nElem = 1024;
    const unsigned int widths[4] = { 1024u, 512u, 256u, 128u };
    dim3 block(widths[0]);
    dim3 grid((nElem + block.x - 1) / block.x);
    for (int i = 0; i < 4; ++i) {
        block.x = widths[i];
        grid.x = (nElem + block.x - 1) / block.x;
        printf("grid.x %d block.x %d\n", grid.x, block.x);
    }
    return 0;
}
|
1,607
|
#include "includes.h"
__global__ void update_accel_acoustic_kernel(float * accel, const int size, const float * rmass){
int id;
id = threadIdx.x + (blockIdx.x) * (blockDim.x) + (blockIdx.y) * ((gridDim.x) * (blockDim.x));
if (id < size) {
accel[id] = (accel[id]) * (rmass[id]);
}
}
|
1,608
|
#include<stdio.h>
// Write each element's own linear index (iy*dimx + ix) into the array.
// Fixed: added a bounds guard so the kernel is safe when the launch grid
// over-covers the dimx x dimy domain (the previous version wrote out of
// bounds for any non-exact grid).
__global__ void kernel (int *a, int dimx, int dimy)
{
    // Compute this thread's 2D coordinates in the data.
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if (ix < dimx && iy < dimy) {
        int idx = iy*dimx + ix;
        a[idx] = iy*dimx + ix;
    }
}
// Host driver: fill a dimx x dimy int grid on the GPU with each cell's own
// linear index, copy it back, and print it row by row.
int main()
{
    int dimx = 16;
    int dimy = 16;
    int num_bytes = dimy*dimx*sizeof(int);
    int *d_a=0, *h_a=0; // device and host pointers
    // Allocate memory on host (CPU)
    h_a = (int*)malloc(num_bytes);
    // Allocate memory on device (GPU)
    cudaMalloc((void**)&d_a,num_bytes);
    // Check to see that there was enough memory for both allocations.
    // If an allocation fails, it doesn't change the pointer value; that is
    // why both start at 0 and we test whether they changed.
    if (0==h_a || 0==d_a)
    {
        printf("couldn't allocate memory\n");
        return 1;
    }
    // Initialize array to all 0's
    cudaMemset(d_a,0,num_bytes);
    //-----------------------------------------------------------------------//
    dim3 grid,block;
    block.x = 4;
    block.y = 4;
    // Ceil-division so any dimx/dimy is fully covered.
    // Bug fix: grid.y was previously computed from dimx instead of dimy
    // (harmless only because dimx == dimy here).
    grid.x = (dimx + block.x - 1)/block.x;
    grid.y = (dimy + block.y - 1)/block.y;
    kernel<<<grid,block>>>(d_a,dimx,dimy);
    // Copy it back over
    cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost);
    for (int row=0;row<dimy;row++)
    {
        for (int col=0;col<dimx;col++)
        {
            printf("%d",h_a[row*dimx+col]);
        }
        printf("\n");
    }
    free(h_a);
    cudaFree(d_a);
    return 0;
}
|
1,609
|
#include <iostream>
#include <cstdio>
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define GET_OFFSET(idx) (idx >> LOG_NUM_BANKS)
// Per-block work-efficient prefix sum (Blelloch up-sweep / down-sweep) over
// blockDim.x elements of in_data, written back to the same positions of
// out_data. Shared-memory indices are padded via GET_OFFSET
// (idx >> LOG_NUM_BANKS) to reduce shared-memory bank conflicts.
// Launch: dynamic shared memory of
//   sizeof(int) * (blockDim.x + GET_OFFSET(blockDim.x)).
// NOTE(review): zeroing the last element before the down-sweep produces an
// EXCLUSIVE scan, but the trailing comments ([1,3,6,10]) describe an
// inclusive one — confirm which is intended by callers.
// NOTE(review): blockDim.x must be a power of two for the sweep loops.
__global__ void Scan(int* in_data, int* out_data) {
// in_data -> [1 2 3 4 5 6 7 8], block_size 4
// block_idx -> [0 0 0 0 1 1 1 1 ]
extern __shared__ int shared_data[];
// block_idx = 0
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
// Stage this block's slice into padded shared memory.
shared_data[tid + GET_OFFSET(tid)] = in_data[index];
// shared_data[tid + (tid >> LOG_NUM_BANKS)] = in_data[index];
// shared_data -> [1, 2, 3, 4]
__syncthreads();
// Up-sweep: build partial sums in place; shift = 2^(d - 1)
for (unsigned int shift = 1; shift < blockDim.x; shift <<= 1 ) {
int ai = shift * (2 * tid + 1) - 1; // tid = 0, shift = 1, ai = 0; // tid = 16, shift = 1, ai = 32 = 0
int bi = shift * (2 * tid + 2) - 1;
if (bi < blockDim.x) {
shared_data[bi + GET_OFFSET(bi)] += shared_data[ai + GET_OFFSET(ai)];
}
__syncthreads();
}
// Clear the root before the down-sweep.
if (tid == 0) {
shared_data[blockDim.x - 1 + GET_OFFSET(blockDim.x - 1)] = 0;
}
__syncthreads();
int temp;
// Down-sweep: propagate prefix sums back down the tree.
for (unsigned int shift = blockDim.x / 2; shift > 0; shift >>= 1) {
int bi = shift * (2 * tid + 2) - 1;
int ai = shift * (2 * tid + 1) - 1;
int ai_offset = ai + GET_OFFSET(ai);
int bi_offset = bi + GET_OFFSET(bi);
if (bi < blockDim.x) {
temp = shared_data[ai_offset]; // blue in temp
// temp = 4
shared_data[ai_offset] = shared_data[bi_offset]; // orange
// 1 2 1 0 1 2 1 0 // temp = 4
shared_data[bi_offset] = temp + shared_data[bi_offset];
}
__syncthreads();
}
// if (blockIdx.x == 16383) {
// printf("%d %d %d %d\n", tid, tid + GET_OFFSET(tid), shared_data[tid + GET_OFFSET(tid)], index);
// // std::cout << shared_data[tid] << std::endl;
// }
// block_idx = 0 -> [a0, a1, a2, a3]
// block_idx = 1 -> [a4, a5, a6, a7]
out_data[index] = shared_data[tid + GET_OFFSET(tid)];
__syncthreads();
// out_data[block_idx == 0] = [1, 3, 6, 10]
// out_data[block_idx == 1] = [5, 11, 18, 26]
}
// Driver: scan 2^20 ones in 1024-element blocks, timing the kernel with
// CUDA events and printing the elapsed time plus the last output element.
// Fixed: the device buffers and both events were previously leaked.
int main() {
    const int block_size = 1024;
    const int array_size = 1 << 20;
    int* h_array = new int[array_size];
    for (int i = 0; i < array_size; ++i) {
        h_array[i] = 1;
    }
    int* d_array;
    cudaMalloc(&d_array, sizeof(int) * array_size);
    cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice);
    int num_blocks = array_size / block_size;
    int* d_localscan;
    cudaMalloc(&d_localscan, sizeof(int) * array_size);
    int* h_localscan = new int[array_size];
    // Time the kernel with CUDA events.
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Dynamic shared memory is padded by GET_OFFSET for bank-conflict avoidance.
    Scan<<<num_blocks, block_size, sizeof(int) * (block_size + GET_OFFSET(block_size))>>>(d_array, d_localscan);
    cudaEventRecord(stop);
    cudaMemcpy(h_localscan, d_localscan, sizeof(int) * array_size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed" << std::endl;
    std::cout << h_localscan[array_size - 1] << std::endl;
    // Release event and device resources (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_array);
    cudaFree(d_localscan);
    delete[] h_array;
    delete[] h_localscan;
}
|
1,610
|
#include "thrust.cuh"
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include "mod_range.cuh"
namespace my_thrust {
// void my_thrust::stable_sort() {
// thrust::device_ptr<float> d_ptr = thrust::device_malloc<float>(3);
// thrust::device_ptr<float> first = d_ptr;
// thrust::device_ptr<float> last = d_ptr + 3;
// d_ptr[0] = 3.0; d_ptr[1] = 2.0; d_ptr[2] = 1.0;
// thrust::stable_sort(first, last);
// std::cout << d_ptr[0] << ", " << d_ptr[1] << ", " << d_ptr[2] << std::endl;
// thrust::device_free(d_ptr);
// }
static const int NSORTS = 16000;
static const int DSIZE = 1000;
// Generator functor: successive calls return p/d for p = 0, 1, 2, ..., so
// each run of d_ consecutive calls yields the same segment index.
struct Mod {
    int d_;  // segment length (divisor)
    int p_;  // call counter
    // Bug fix: p_ was previously left uninitialized, making the generated
    // segment ids start from an indeterminate value.
    Mod(int d) : d_(d), p_(0) {}
    int operator()() {
        return p_++ / d_;
    }
};
// Build a DSIZE*NSORTS vector of pseudo-random ints on the host and copy it
// to the device. rand() is used without any srand() call visible in this
// file, so every run produces the same sequence — convenient for
// reproducible benchmarking.
thrust::device_vector<int> gen_rand() {
thrust::host_vector<int> h_data(DSIZE*NSORTS);
thrust::generate(h_data.begin(), h_data.end(), rand);
thrust::device_vector<int> d_data = h_data;
return d_data;
}
// Element-wise equality of two device vectors (assumes equal lengths).
bool validate(const thrust::device_vector<int> &d1, const thrust::device_vector<int> &d2){
return thrust::equal(d1.cbegin(), d1.cend(), d2.cbegin());
}
// Print the first and last three elements of the first segment and of the
// last segment (segment length DSIZE, NSORTS segments) as a sanity sample.
void print(const thrust::device_vector<int>& result) {
std::cout << result[0] << ", " << result[1] << ", " << result[2] << " ... ";
std::cout << result[DSIZE-3] << ", " << result[DSIZE-2] << ", " << result[DSIZE-1] << std::endl;
// Offset of the last segment.
int c = (NSORTS - 1) * DSIZE;
std::cout << result[c+0] << ", " << result[c+1] << ", " << result[c+2] << " ... ";
std::cout << result[c+DSIZE-3] << ", " << result[c+DSIZE-2] << ", " << result[c+DSIZE-1] << std::endl;
}
template <class Proc>
void benchmark(Proc proc) {
thrust::device_vector<int> d_vec = gen_rand();
auto expect = d_vec;
for (int i = 0; i < NSORTS; i++) {
thrust::sort(expect.begin() + (i*DSIZE), expect.begin() + ((i+1)*DSIZE));
}
// https://ivanlife.wordpress.com/2011/05/09/time-cuda/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
proc(d_vec);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time, start, stop);
print(d_vec);
printf ("Time for the kernel: %f ms\n", time);
if (validate(d_vec, expect)) {
printf("OK!\n");
} else {
printf("failed!\n");
}
}
// Benchmark variant: sort using stable_sort_by_key with a derived
// segment-index sequence.
// NOTE(review): the active call passes d_vec as the KEYS and the
// segment-index range as the values, which sorts the data globally and
// merely permutes the segment ids. The commented-out call below (segment
// ids as keys, data as values) is the one that performs a segmented sort —
// confirm which behavior is intended, since benchmark() validates against
// a per-segment sort.
void stable_sort_batch_vector() {
// ??? count_iterator ???
benchmark([=](thrust::device_vector<int>& d_vec) {
// thrust::host_vector<int> h_segments(DSIZE*NSORTS);
// thrust::generate(h_segments.begin(), h_segments.end(), Mod(DSIZE));
// thrust::device_vector<int> d_segments = h_segments;
typedef thrust::device_vector<int>::iterator Iterator;
mod_range<Iterator> d_segments(d_vec.begin(), d_vec.end(), DSIZE);
thrust::stable_sort_by_key(d_vec.begin(), d_vec.end(), d_segments.begin());
// thrust::stable_sort_by_key(d_segments.begin(), d_segments.end(), d_vec.begin());
});
}
// Functor launched once per segment index: sorts the dsize-long segment
// starting at data + dsize*start_idx with thrust::sort using the device
// execution policy (runs from device code inside thrust::for_each).
// NOTE(review): device-side thrust::sort may allocate from the device
// malloc heap — callers enlarge it via cudaDeviceSetLimit first.
struct SortFunctor
{
thrust::device_ptr<int> data;
int dsize;
__host__ __device__
void operator()(int start_idx)
{
thrust::sort(thrust::device, data+(dsize*start_idx), data+(dsize*(start_idx+1)));
}
};
// Benchmark variant: sort each DSIZE-long segment of the vector under test
// by launching one device-side thrust::sort per segment via for_each.
void stable_sort_batch_nested() {
    benchmark([=](thrust::device_vector<int>& d_vec) {
        // Device-side sorts may allocate from the device heap; enlarge it first.
        cudaDeviceSetLimit(cudaLimitMallocHeapSize, (16*DSIZE*NSORTS));
        // Bug fix: sort the vector under test. The previous code generated a
        // fresh random vector (d_result3) and sorted that instead, leaving
        // d_vec untouched, so benchmark()'s validation always failed.
        SortFunctor f = {d_vec.data(), DSIZE};
        thrust::device_vector<int> idxs(NSORTS);
        thrust::sequence(idxs.begin(), idxs.end());
        thrust::for_each(idxs.begin(), idxs.end(), f);
    });
}
} // namespace
|
1,611
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// these are just for timing measurments
#include <time.h>
// Code that reads values from a 2D grid and for each node in the grid finds the minumum
// value among all values stored in cells sharing that node, and stores the minumum
// value in that node.
// To compile it with nvcc execute: nvcc -O2 -o grid3 grid3.cu
// Modified by Bob Crovella NVIDIA Corp. 12/2011 to demonstrate CUDA
//define the window size (square window) and the data set size
#define WSIZE 16
#define DATAHSIZE 20000
#define DATAWSIZE 14000
#define CHECK_VAL 1
#define MIN(X,Y) ((X<Y)?X:Y)
#define BLKWSIZE 32
#define BLKHSIZE 32
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
typedef int oArray[DATAWSIZE];
typedef int iArray[DATAWSIZE+WSIZE];
// Sliding-window minimum: each thread owns one output node (idx, idy) and
// takes the minimum of its current output value and all values in the
// WSIZE x WSIZE input window anchored at the same coordinates. The input
// array is padded by WSIZE in both dimensions, so the window reads are
// always in bounds for guarded (idx, idy).
__global__ void cmp_win(oArray *output, const iArray *input)
{
int tempout, i, j;
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
if ((idx < DATAHSIZE) && (idy < DATAWSIZE)){
// Seed the running minimum from the existing output value.
tempout = output[idx][idy];
#pragma unroll
for (i=0; i<WSIZE; i++)
#pragma unroll
for (j=0; j<WSIZE; j++)
if (input[idx + i][idy + j] < tempout)
tempout = input[idx + i][idy + j];
output[idx][idy] = tempout;
}
}
// Driver: build a padded input grid seeded with CHECK_VAL at window-aligned
// positions, run the window-minimum kernel, and verify that every output
// node found CHECK_VAL. Timing of init and compute phases is reported.
// Fixed: host and device buffers were previously never freed.
int main(int argc, char *argv[])
{
    int i, j;
    const dim3 blockSize(BLKHSIZE, BLKWSIZE, 1);
    const dim3 gridSize(((DATAHSIZE+BLKHSIZE-1)/BLKHSIZE), ((DATAWSIZE+BLKWSIZE-1)/BLKWSIZE), 1);
    // these are just for timing
    clock_t t0, t1, t2;
    double t1sum=0.0;
    double t2sum=0.0;
    // overall data set sizes
    const int nr = DATAHSIZE;
    const int nc = DATAWSIZE;
    // window dimensions
    const int wr = WSIZE;
    const int wc = WSIZE;
    // pointers for data set storage via malloc
    iArray *h_in, *d_in;
    oArray *h_out, *d_out;
    // start timing
    t0 = clock();
    // allocate storage for data set (input is padded by the window size)
    if ((h_in = (iArray *)malloc(((nr+wr)*(nc+wc))*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1);}
    if ((h_out = (oArray *)malloc((nr*nc)*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1); }
    // synthesize data: fill with a large sentinel (0x7F7F7F7F), then plant
    // CHECK_VAL at every window-aligned node so each window contains one.
    printf("Begin init\n");
    memset(h_in, 0x7F, (nr+wr)*(nc+wc)*sizeof(int));
    memset(h_out, 0x7F, (nr*nc)*sizeof(int));
    for (i=0; i<nr+wr; i+=wr)
        for (j=0; j< nc+wc; j+=wc)
            h_in[i][j] = CHECK_VAL;
    t1 = clock();
    t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
    printf("Init took %f seconds. Begin compute\n", t1sum);
    // allocate GPU device buffers
    cudaMalloc((void **) &d_in, (((nr+wr)*(nc+wc))*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer");
    cudaMalloc((void **) &d_out, ((nr*nc)*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer2");
    // copy data to GPU
    cudaMemcpy(d_out, h_out, ((nr*nc)*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy failure");
    cudaMemcpy(d_in, h_in, (((nr+wr)*(nc+wc))*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy2 failure");
    cmp_win<<<gridSize,blockSize>>>(d_out, d_in);
    cudaCheckErrors("Kernel launch failure");
    // copy output data back to host
    cudaMemcpy(h_out, d_out, ((nr*nc)*sizeof(int)), cudaMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy3 failure");
    t2 = clock();
    t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
    printf ("Done. Compute took %f seconds\n", t2sum);
    // verify: every node must have seen CHECK_VAL in its window
    for (i=0; i < nr; i++)
        for (j=0; j < nc; j++)
            if (h_out[i][j] != CHECK_VAL) {
                printf("mismatch at %d,%d, was: %d should be: %d\n", i,j,h_out[i][j], CHECK_VAL);
                free(h_in); free(h_out);
                cudaFree(d_in); cudaFree(d_out);
                return 1;
            }
    printf("Results pass\n");
    // release host and device storage (previously leaked)
    free(h_in); free(h_out);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}
|
1,612
|
#include <cuda.h>
#include <stdio.h>
// Add `value` to every element of g_data, one element per thread.
// NOTE(review): no bounds guard — the launch grid must cover the array
// exactly (main launches num/block.x blocks with num a multiple of block.x).
__global__ void kernel(float *g_data, float value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + value;
// printf("%f+g_data[%d]=%f\n", value, idx, g_data[idx]);
}
// Return 1 if every element of data[0..n) equals x exactly; otherwise print
// the first mismatch and return 0.
int checkResult(float *data, const int n, const float x)
{
    int i;
    for (i = 0; i < n; i++)
    {
        if (data[i] == x)
            continue;
        printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x);
        return 0;
    }
    return 1;
}
// Demo driver: query the CUDA device, add `value` to a small zeroed array on
// the GPU (timed with events), verify the result, then exercise stream and
// event create/destroy calls.
// Fixed: sizeof(...) results (size_t) were printed with %d / %lu, which is
// undefined behavior for %d; the correct conversion is %zu.
int main()
{
    int devID=1;
    int count = 0;
    struct cudaDeviceProp props;
    float *d_a=0;
    float *h_a=0;
    int num = 1 << 4;
    int nbytes = num * sizeof(float);
    int value=16;
    int nStreams = 4;
    //test();
    cudaGetDeviceCount(&count);
    printf("cuda count=%d\n", count);
    // return 0;
    printf("[=] Before devID is %d\n", devID);
    cudaGetDevice(&devID);
    printf("[=] After devID is %d\n", devID);
    printf("prop=%zu\n", sizeof(struct cudaDeviceProp));
    cudaGetDeviceProperties(&props, devID);
    printf("Device %d: \"%s\" with Compute %d.%d capability\n",devID, props.name, props.major, props.minor);
    // return 0;
    h_a=(float*)malloc(nbytes);
    memset(h_a, 0, nbytes);
    // start
    cudaMalloc((void**)&d_a, nbytes);
    cudaMemset(d_a, 0, nbytes);
    // set kernel launch configuration (grid exactly covers num elements)
    dim3 block = dim3(4);
    dim3 grid = dim3((num + block.x - 1) / block.x);
    float ms; // elapsed time in milliseconds
    // create events and streams
    cudaEvent_t startEvent, stopEvent, dummyEvent;
    cudaStream_t stream[nStreams];
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);
    cudaEventCreate(&dummyEvent);
    for (int i = 0; i < nStreams; ++i)
        cudaStreamCreate(&stream[i]);
    // time H2D copy + kernel + D2H copy on the default stream
    cudaEventRecord(startEvent,0);
    cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
    kernel<<<grid, block>>>(d_a, value);
    cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);
    cudaEventElapsedTime(&ms, startEvent, stopEvent);
    printf("Time for sequential transfer and execute (ms): %f\n", ms);
    bool bFinalResults = (bool) checkResult(h_a, num, value);
    printf("result:%s\n", bFinalResults? "PASS":"FAILED");
    // end
    free(h_a);
    cudaFree(d_a);
    // cleanup
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
    cudaEventDestroy(dummyEvent);
    for (int i = 0; i < nStreams; ++i)
        cudaStreamDestroy(stream[i]);
    cudaEvent_t events[64];
    for (int i = 0; i < 64; ++i){
        // create
        cudaEventCreate(&events[i]);
    }
    for (int i = 0; i < 64; ++i){
        cudaEventDestroy(events[i]);
    }
    // %zu is the correct conversion for size_t (was %d: undefined behavior)
    printf("long long int size = %zu\n", sizeof(long long int));
    printf("size of cudaEvent_t = %zu\n", sizeof(cudaEvent_t));
    return 0;
}
|
1,613
|
#include <stdio.h>
// Enumerate the CUDA devices present and report each one's compute capability.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("Total device %d\n", deviceCount);
    cudaDeviceProp props;
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaGetDeviceProperties(&props, dev);
        printf("Device %d has compute capability %d.%d.\n",
               dev, props.major, props.minor);
    }
}
|
1,614
|
#include "includes.h"
__global__ void cudaUpdateMostActive_kernel(unsigned int * exampleIds, unsigned int * exampleFiringRate, unsigned int * mostActiveId, unsigned int inputsDimX, unsigned int inputsDimY, unsigned int inputsDimZ)
{
const unsigned int inputSize = inputsDimZ * inputsDimX * inputsDimY;
const unsigned int batchInputOffset = blockIdx.z * inputSize;
const unsigned int blockOffset = blockIdx.x * blockDim.x;
const unsigned int partialIdx = threadIdx.x + blockOffset;
// TODO: Also used shared memory for firing rates to avoid global
// memory accesses
extern __shared__ unsigned int partialActiveIdx[];
// TODO: Index 0 has a slight advantage here
partialActiveIdx[threadIdx.x] = 0;
if (partialIdx < inputSize){
partialActiveIdx[threadIdx.x] = exampleIds[partialIdx + batchInputOffset];
}
__syncthreads();
// Reduction over neurons
for (int offset = blockDim.x/2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset){
if (exampleFiringRate[partialActiveIdx[threadIdx.x]] <
exampleFiringRate[partialActiveIdx[threadIdx.x + offset]]) {
partialActiveIdx[threadIdx.x] =
partialActiveIdx[threadIdx.x + offset];
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
mostActiveId[blockIdx.x+gridDim.x*blockIdx.z] = partialActiveIdx[0];
}
}
|
1,615
|
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include <semaphore.h>
#include "cs_cuda.h"
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_buffer.h"
// #define CUDA_DBG
// #define CUDA_DBG1
static int cs_buf_cnt ;
static int cs_buf_free_cnt = 0 ;
static struct cs_buf *cs_buf_free ;
static struct cs_buf_list *cs_buf_lp ;
static sem_t mutex ;
static int do_lock = 0 ;
void cs_put_free_buf ( struct cs_buf *bp ) ;
struct cs_buf * cs_get_free_buf () ;
// lock : 1->do semaphore for the critical section
// Initialize the buffer pool: one cudaMalloc covers all buffer classes
// described by cbp[0..cnt), each class holding cbp[i].cnt buffers of
// cbp[i].size bytes (rounded up to a multiple of 4). Free-descriptor and
// per-class free lists are then built on top of that single allocation.
// Returns 1 on success, 0 on any failure. lock=1 enables the semaphore
// around all subsequent list operations.
// NOTE(review): the two host malloc() blocks and the device allocation are
// never released anywhere in this file — apparently init-once by design.
int
cs_buffer_init ( struct cs_buf_desc *cbp, int cnt, int lock )
{
char *d_p ;
int icnt, total, i, j ;
struct cs_buf_desc *cp ;
struct cs_buf *bp ;
do_lock = lock ;
if ( do_lock )
{
if ( sem_init ( &mutex, 0, 1 ) < 0 )
{
printf("%s : sem_init failed \n", __func__ ) ;
return ( 0 ) ;
}
}
cs_buf_cnt = cnt ;
cp = cbp ;
total = 0 ;
icnt = 0 ;
// Sum the device bytes needed (sizes rounded up to 4-byte multiples)
// and count the total number of buffer descriptors.
for ( i = 0 ; i < cnt ; i++ )
{
total += (((( cp->size - 1 ) >> 2 ) + 1 ) << 2 ) * cp->cnt ;
icnt += cp->cnt ;
#ifdef CUDA_DBG
printf("%s: i %d total %d size %d cnt %d total cnt %d\n", __func__, i,
total, cp->size, cp->cnt, icnt ) ;
#endif
cp++ ;
}
// One device allocation for all classes (cudaError_t captured in i).
if (( i = cudaMalloc ( &d_p, total )) != cudaSuccess )
{
printf("%s: malloc failed %d \n", __func__, i ) ;
return ( 0 ) ;
}
if (!( cs_buf_lp = ( struct cs_buf_list *)malloc ( sizeof (
struct cs_buf_list ) * cs_buf_cnt )))
{
#ifdef CUDA_DBG
printf("%s: cs_list failed \n", __func__ ) ;
#endif
return ( 0 ) ;
}
if (!( bp = cs_buf_free = ( struct cs_buf *)malloc ( sizeof ( struct cs_buf ) * icnt )))
{
#ifdef CUDA_DBG
printf("%s: cs_bufp failed \n", __func__ ) ;
#endif
return ( 0 ) ;
}
// Rebuild the free-descriptor list one node at a time via cs_put_free_buf
// (bp still points at the malloc'd array, so nothing is lost here).
cs_buf_free = NULL ;
while ( icnt-- )
{
cs_put_free_buf ( bp ) ;
bp++ ;
}
cp = cbp ;
// Carve the device allocation into per-class free lists.
for ( i = 0 ; i < cs_buf_cnt ; i++ )
{
cs_buf_lp[i].bp = NULL ;
cs_buf_lp[i].cnt = 0 ;
total = (((( cp->size - 1 ) >> 2 ) + 1 ) << 2 ) ;
for ( j = 0 ; j < cp->cnt ; j++ )
{
cs_put_free_list( d_p, i ) ;
d_p += total ;
}
cs_buf_lp[i].used = 0 ;
cs_buf_lp[i].max_used = 0 ;
cp++ ;
}
p_buffer_dbg("init") ;
return ( 1 ) ;
}
// Debug dump: walk the global free-descriptor list under the mutex and
// print each node's address, prefixed by the caller-supplied tag `s`.
void
p_buf_free(const char *s )
{
struct cs_buf *cb ;
int i = 0 ;
printf("%s: %s cnt %d \n", __func__, s, cs_buf_free_cnt ) ;
if ( do_lock )
sem_wait ( &mutex ) ;
cb = cs_buf_free ;
while ( cb )
{
i++ ;
printf("%i :: %p \n", i, cb ) ;
cb = cb->np ;
}
if ( do_lock )
sem_post( &mutex ) ;
}
// Debug dump: for each buffer class, print its used/free/max-used counters
// and (when CUDA_DBG is defined) walk its free list printing each node.
void
p_buf_list( const char *s )
{
struct cs_buf *cb ;
int i, j, k ;
#ifdef CUDA_DBG
printf("%s: %s cs_buf_cnt %d\n", __func__, s, cs_buf_cnt ) ;
#endif
if ( do_lock )
sem_wait ( &mutex ) ;
for ( i = 0 ; i < cs_buf_cnt ; i++ )
{
cb = cs_buf_lp[i].bp ;
j = cs_buf_lp[i].cnt ;
printf("%s : idx %d used %d cnt %d max_used %d \n",
__func__, i, cs_buf_lp[i].used, cs_buf_lp[i].cnt, cs_buf_lp[i].max_used ) ;
// Walk cnt nodes of this class's free list (prints only under CUDA_DBG).
for ( k = 0 ; k < j ; k++ )
{
#ifdef CUDA_DBG
printf("i %d k %d cb %p d_A %p np %p \n", i, k, cb, cb->d_A, cb->np ) ;
#endif
cb = cb->np ;
}
}
if ( do_lock )
sem_post( &mutex ) ;
}
// Debug dump of both the free-descriptor list and the per-class lists,
// wrapped in banner lines. Compiles to a no-op unless CUDA_DBG is defined.
void
p_buffer_dbg( const char *s )
{
#ifdef CUDA_DBG
printf("--------------------------------------------------------------- B\n") ;
p_buf_free( s ) ;
p_buf_list( s ) ;
printf("--------------------------------------------------------------- E\n") ;
#endif
}
// Take one device buffer from class `idx`: pop the head of the class free
// list (under the mutex), return its device pointer, and recycle the
// descriptor onto the global free-descriptor list. Returns NULL for a bad
// idx. max_used tracks the high-water mark of simultaneously used buffers
// (used only ever grows by one per call, so the increment is equivalent to
// an assignment).
// NOTE(review): when the class is exhausted this function deliberately
// blocks forever (sleep loop) rather than returning NULL.
char *
cs_get_free_list ( int idx )
{
struct cs_buf *cs_bufp ;
char *bp = NULL ;
#ifdef CUDA_DBG
printf("%s: idx %d \n", __func__, idx ) ;
#endif
if ( idx < 0 || idx >= cs_buf_cnt )
{
#ifdef CUDA_DBG
printf("%s: wrong idx %d \n", __func__, idx ) ;
#endif
return ( NULL ) ;
}
if ( do_lock )
sem_wait ( &mutex ) ;
cs_bufp = cs_buf_lp[ idx ].bp ;
if ( cs_bufp )
{
cs_buf_lp[ idx ].bp = cs_bufp->np ;
bp = cs_bufp->d_A ;
cs_buf_lp[ idx ].cnt-- ;
cs_buf_lp[ idx ].used++ ;
if ( cs_buf_lp[ idx ].used > cs_buf_lp[ idx ].max_used )
cs_buf_lp[ idx ].max_used++ ;
}
#ifdef CUDA_DBG
printf("%s: idx %d bp %p cnt %d\n", __func__, idx, bp, cs_buf_lp[idx].cnt ) ;
#endif
if ( do_lock )
sem_post( &mutex ) ;
// The descriptor is no longer needed once the device pointer is extracted.
if ( cs_bufp )
cs_put_free_buf( cs_bufp ) ;
if ( bp == NULL )
{
printf("buffer ERR : idx %d OUT OF BUFFER wait forever\n", idx) ;
while ( 1 )
sleep ( 60 ) ;
}
#ifdef CUDA_OBS
p_buffer_dbg("cs_get_free_list") ;
#endif
return ( bp ) ;
}
// Return device buffer `bp` to class `idx`: grab a descriptor from the
// global free-descriptor pool, attach the device pointer, and push it onto
// the class free list (under the mutex).
// NOTE(review): on a bad idx, or when no descriptor is available, the
// function returns silently and the device buffer is dropped from the pool.
void
cs_put_free_list ( char *bp, int idx )
{
struct cs_buf *fbp ;
#ifdef CUDA_DBG
printf("%s: bp %p idx %d \n", __func__, bp, idx ) ;
#endif
if ( idx < 0 || idx >= cs_buf_cnt )
{
#ifdef CUDA_DBG
printf("%s: wrong idx %d \n", __func__, idx ) ;
#endif
return ;
}
fbp = cs_get_free_buf() ;
if ( fbp == NULL )
{
printf("%s: no free buf left idx %d bp %p\n", __func__, idx, bp ) ;
return ;
}
if ( do_lock )
sem_wait ( &mutex ) ;
fbp->d_A = bp ;
// Link the descriptor at the head of the class free list.
if ( cs_buf_lp[ idx ].bp )
fbp->np = cs_buf_lp[ idx ].bp ;
else
fbp->np = NULL ;
cs_buf_lp[ idx ].bp = fbp ;
cs_buf_lp[ idx ].cnt++ ;
cs_buf_lp[ idx ].used-- ;
#ifdef CUDA_DBG
printf("%s: bp %p d_p %p idx %d cnt %d\n", __func__, cs_buf_lp[idx].bp,
cs_buf_lp[idx].bp->d_A, idx, cs_buf_lp[idx].cnt ) ;
#endif
if ( do_lock )
sem_post( &mutex ) ;
}
/*
 * Pop one descriptor off the global free-descriptor list (mutex-guarded
 * when do_lock is set). Returns NULL when the list is empty.
 * Fixed: the CUDA_DBG printf printed the pointer with %d (undefined
 * behavior); it now uses %p.
 */
struct cs_buf *
cs_get_free_buf ()
{
struct cs_buf *bp = NULL ;
if ( do_lock )
	sem_wait ( &mutex ) ;
if ( cs_buf_free )
{
	bp = cs_buf_free ;
	cs_buf_free = cs_buf_free->np ;
	cs_buf_free_cnt-- ;
}
#ifdef CUDA_DBG
printf("%s: bp %p cnt %d \n", __func__, (void *)bp, cs_buf_free_cnt ) ;
#endif
if ( do_lock )
	sem_post( &mutex ) ;
return ( bp ) ;
}
/*
 * Push descriptor `bp` onto the global free-descriptor list (mutex-guarded
 * when do_lock is set).
 * Fixed: the CUDA_DBG printf printed the pointer with %d (undefined
 * behavior); it now uses %p.
 */
void
cs_put_free_buf ( struct cs_buf *bp )
{
if ( do_lock )
	sem_wait ( &mutex ) ;
bp->np = cs_buf_free ;
cs_buf_free = bp ;
cs_buf_free_cnt++ ;
#ifdef CUDA_DBG
printf("%s: bp %p cnt %d \n", __func__, (void *)bp, cs_buf_free_cnt ) ;
#endif
if ( do_lock )
	sem_post( &mutex ) ;
}
// Exchange the float pointers referenced by fp1 and fp2.
void
cs_buf_swap (float **fp1, float **fp2 )
{
    float *tmp = *fp1 ;
    *fp1 = *fp2 ;
    *fp2 = tmp ;
}
|
1,616
|
// kompilowanie: nvcc -o executable p1.cu
#include <stdio.h>
#include <time.h>
#define N 1000000
#define BLOCK_SIZE 16 //threads per blocks
float hArray[N];
float *dArray;
int blocks;
clock_t cpu_startTime, cpu_endTime;
double cpu_elapseTime = 0;
cudaEvent_t start, stop;
float gpu_elapseTime = 0;
// Allocate the device buffer and copy the global host array into it.
void prologue(void) {
cudaMalloc((void**)&dArray, sizeof(hArray));
cudaMemcpy(dArray, hArray, sizeof(hArray), cudaMemcpyHostToDevice);
}
// Copy the result back from the device into the global host array and
// release the device buffer.
void epilogue(void) {
cudaMemcpy(hArray, dArray, sizeof(hArray), cudaMemcpyDeviceToHost);
cudaFree(dArray);
/*for(int i = 0; i < sizeof(hArray)/sizeof(float); i++)
{
printf("%.1f\n", hArray[i]);
}*/
}
// CPU reference workload: evaluate A[x]^3 + A[x]^2 + A[x] for every element.
// The result is stored into a volatile so the compiler cannot dead-code-
// eliminate the loop (the previous version assigned to a plain local that
// was never read, which would make the CPU timing in main meaningless once
// the optimizer removed it).
void cpu(float *A)
{
    volatile float b;
    for (int x=0; x < N; x++)
    {
        b = A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
        //printf("%.1f\n", b);
    }
    (void)b;
}
// Kernel
__global__ void pow3(float *A) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < N)
A[x] = A[x] * A[x] * A[x] + A[x] * A[x] + A[x];
}
// Compare CPU vs GPU timing of the polynomial x^3 + x^2 + x over N floats.
// Fixed: the CPU time is measured in seconds (clock ticks / CLOCKS_PER_SEC)
// but was printed with an "ms" label; it is now converted to milliseconds.
int main(int argc, char** argv)
{
    int devCnt;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaGetDeviceCount(&devCnt);
    if(devCnt == 0) {
        perror("No CUDA devices available -- exiting.");
        return 1;
    }
    memset(hArray, 0, sizeof(hArray));
    for(int i = 0; i < N; i++) {
        hArray[i] = i + 1;
    }
    //CPU:
    cpu_startTime = clock();
    cpu(hArray);
    cpu_endTime = clock();
    cpu_elapseTime = ((cpu_endTime - cpu_startTime)/(double)CLOCKS_PER_SEC);
    // cpu_elapseTime is in seconds; scale to ms to match the label and the GPU figure.
    printf("CPU time: %f ms\n", cpu_elapseTime * 1000.0);
    //GPU:
    prologue();
    cudaEventRecord(start, 0);
    //prologue();
    blocks = N / BLOCK_SIZE; // amount of threads' blocks
    if(N % BLOCK_SIZE)
        blocks++;
    pow3<<<blocks, BLOCK_SIZE>>>(dArray); // running thread
    cudaThreadSynchronize();
    //epilogue();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapseTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    epilogue();
    printf("GPU time: %f ms\n", gpu_elapseTime);
    return 0;
}
// Dla N 1000: CPU 0,000008 ms, GPU 0,761984 ms / 0,027136 ms (bez prologu i epilogu)
// Dla N 1000000: CPU 0,007193, GPU 6,650976 / 0,582208
|
1,617
|
#include "includes.h"
// CUDA kernel for vector addition
// Initialize
__global__ void MatrixMul(int* a, int* b, int* c, int n) {
// row
int row = (blockIdx.y * blockDim.y) + threadIdx.y;
//col
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_sum = 0;
// boundary guard
if ((row < n) && (col < n)) {
for (int k = 0; k < n; k++)
{
temp_sum += a[row*n+k]*b[k*n+col];
}
c[row*n+col] = temp_sum;
}
}
|
1,618
|
#include <cuda_runtime.h>
extern "C" void sumMatrixOnGPU1D1(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx);
// grid 1D block 1D
// grid 1D block 1D: each thread owns one column ix and walks down all rows,
// so MatC = MatA + MatB element-wise over an nx x ny matrix.
__global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx,
                                 int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
    if (ix >= nx)
        return;
    for (int iy = 0; iy < ny; iy++)
    {
        int idx = iy * nx + ix;
        MatC[idx] = MatA[idx] + MatB[idx];
    }
}
// Host wrapper: launch the 1D-grid/1D-block matrix-sum kernel with dimx
// threads per block and enough blocks to cover all nx columns.
void sumMatrixOnGPU1D1(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx)
{
    const unsigned int gridX = (nx + dimx - 1) / dimx;  // ceil-div over columns
    dim3 block(dimx, 1);
    dim3 grid(gridX, 1);
    sumMatrixOnGPU1D<<<grid, block>>>(MatA, MatB, MatC, nx, ny);
}
|
1,619
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#define MAIN "_MAIN_:"
#define F1LO "_ODD_:"
#define CTRL "_CRTL_:"
#define ARRAY_SIZE 100000
#define TILE_WIDTH 128
#define DEVICE 1 //device 0 o 1
// Exchange the two int32 values pointed to by _a and _b.
__device__
inline void SWAP(int32_t *_a,int32_t *_b){ int32_t tmp = *_a; *_a = *_b; *_b = tmp; }
/*###################################
########### KERNELS ###########
###################################*/
// One tile-local odd-even transposition sort pass over global memory.
// Each thread owns the pair at global index t_position (odd-based); the loop
// alternates odd/even comparison phases blockDim.x*2 times, enough to sort
// one tile. NOTE(review): __syncthreads() only orders threads within a block;
// cross-tile ordering appears to rely on the host relaunching with shifted
// pointers (see odd_even_sort) — confirm against the caller.
__global__
void odd_even_sort_kernel(int32_t * arr_d, int32_t n){
int32_t t_position = (blockDim.x * blockIdx.x + threadIdx.x)*2 + 1;// +1 avoids underflow at index 0
int32_t tid = threadIdx.x*2+1;
int32_t i_limit = blockDim.x*2;
for(int32_t i=0; i<i_limit;i++){
if ((i&1) && t_position< n-1 && tid < blockDim.x*2-1 ) { // odd phase: compare (pos, pos+1)
if (*(arr_d+t_position + 1) < *(arr_d+t_position)) {
SWAP(arr_d + t_position, arr_d + t_position + 1);
}
}
if(!(i&1) && t_position < n && tid < blockDim.x*2){ // even phase: compare (pos-1, pos)
if (*(arr_d+t_position) < *(arr_d+t_position-1)) {
SWAP(arr_d + t_position, arr_d + t_position - 1);
}
}
__syncthreads();
}
}
// Shared-memory variant of the tile-local odd-even sort: each block stages
// its 2*TILE_WIDTH elements into sh_arr, runs all comparison phases there,
// then writes the tile back. NOTE(review): threads with position >= n skip
// the whole body including __syncthreads(); safe only if the whole block
// agrees on the predicate at the array tail — verify tile/array sizing.
__global__
void fast_odd_even_sort_kernel(int32_t * arr_d, int32_t n){
int32_t position = (blockDim.x * blockIdx.x + threadIdx.x)*2 + 1;// +1 avoids underflow at index 0
int32_t tid = threadIdx.x*2+1;
__shared__ int32_t sh_arr[2*TILE_WIDTH];
int32_t bound = blockDim.x*2;
int32_t i_limit = blockDim.x*2;
if(position < n){
// stage this thread's pair into shared memory
*(sh_arr+tid)=*(arr_d+position);
*(sh_arr+tid-1)=*(arr_d+position-1);
__syncthreads();
for(int32_t i=0; i<i_limit;i++){
if ((i&1) && position< n-1 && tid < bound-1 ) { // odd phase
if (*(sh_arr+tid + 1) < *(sh_arr+tid)) {
SWAP(sh_arr + tid, sh_arr + tid + 1);
}
}
if(!(i&1) && position < n && tid < bound){ // even phase
if (*(sh_arr+tid) < *(sh_arr+tid-1)) {
SWAP(sh_arr + tid, sh_arr + tid - 1);
}
}
__syncthreads();
}
// write the sorted tile back to global memory
*(arr_d+position) = *(sh_arr+tid);
*(arr_d+position-1) = *(sh_arr+tid-1);
}
}
/*##########################################
########### HOST FUNCTIONS ###########
##########################################*/
// Host driver for the global-memory odd-even sort: allocates/copies the
// device buffer, then launches the tile-local kernel n/TILE_WIDTH+1 times,
// alternating a TILE_WIDTH pointer shift on odd iterations so tiles overlap
// and values can migrate across tile boundaries. Times the launches with
// CUDA events. FIX: the events were created but never destroyed (leak).
__host__
void odd_even_sort(int32_t * arr, int32_t n){
int32_t *cuda_d;
dim3 dimGrid ((uint)((ARRAY_SIZE / TILE_WIDTH)+1), 1, 1);
dim3 dimBlock (TILE_WIDTH, 1, 1);
cudaError_t err;
cudaEvent_t start, stop;
float mili;
err = cudaMalloc((void**)&cuda_d, sizeof(int32_t)*ARRAY_SIZE);
if( err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); // best definition
exit(EXIT_FAILURE);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(cuda_d, arr, sizeof(int32_t)*ARRAY_SIZE, cudaMemcpyHostToDevice);
int32_t j_limit = n/TILE_WIDTH+1;
int32_t *p_cuda;
int32_t size;
printf("%s ordenando..\n",F1LO);
cudaEventRecord(start);
for(int32_t j=0;j<j_limit;j++){
// odd passes shift the window by one tile so tile boundaries move
p_cuda = cuda_d + (j&1) * TILE_WIDTH;
size = n - (j&1) * TILE_WIDTH;
odd_even_sort_kernel<<<dimGrid, dimBlock>>>(p_cuda, size);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&mili, start, stop);
cudaMemcpy(arr, cuda_d, sizeof(int32_t)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
printf("%s terminanding.. time: %f s\n", F1LO, mili/1000);
// FIX: release timing events (previously leaked).
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(cuda_d);
}
// Host driver for the shared-memory odd-even sort. Same launch schedule as
// odd_even_sort (overlapping tile windows on odd passes), but invokes the
// shared-memory kernel. FIX: the timing events were never destroyed (leak).
__host__
void fast_odd_even_sort(int32_t * arr, int32_t n){
int32_t *cuda_d;
dim3 dimGrid ((uint)((ARRAY_SIZE / TILE_WIDTH)+1), 1, 1);
dim3 dimBlock (TILE_WIDTH, 1, 1);
cudaError_t err;
cudaEvent_t start, stop;
float mili;
err = cudaMalloc(&cuda_d, sizeof(int32_t)*ARRAY_SIZE);
if( err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); // best definition
exit(EXIT_FAILURE);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(cuda_d, arr, sizeof(int32_t)*ARRAY_SIZE, cudaMemcpyHostToDevice);
int32_t j_limit = n/TILE_WIDTH+1;
int32_t *p_cuda;
int32_t size;
printf("%s ordenando..\n",F1LO);
cudaEventRecord(start);
for(int32_t j=0;j<j_limit;j++){
// odd passes shift the window by one tile so tile boundaries move
p_cuda = cuda_d + (j&1) * TILE_WIDTH;
size = n - (j&1) * TILE_WIDTH;
fast_odd_even_sort_kernel<<<dimGrid, dimBlock>>>(p_cuda, size);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&mili, start, stop);
cudaMemcpy(arr, cuda_d, sizeof(int32_t)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
printf("%s terminanding.. time: %f s\n", F1LO, mili/1000);
// FIX: release timing events (previously leaked).
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(cuda_d);
}
// Verify arr[0..n) is non-decreasing; on the first inversion, report its
// index and return 1. Returns 0 for a sorted array.
__host__
int control(int32_t *arr, int32_t n){
for(int32_t i = 1; i < n; ++i){
if(arr[i] < arr[i-1]){
printf("%s I = %d\n", CTRL, i);
return 1;
}
}
return 0;
}
/*###################################
########### MAIN ###########
###################################*/
// Driver: fills a random array (with extreme sentinels at both ends for the
// first run), checks sortedness, runs the global-memory sort, re-checks,
// then repeats the experiment with the shared-memory sort.
int main( int argc, char *argv[] ){
int32_t *arr;
cudaError_t err;
arr = (int32_t*)malloc(sizeof(int32_t)*ARRAY_SIZE);
printf("array size: %d tile: %d\n",ARRAY_SIZE, TILE_WIDTH);
printf("#### SORT WHIT GLOBAL MEMORY ####\n" );
for (int i = 0; i < ARRAY_SIZE; i++) {
arr[i] = rand()%1000+1;
// printf("%d ", arr[i]);
}
// Sentinels outside the random range: forces values to travel the full array.
arr[0]=1001;
arr[ARRAY_SIZE-1]=0;
printf("\n");
err = cudaSetDevice(DEVICE);
if( err != cudaSuccess){
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); // best definition
exit(EXIT_FAILURE);
}
// Expect "desordenado" (unsorted) before, "ok" after.
if(control(arr, ARRAY_SIZE)) printf("%s desordenado!! \n",MAIN);
else printf("%s ok!! \n",MAIN);
odd_even_sort(arr,ARRAY_SIZE);
if(control(arr, ARRAY_SIZE)) printf("%s desordenado!! \n",MAIN);
else printf("%s ok!! \n" ,MAIN);
printf("#### SORT WHIT SHARED MEMORY ####\n" );
for (int i = 0; i < ARRAY_SIZE; i++) {
arr[i] = rand()%1000;
// printf("%d ", arr[i]);
}
printf("\n");
if(control(arr, ARRAY_SIZE)) printf("%s desordenado!! \n",MAIN);
else printf("%s ok!! \n",MAIN);
fast_odd_even_sort(arr,ARRAY_SIZE);
if(control(arr, ARRAY_SIZE)) printf("%s desordenado!! \n",MAIN);
else printf("%s ok!! \n" ,MAIN);
free(arr);
printf("\n");
return 0;
}
|
1,620
|
#define d_wavefield(z,x) d_wavefield[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
// #define d_data(it,iRec) d_data[(iRec)*(nSteps)+(it)]
// Inject residual traces into the adjoint wavefield: one thread per receiver
// adds the residual sample for time step `it` at the receiver's (z, x) grid
// location (via the d_wavefield(z,x) column-major macro above).
// NOTE(review): d_Lambda and dt are currently unused (the scaled variants are
// commented out); two receivers sharing a grid cell would race — confirm
// receiver positions are unique.
__global__ void res_injection(float *d_wavefield, int nz, float *d_res, \
float *d_Lambda, int it, float dt, int nSteps, int nrec, int *d_z_rec, int *d_x_rec) {
int iRec = threadIdx.x + blockDim.x*blockIdx.x;
// float scale = pow(d_Cp(d_z_rec[iRec], d_x_rec[iRec]),2);
if(iRec >= nrec){
return;
}
// d_wavefield(d_z_rec[iRec], d_x_rec[iRec]) += (d_res[(iRec)*(nSteps)+(it)] + d_res[(iRec)*(nSteps)+(it+1)]) / 2.0 \
// * d_Lambda(d_z_rec[iRec], d_x_rec[iRec]) * dt;
d_wavefield(d_z_rec[iRec], d_x_rec[iRec]) += d_res[(iRec)*(nSteps)+(it)];
// d_wavefield(d_z_rec[iRec], d_x_rec[iRec]) += d_res[(iRec)*(nSteps)+(it)] * d_Lambda(d_z_rec[iRec], d_x_rec[iRec]) * dt;
}
|
1,621
|
#include <cmath>
// Negate every element of a dim1 x dim2 matrix in place.
__host__ __device__
void revert_sign(float *a, size_t dim1, size_t dim2)
{
    const size_t total = dim1 * dim2;
    for (size_t idx = 0; idx < total; ++idx)
        a[idx] = -a[idx];
}
// Element-wise sum over a dim1 x dim2 buffer: result[i] = a[i] + b[i].
__host__ __device__
void sum_vectors(
    float *a,
    float *b,
    float *result,
    size_t dim1,
    size_t dim2
)
{
    const size_t count = dim1 * dim2;
    for (size_t idx = 0; idx < count; ++idx)
        result[idx] = a[idx] + b[idx];
}
// Device-only volatile variant of sum_vectors: result[i] = a[i] + b[i].
// The volatile qualifiers force each element to be re-read/re-written.
__device__
void sum_vectors_v(
    volatile float *a,
    volatile float *b,
    volatile float *result,
    size_t dim1,
    size_t dim2
)
{
    const size_t count = dim1 * dim2;
    for (size_t idx = 0; idx < count; ++idx)
        result[idx] = a[idx] + b[idx];
}
// Dense matrix product, row-major:
// result (a_dim1 x b_dim2) = a (a_dim1 x a_dim2) * b (a_dim2 x b_dim2).
__host__ __device__
void mul_vectors(
    float *a,
    float *b,
    float *result,
    size_t a_dim1,
    size_t a_dim2,
    size_t b_dim2
)
{
    for (size_t col = 0; col < b_dim2; ++col)
        for (size_t row = 0; row < a_dim1; ++row)
        {
            float acc = 0;
            for (size_t k = 0; k < a_dim2; ++k)
                acc += a[row * a_dim2 + k] * b[k * b_dim2 + col];
            result[row * b_dim2 + col] = acc;
        }
}
// Maximum absolute row-sum (the infinity norm) of a dim1 x dim2 matrix.
// (The previous local names said "column", but the outer index walks rows.)
float matrix_norm(float *m, size_t dim1, size_t dim2)
{
    float best = 0;
    for (size_t row = 0; row < dim1; ++row)
    {
        float row_sum = 0;
        for (size_t col = 0; col < dim2; ++col)
            row_sum += std::fabs(m[row * dim2 + col]);
        if (row_sum > best)
            best = row_sum;
    }
    return best;
}
// Invert a dim x dim row-major matrix via Gauss-Jordan elimination,
// writing the inverse into `result` (m_ is left untouched).
// NOTE(review): no pivoting — a (near-)zero landing on the diagonal during
// elimination divides by ~0; verify inputs are well-conditioned.
// FIX: replaced C-style VLAs (non-standard in C++, stack-overflow risk for
// large dim) with heap allocations.
void inverse_matrix(float *m_, float *result, size_t dim)
{
    float *inv = new float[dim * dim];
    float *m = new float[dim * dim];
    // init: inv = identity, m = working copy of the input
    for (size_t i = 0; i < dim; ++i)
        for (size_t j = 0; j < dim; ++j)
        {
            inv[i * dim + j] = (i == j) ? 1.0f : 0.0f;
            m[i * dim + j] = m_[i * dim + j];
        }
    // forward stroke: eliminate below the diagonal, scale each pivot row to 1
    for (size_t i = 0; i < dim; ++i)
    {
        for (size_t j = i + 1; j < dim; ++j)
        {
            float multiplier = m[j * dim + i] / m[i * dim + i];
            for (size_t k = i; k < dim; ++k)
                m[j * dim + k] -= m[i * dim + k] * multiplier;
            for (size_t k = 0; k < dim; ++k)
                inv[j * dim + k] -= inv[i * dim + k] * multiplier;
        }
        float divisor = m[i * dim + i];
        for (size_t j = i; j < dim; ++j)
            m[i * dim + j] /= divisor;
        for (size_t j = 0; j < dim; ++j)
            inv[i * dim + j] /= divisor;
    }
    // return stroke: eliminate above the diagonal (indices counted from the end)
    for (size_t i = 1; i <= dim; ++i)
        for (size_t j = i + 1; j <= dim; ++j)
        {
            float multiplier = m[(dim - j) * dim + dim - i];
            m[(dim - j) * dim + dim - i] -= m[(dim - i) * dim + dim - i] * multiplier;
            for (size_t k = 0; k < dim; ++k)
                inv[(dim - j) * dim + k] -= inv[(dim - i) * dim + k] * multiplier;
        }
    // copy inverse matrix to result
    for (size_t i = 0; i < dim * dim; ++i)
        result[i] = inv[i];
    delete[] m;
    delete[] inv;
}
|
1,622
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 512
#define HIDDEN_SIZE 17
#define ETA 0.3
#define MOMENT 0.3
/*
__device__ float squash(float x ) {
//float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
*/
/*
__global__ void kernel_bpnn_layerforward(float *input_units, float *hidden_units, float *input_weights, int inp, int hidden) {
int threadIdx_x = threadIdx.x;
int element = threadIdx.x + blockDim.x * blockIdx.x;
if(element < inp) {
float val = 0.0;
for(int i=1; i < hidden ; i++) {
val = input_units[element] * input_weights[(element * hidden) + i] ;
// printf("%d, %d, %f, %f, %f\n", element, i, input_weights[(element * hidden) + i] , input_units[element], val);
atomicAdd(&hidden_units[i], val);
}
}
// nt i=1; i < hidden ; i++) {
//(f(element==0) {
// hidden_units[0] = 0.0;
// }
}
*/
// Forward pass of the input layer: accumulates hidden_units_master[i] =
// sum over inputs of input_units[e] * input_weights[e*hidden + i], staging
// inputs and weights in shared memory and reducing per-block before a single
// atomicAdd per hidden unit. Index 0 is treated as a bias slot and zeroed.
// NOTE(review): __syncthreads() sits inside the divergent `if(element < inp)`
// branch — undefined behavior whenever inp is not a multiple of BLOCK_SIZE;
// confirm the caller's sizing.
// NOTE(review): every block writes hidden_units_master[i] = 0.0 while other
// blocks may already be atomicAdd-ing into it — an inter-block race; the
// zeroing likely belongs on the host or in a separate kernel.
__global__ void kernel_bpnn_layerforward(float *input_units_master, float *hidden_units_master, float *input_weights_master, int inp, int hidden) {
int tx = threadIdx.x;
int element = threadIdx.x + blockDim.x * blockIdx.x;
// Store Input Units, Input Weights in shared memory
__shared__ float input_units[BLOCK_SIZE];
__shared__ float input_weights[18*BLOCK_SIZE];
__shared__ float hidden_units[17];
if(element < inp) {
// Read Data from Global memory to Shared memory
input_units[tx] = input_units_master[element];
// printf("PROBLEM------ %d, %d, %f\n", element, tx, input_units[tx]);
int i;
for(i=0; i<hidden; i++) {
input_weights[(tx*hidden)+i] = input_weights_master[(element * hidden) + i];
hidden_units[i] = 0.0;
hidden_units_master[i] = 0.0;
}
// Sync All Threads
__syncthreads();
// Calculate Intermediate results in Shared memory
for(i=1; i<hidden; i++) {
float result = input_units[tx] * input_weights[(tx*hidden)+i];
//hidden_units[i] += result;
atomicAdd(&(hidden_units[i]), result);
//printf("Intermediate: %d, %d, %f * %f, %f \n", element, i,input_units[tx] , input_weights[(tx*hidden)+i], result);
}
__syncthreads();
// Store final results in Main memory
if(tx ==0) {
for(i=1; i<hidden; i++) {
atomicAdd(&(hidden_units_master[i]), hidden_units[i]);
//hidden_units_master[tx] = hidden_units[tx];
// printf("SUM: %d, %d, %f, %f \n", element, i, hidden_units[i], hidden_units_master[i]);
if(element == 0) {
hidden_units_master[0] = 0.0;
}
}
}
}
}
// Output-layer delta: delta = o * (1 - o) * (t - o), i.e. the sigmoid
// derivative times the prediction error. Element 0 is the bias slot and is
// skipped; `err` is currently unused (kept for interface compatibility).
__global__ void gpu_output_error_kernel_function(float *delta, float *t, float *o, int count, float *err) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if(element != 0 && element < count) {
// FIX: float literal avoids promoting the whole expression to double.
delta[element] = o[element] * ( 1.0f - o[element]) * (t[element] - o[element]);
// printf("Output err: %d, %f, %f = %f \n", element, o[element], t[element], delta[element]);
}
}
// Apply the logistic sigmoid 1/(1+e^-x) in place to hidden[1..count)
// (index 0 is the bias slot and is left untouched).
__global__ void kernel_squash(float *hidden, int count) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if(element >0 && element < count) {
float orig = hidden[element];
// FIX: expf + float literals keep the math in single precision
// (exp/1.0 forced a double round trip).
hidden[element] = 1.0f / (1.0f + expf(- orig));
// printf("Element: %d, Orig: %f, ,squash: %f \n", element, orig, hidden[element]);
}
}
// Hidden-layer delta: h * (1 - h) * w * delta_out for each hidden unit.
// NOTE(review): only output_delta_d[1] is read, i.e. this assumes a single
// real output unit (slot 0 being the bias) — confirm against the caller.
__global__ void gpu_hidden_error_kernel_function (float *hidden_delta_d, int hid, float *output_delta_d, int out, float *hidden_weights_d, float *hidden_units_d) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if(element < hid) {
float h = hidden_units_d[element] ;
float sum = output_delta_d[1] * hidden_weights_d[element];
// FIX: float literal avoids promoting the expression to double.
hidden_delta_d[element] = h * (1.0f - h) * sum;
}
}
// Weight update: w += ETA * delta_out * h (one thread per hidden unit).
// NOTE(review): the momentum term is (MOMENT * 0), i.e. always zero — it
// looks like a placeholder for a stored previous delta; confirm whether
// momentum was meant to be implemented. Only delta[1] is used (single
// output unit assumption, as in gpu_hidden_error_kernel_function).
__global__ void gpu_weight_adjust_function(float *delta, int out, float *hidden_units, int hid, float *hidden_weights) {
int element = threadIdx.x + blockDim.x * blockIdx.x;
if(element < hid) {
float new_dw = ((ETA * delta[1] * hidden_units[element]) + (MOMENT * 0));
hidden_weights[element] += new_dw;
//printf("Element: %d, new val = %f, %f \n", element, new_dw, hidden_weights[element]);
}
}
/// Algorithm using Naive method
/*
void gpu_bpnn_layerforward(float *input_units, float *hidden_units, float *input_weights, int inp, int hidden) {
// Place holder to complete input sanity check
//Allocate Blocks
dim3 DimGrid((inp-1)/BLOCK_SIZE +1, 1, 1);
dim3 DimBlock(BLOCK_SIZE+1, 1, 1);
// Invoke CUDA kernel -----------------------------------------------------
kernel_bpnn_layerforward<<<DimGrid, DimBlock>>>(input_units, hidden_units, input_weights, inp, hidden);
cudaDeviceSynchronize();
dim3 DimGrid2((hidden-1)/BLOCK_SIZE +1, 1, 1);
dim3 DimBlock2(BLOCK_SIZE+1, 1, 1);
// Invoke Squashing kernel
kernel_squash<<<DimGrid2, DimBlock2>>>(hidden_units, hidden);
}
*/
// Shared-memory forward pass: accumulate the hidden-unit sums, wait for the
// device, then squash them with the sigmoid kernel (single block of `hidden`
// threads, since the hidden layer is small).
void gpu_bpnn_layerforward(float *input_units, float *hidden_units, float *input_weights, int inp, int hidden) {
    dim3 fwdGrid((inp - 1) / BLOCK_SIZE + 1, 1, 1);
    dim3 fwdBlock(BLOCK_SIZE, 1, 1);
    kernel_bpnn_layerforward<<<fwdGrid, fwdBlock>>>(input_units, hidden_units, input_weights, inp, hidden);
    cudaDeviceSynchronize();
    dim3 squashGrid(1, 1, 1);
    dim3 squashBlock(hidden, 1, 1);
    kernel_squash<<<squashGrid, squashBlock>>>(hidden_units, hidden);
}
// Launch the output-layer delta kernel over `count` elements.
void gpu_output_error_kernel(float *delta, float *target, float *output, int count, float *err) {
    dim3 grid((count - 1) / BLOCK_SIZE + 1, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    gpu_output_error_kernel_function<<<grid, block>>>(delta, target, output, count, err);
}
// Launch the hidden-layer delta kernel over `hid` elements.
void gpu_hidden_error_kernel(float *hidden_delta_d , int hid, float *output_delta_d, int out, float *hidden_weights_d, float *hidden_units_d) {
    dim3 grid((hid - 1) / BLOCK_SIZE + 1, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    gpu_hidden_error_kernel_function<<<grid, block>>>(hidden_delta_d, hid, output_delta_d, out, hidden_weights_d, hidden_units_d);
}
// Launch the weight-update kernel over `hid` elements.
void gpu_weight_adjust(float *delta, int out, float *hidden_units, int hid, float *hidden_weights) {
    dim3 grid((hid - 1) / BLOCK_SIZE + 1, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    gpu_weight_adjust_function<<<grid, block>>>(delta, out, hidden_units, hid, hidden_weights);
}
|
1,623
|
#include "includes.h"
#define TAM 2
// width x width float matrix multiply: d_C = d_A * d_B (row-major),
// one thread per output element with a boundary guard.
__global__ void matrixMultDevice(float* d_A, float* d_B, float* d_C, int width) {
    int Row = blockDim.y * blockIdx.y + threadIdx.y;
    int Col = blockDim.x * blockIdx.x + threadIdx.x;
    if (Row >= width || Col >= width)
        return;
    float acc = 0.0f;
    for (int k = 0; k < width; ++k)
        acc += d_A[Row * width + k] * d_B[k * width + Col];
    d_C[Row * width + Col] = acc;
}
|
1,624
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/****************************************************************************
*
*
* Compile with:
* nvcc -o cudapass cuda_password.cu
*
* Dr Kevan Buckley, University of Wolverhampton, January 2018
*****************************************************************************/
// Compare `attempt` character-by-character against four hard-coded
// plaintext passwords, printing each one that matches.
// NOTE(review): only a full match with plain_password4 returns 1; matches
// with passwords 1-3 print and then fall through to return 0 — confirm
// whether that asymmetry is intended.
__device__ int is_a_match(char *attempt){
char plain_password1[] ="AA2434";
char plain_password2[] ="RA3333";
char plain_password3[] ="MI2222";
char plain_password4[] ="TA4444";
// Four independent cursors over the same attempt string.
char *r = attempt;
char *m = attempt;
char *t = attempt;
char *a = attempt;
char *r1 = plain_password1;
char *r2 = plain_password2;
char *r3 = plain_password3;
char *r4 = plain_password4;
// Each loop advances while characters agree; reaching '\0' means full match.
while(*r ==*r1){
if(*r == '\0')
{
printf("password:%s\n", plain_password1);
break;
}
r++;
r1++;
}
while(*m ==*r2){
if(*m == '\0')
{
printf("password:%s\n", plain_password2);
break;
}
m++;
r2++;
}
while(*t ==*r3){
if(*t == '\0')
{
printf("password:%s\n", plain_password3);
break;
}
t++;
r3++;
}
while(*a ==*r4){
if(*a == '\0')
{
printf("password: %s\n", plain_password4);
return 1;
}
a++;
r4++;
}
return 0;
}
// Brute-force search over a 2-letter + 4-digit password space:
// blockIdx.x and threadIdx.x (+65, i.e. 'A') choose the two leading letters,
// so a <<<26, 26>>> launch covers AA..ZZ; the nested loops enumerate every
// 4-digit suffix and test each candidate with is_a_match.
__global__ void kernel(){
char n1, n2, n3, n4;
char password[7];
password[6] ='\0';
int i = blockIdx.x +65;
int j = threadIdx.x+65;
char firstMatch =i;
char secondMatch =j;
password[0] =firstMatch;
password[1] =secondMatch;
for(n1='0'; n1<='9'; n1++){
for(n2='0'; n2<='9'; n2++){
for(n3='0'; n3<='9'; n3++){
for(n4='0'; n4<='9'; n4++){
password[2] =n1;
password[3] =n2;
password[4] =n3;
password[5] =n4;
if(is_a_match(password)){
}
else{
//printf("tried: %s\n",password);
}
}
}
}
}
}
// Store (finish - start) in nanoseconds into *difference.
// Returns 0 when the difference is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) {
    long long int secs = finish->tv_sec - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    if (nanos < 0) {
        // borrow one second from the seconds part
        --secs;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return !(*difference > 0);
}
// Launch the brute-force password search (26 blocks x 26 threads = one
// thread per leading letter pair) and report wall-clock time.
int main() {
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel<<<26,26>>>();
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported way to wait for the kernel (and flush device printf).
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
1,625
|
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// -------------------------------------------------------------------------------------
// 2D max-pooling forward: each thread computes one pooled output, scanning a
// poolsize x poolsize window with stride `poolstrike` and recording both the
// max value and its within-window index (for the backward pass).
// blockIdx.z selects the 2D plane; num/channels are currently unused —
// presumably gridDim.z == num*channels, TODO confirm at the launch site.
// FIX: the max was seeded with a -9999 sentinel, giving wrong results (and a
// bogus index) when every input in the window is below -9999; seed from the
// first window element instead.
__global__ void FMaxPoolForward(const float* ori_data, float* pool_data, int* indice_data,
    const int num, const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int poolsize, const int poolstrike) {
    int poolx = threadIdx.x + blockIdx.x * blockDim.x;
    int pooly = threadIdx.y + blockIdx.y * blockDim.y;
    int poolz = blockIdx.z;
    if (pooly < pooled_height && poolx < pooled_width) {
        int hstart = pooly * poolstrike;
        int hend = hstart + poolsize;
        int wstart = poolx * poolstrike;
        int wend = wstart + poolsize;
        ori_data += poolz * height * width;  // jump to this plane
        float maxval = ori_data[hstart * width + wstart];
        int indice = 0;
        for (int h = hstart; h < hend; h++) {
            for (int w = wstart; w < wend; w++) {
                if (ori_data[h * width + w] > maxval) {
                    maxval = ori_data[h * width + w];
                    indice = (h - hstart) * poolsize + w - wstart;
                }
            }
        }
        pool_data[poolx + pooly * pooled_width + poolz * pooled_height * pooled_width] = maxval;
        indice_data[poolx + pooly * pooled_width + poolz * pooled_height * pooled_width] = indice;
    }
}
//---------------------------------------------------------------------------------------
// Max-pool backward: scatter each pooled value back to the input position
// recorded in indice_data (hoffset/woffset decoded from the flat within-
// window index). NOTE(review): positions not selected by any window are
// never written — this assumes reverse_data was zero-initialized by the
// caller; with overlapping windows (poolstrike < poolsize) two threads may
// write the same input cell — confirm the configuration.
__global__ void FMaxPoolBackward(float* reverse_data, float* pool_data, int* indice_data,
const int num, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int poolsize, const int poolstrike) {
int poolx = threadIdx.x + blockIdx.x * blockDim.x;
int pooly = threadIdx.y + blockIdx.y * blockDim.y;
int poolz = blockIdx.z;
if (pooly < pooled_height && poolx < pooled_width) {
float maxdata = pool_data[poolx + pooly * pooled_width + poolz * pooled_height * pooled_width] ;
int posit = indice_data[poolx + pooly * pooled_width + poolz * pooled_height * pooled_width] ;
int hstart = pooly * poolstrike;
int wstart = poolx * poolstrike;
int woffset = posit % poolsize;
int hoffset = int(posit / poolsize);
int h = hstart + hoffset;
int w = wstart + woffset;
reverse_data += poolz * height * width;
reverse_data[h * width + w] = maxdata;
}
}
//--------------------------------------------------------------------------------------------
// Forward pass with a fixed argmax: instead of re-scanning the window, reuse
// the within-window index previously stored in indice_data and gather that
// single input element into pool_data.
__global__ void FMaxPoolForwardFix(const float* ori_data, float* pool_data, int* indice_data,
    const int num, const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int poolsize, const int poolstrike) {
    int poolx = threadIdx.x + blockIdx.x * blockDim.x;
    int pooly = threadIdx.y + blockIdx.y * blockDim.y;
    int poolz = blockIdx.z;
    if (pooly >= pooled_height || poolx >= pooled_width)
        return;
    const int outIdx = poolx + pooly * pooled_width + poolz * pooled_height * pooled_width;
    const int posit = indice_data[outIdx];
    // decode the flat within-window index back into (row, col) offsets
    const int hoffset = posit / poolsize;
    const int woffset = posit % poolsize;
    const int h = pooly * poolstrike + hoffset;
    const int w = poolx * poolstrike + woffset;
    ori_data += poolz * height * width;
    pool_data[outIdx] = ori_data[h * width + w];
}
|
1,626
|
#include <stdio.h>
#define THREADS 64
// from http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print file/line context for a failed CUDA call and optionally exit.
// FIX: `file` is now const char* — the gpuErrchk macro passes __FILE__,
// a string literal, and binding a literal to mutable char* is ill-formed
// in modern C++.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
// Deliberately racy kernel (per the in-code comments): every thread writes
// an even-aligned value, reads it back after a barrier, then performs a
// write-write race — two threads of an even/odd pair target the same slot.
// Output is therefore nondeterministic by design (race-detector exercise).
__global__ void device_global(unsigned int *input_array, int num_elements) {
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
// all threads write a value to the array
input_array[my_index] = my_index - (my_index%2);
__syncthreads(); // all initial values are written
// all threads grab a value from the array
// we know this will always be in bounds
int new_index = input_array[my_index];
__syncthreads(); // all values are read
// use the values to write to the array, a write-write race
input_array[new_index] = my_index;
}
// Host harness for the race demo: allocate matching host/device arrays,
// seed them with a sentinel (777777), run the kernel, and print the result
// grid so the racy slots are visible.
int main(void) {
// how big our array for interfacing with the GPU will be
int num_elements = THREADS;
int num_bytes = sizeof(unsigned int) * num_elements;
// pointers for the interfacing arrays
unsigned int *host_array = 0;
unsigned int *device_array = 0;
// malloc for host and device
host_array = (unsigned int*) malloc(num_bytes);
cudaMalloc((void **) &device_array, num_bytes);
// check the mallocs
if (host_array == 0) {
printf("Unable to allocate memory on host");
return 1;
}
if (device_array == 0) {
printf("Unable to allocate memory on device");
return 1;
}
// set host array values (777777 sentinel: any surviving one means "never written")
for (int i = 0; i<num_elements; i++) {
host_array[i] = 777777;
}
// copy them to the GPU
cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);
// define block and grid sizes (ceil-div; here exactly one block)
int block_size = THREADS;
int grid_size = (num_elements + block_size - 1) / block_size;
// run GPU code
device_global<<<grid_size, block_size>>>(device_array, num_elements);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// copy output to host
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print any information, 10 values per row
for (int i=0; i<num_elements; i++) {
printf("%6u, ", host_array[i]);
if (i%10 == 9) {
printf(" \n");
}
}
printf("\n");
// free memory
free(host_array);
cudaFree(device_array);
}
|
1,627
|
#include <stdio.h>
#include <cuda.h>
// Exercise skeleton: the device-side "hello" (e.g. a printf) goes in the
// marked section below. Intentionally empty as distributed.
__global__ void helloKernel() {
 /* ----- YOUR CODE HERE ----- */
 /* -------------------------- */
}
// Exercise skeleton: prints from the CPU; the kernel launch (e.g.
// helloKernel<<<1, 1>>>()) goes in the marked section. The final
// cudaDeviceSynchronize() waits for the device and flushes device printf.
int main() {
printf("Hello from the CPU\n");
/* ----- YOUR CODE HERE ----- */
/* -------------------------- */
cudaDeviceSynchronize();
return 0;
}
|
1,628
|
#include <iostream>
#include <cassert>
#include <chrono>
using namespace std;
constexpr long WIDTH = 128;
constexpr long TILE_WIDTH = 16;
// Single-threaded reference multiply: P = M x N, WIDTH x WIDTH, row-major.
void MatmulOnCPU(double* M, double* N, double* P) {
    for (int row = 0; row < WIDTH; ++row) {
        for (int col = 0; col < WIDTH; ++col) {
            double acc = 0;
            for (int k = 0; k < WIDTH; ++k)
                acc += M[row * WIDTH + k] * N[k * WIDTH + col];
            P[row * WIDTH + col] = acc;
        }
    }
}
__global__ void MatrixMulKernel(double *Md, double *Nd, double *Pd);
// Upload M and N, run the tiled kernel over a TILE_WIDTH x TILE_WIDTH block
// grid, download P, and free the device buffers. The final cudaMemcpy is
// blocking, so no explicit synchronize is needed before reading P.
void MatmulOnGPU(double* M, double* N, double* P) {
    constexpr long size = WIDTH * WIDTH;
    const size_t bytes = size * sizeof(double);
    double *Md, *Nd, *Pd;
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid(WIDTH / TILE_WIDTH, WIDTH / TILE_WIDTH);
    cudaMalloc(&Md, bytes);
    cudaMalloc(&Nd, bytes);
    cudaMalloc(&Pd, bytes);
    cudaMemcpy(Md, M, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(Nd, N, bytes, cudaMemcpyHostToDevice);
    MatrixMulKernel <<<dimGrid, dimBlock>>> (Md, Nd, Pd);
    cudaMemcpy(P, Pd, bytes, cudaMemcpyDeviceToHost);
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
// Tiled matrix multiply: each block computes one TILE_WIDTH x TILE_WIDTH
// output tile of Pd = Md x Nd, staging matching tiles of Md and Nd through
// shared memory. Requires WIDTH % TILE_WIDTH == 0 (asserted in main), since
// the shared-memory loads have no bounds guard.
__global__ void MatrixMulKernel(double *Md, double *Nd, double *Pd) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
double p = 0;
for (int m = 0; m < WIDTH / TILE_WIDTH; m++) {
// get the start position of sub-matrix
auto subMd = Md + by * TILE_WIDTH * WIDTH + m * TILE_WIDTH;
auto subNd = Nd + m * TILE_WIDTH * WIDTH + bx * TILE_WIDTH;
__shared__ double subMds[TILE_WIDTH][TILE_WIDTH];
__shared__ double subNds[TILE_WIDTH][TILE_WIDTH];
// each thread load an element from global memory to shared memory
subMds[ty][tx] = subMd[ty * WIDTH + tx];
subNds[ty][tx] = subNd[ty * WIDTH + tx];
// barrier: the whole tile must be staged before anyone reads it
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++) {
p += subMds[ty][k] * subNds[k][tx];
}
// barrier: finish reading before the next iteration overwrites the tiles
__syncthreads();
}
Pd[(by * TILE_WIDTH + ty) * WIDTH + (bx * TILE_WIDTH + tx)] = p;
}
// Compare CPU and GPU matrix multiplies for correctness and timing.
// FIX: the four new[] buffers were never freed (leak at program exit).
int main() {
    assert(WIDTH % TILE_WIDTH == 0);
    constexpr long size = WIDTH * WIDTH;
    double *M = new double[size];
    double *N = new double[size];
    for (int i = 0; i < size; i++) {
        M[i] = i;
        N[i] = i;
    }
    double *PCPU = new double[size];
    double *PGPU = new double[size];
    chrono::system_clock::time_point begin, end;
    begin = chrono::system_clock::now();
    MatmulOnCPU(M, N, PCPU);
    end = chrono::system_clock::now();
    auto cpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();
    begin = chrono::system_clock::now();
    MatmulOnGPU(M, N, PGPU);
    end = chrono::system_clock::now();
    auto gpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();
#ifdef DEBUG
    for (int i = 0; i < WIDTH; i++) {
        for (int j = 0; j < WIDTH; j++) {
            printf("%.2lf\t", PCPU[i * WIDTH + j]);
        }
        printf("\n");
    }
    for (int i = 0; i < WIDTH; i++) {
        for (int j = 0; j < WIDTH; j++) {
            printf("%.2lf\t", PGPU[i * WIDTH + j]);
        }
        printf("\n");
    }
#endif
    // element-wise comparison with an absolute tolerance
    bool correct = true;
    for (long i = 0; i < size; i++) {
        if (abs(PCPU[i] - PGPU[i]) > 1e-4) {
            correct = false;
            printf("at i = %ld, %lf -- %lf -- %lf\n", i, PCPU[i], PGPU[i], PCPU[i] - PGPU[i]);
            // break;
        }
    }
    printf("=====================Summary=======================\n");
    if (correct) {
        printf("\033[1;32mThe result is correct!\033[0m\n");
    }
    else {
        printf("\033[1;31mThe result is wrong!\033[0m\n");
    }
    printf("cpu:\t %lld us\n", cpu_duration);
    printf("gpu:\t %lld us\n", gpu_duration);
    printf("speedup:\t %lf\n", cpu_duration / (double)gpu_duration);
    printf("===================================================\n");
    // FIX: release the heap buffers (previously leaked).
    delete[] M;
    delete[] N;
    delete[] PCPU;
    delete[] PGPU;
}
|
1,629
|
/*
Author : Kim, KyoungHo (rain_woo@korea.ac.kr)
Ki-Hwan Kim (wbkifun@korea.ac.kr)
Written date : 2009. 6. 11
last update :
Copyright : GNU GPL
*/
// Inject a sinusoidal source into the 3D field F: thread idx walks the z
// line at fixed (x, y) = (Nx/2 - 30, Ny/2 - 50), adding sin(0.1 * tstep).
// The commented lines are alternative source positions/orientations.
// NOTE(review): sin() with a double argument runs in double precision on a
// float field — presumably fine for a source term, but sinf would be cheaper.
__global__ void update_src( int Nx, int Ny, int Nz, int tstep, float *F ) {
int idx, ijk;
idx = threadIdx.x;
//ijk = (idx+1)*Ny*Nz + (Ny/2)*Nz + (Nz/2);
//ijk = (idx+1)*Ny*Nz + (Ny/2 - 30)*Nz + (Nz/2 - 50);
//ijk = (Nx/2 - 30)*Ny*Nz + (idx)*Nz + (Nz/2 - 50);
ijk = (Nx/2-30)*Ny*Nz + (Ny/2-50)*Nz + idx;
F[ijk] += sin(0.1*tstep);
}
|
1,630
|
#include <iostream>
#include <cuda.h>
#include <vector>
#include <cuda_runtime.h>
// #include "../include/mycudaheader.h"
// #include "precond.h"
using namespace std;
// Jacobi (diagonal) preconditioner: c = diag(value) * r, one thread per row.
__global__
void Jacobi_Precond_GPU(double* c, double* value, double* r, size_t num_rows)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if ( row >= num_rows )
        return;
    c[row] = value[row] * r[row];
}
|
1,631
|
#include "includes.h"
// Flatten (x, y, z, direction d) into a 1D index over direction-major
// NX*NY*NZ planes. d is 1-based (hence d-1); NX/NY/NZ come from includes.h.
__device__ __forceinline__ size_t gpu_fieldn_index(unsigned int x, unsigned int y, unsigned int z, unsigned int d)
{
return (NX*(NY*(NZ*(d-1)+z)+y)+x);
}
__global__ void gpu_stream(double *f0, double *f1, double *f2, double *h0, double *h1, double *h2, double *temp0, double *temp1, double *temp2)
{
unsigned int y = blockIdx.y;
unsigned int z = blockIdx.z;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
// streaming step
unsigned int xp1 = (x + 1) % NX;
unsigned int yp1 = (y + 1) % NY;
unsigned int zp1 = (z + 1) % NZ;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
unsigned int zm1 = (NZ + z - 1) % NZ;
// direction numbering scheme
// 6 2 5
// 3 0 1
// 7 4 8
// load populations from adjacent nodes (ft is post-streaming population of f1)
// flows
f1[gpu_fieldn_index(x, y, z, 1)] = f2[gpu_fieldn_index(xm1, y, z, 1)];
f1[gpu_fieldn_index(x, y, z, 2)] = f2[gpu_fieldn_index(xp1, y, z, 2)];
f1[gpu_fieldn_index(x, y, z, 3)] = f2[gpu_fieldn_index(x, ym1, z, 3)];
f1[gpu_fieldn_index(x, y, z, 4)] = f2[gpu_fieldn_index(x, yp1, z, 4)];
f1[gpu_fieldn_index(x, y, z, 5)] = f2[gpu_fieldn_index(x, y, zm1, 5)];
f1[gpu_fieldn_index(x, y, z, 6)] = f2[gpu_fieldn_index(x, y, zp1, 6)];
f1[gpu_fieldn_index(x, y, z, 7)] = f2[gpu_fieldn_index(xm1, ym1, z, 7)];
f1[gpu_fieldn_index(x, y, z, 8)] = f2[gpu_fieldn_index(xp1, yp1, z, 8)];
f1[gpu_fieldn_index(x, y, z, 9)] = f2[gpu_fieldn_index(xm1, y, zm1, 9)];
f1[gpu_fieldn_index(x, y, z, 10)] = f2[gpu_fieldn_index(xp1, y, zp1, 10)];
f1[gpu_fieldn_index(x, y, z, 11)] = f2[gpu_fieldn_index(x, ym1, zm1, 11)];
f1[gpu_fieldn_index(x, y, z, 12)] = f2[gpu_fieldn_index(x, yp1, zp1, 12)];
f1[gpu_fieldn_index(x, y, z, 13)] = f2[gpu_fieldn_index(xm1, yp1, z, 13)];
f1[gpu_fieldn_index(x, y, z, 14)] = f2[gpu_fieldn_index(xp1, ym1, z, 14)];
f1[gpu_fieldn_index(x, y, z, 15)] = f2[gpu_fieldn_index(xm1, y, zp1, 15)];
f1[gpu_fieldn_index(x, y, z, 16)] = f2[gpu_fieldn_index(xp1, y, zm1, 16)];
f1[gpu_fieldn_index(x, y, z, 17)] = f2[gpu_fieldn_index(x, ym1, zp1, 17)];
f1[gpu_fieldn_index(x, y, z, 18)] = f2[gpu_fieldn_index(x, yp1, zm1, 18)];
f1[gpu_fieldn_index(x, y, z, 19)] = f2[gpu_fieldn_index(xm1, ym1, zm1, 19)];
f1[gpu_fieldn_index(x, y, z, 20)] = f2[gpu_fieldn_index(xp1, yp1, zp1, 20)];
f1[gpu_fieldn_index(x, y, z, 21)] = f2[gpu_fieldn_index(xm1, ym1, zp1, 21)];
f1[gpu_fieldn_index(x, y, z, 22)] = f2[gpu_fieldn_index(xp1, yp1, zm1, 22)];
f1[gpu_fieldn_index(x, y, z, 23)] = f2[gpu_fieldn_index(xm1, yp1, zm1, 23)];
f1[gpu_fieldn_index(x, y, z, 24)] = f2[gpu_fieldn_index(xp1, ym1, zp1, 24)];
f1[gpu_fieldn_index(x, y, z, 25)] = f2[gpu_fieldn_index(xp1, ym1, zm1, 25)];
f1[gpu_fieldn_index(x, y, z, 26)] = f2[gpu_fieldn_index(xm1, yp1, zp1, 26)];
// charges
h1[gpu_fieldn_index(x, y, z, 1)] = h2[gpu_fieldn_index(xm1, y, z, 1)];
h1[gpu_fieldn_index(x, y, z, 2)] = h2[gpu_fieldn_index(xp1, y, z, 2)];
h1[gpu_fieldn_index(x, y, z, 3)] = h2[gpu_fieldn_index(x, ym1, z, 3)];
h1[gpu_fieldn_index(x, y, z, 4)] = h2[gpu_fieldn_index(x, yp1, z, 4)];
h1[gpu_fieldn_index(x, y, z, 5)] = h2[gpu_fieldn_index(x, y, zm1, 5)];
h1[gpu_fieldn_index(x, y, z, 6)] = h2[gpu_fieldn_index(x, y, zp1, 6)];
h1[gpu_fieldn_index(x, y, z, 7)] = h2[gpu_fieldn_index(xm1, ym1, z, 7)];
h1[gpu_fieldn_index(x, y, z, 8)] = h2[gpu_fieldn_index(xp1, yp1, z, 8)];
h1[gpu_fieldn_index(x, y, z, 9)] = h2[gpu_fieldn_index(xm1, y, zm1, 9)];
h1[gpu_fieldn_index(x, y, z, 10)] = h2[gpu_fieldn_index(xp1, y, zp1, 10)];
h1[gpu_fieldn_index(x, y, z, 11)] = h2[gpu_fieldn_index(x, ym1, zm1, 11)];
h1[gpu_fieldn_index(x, y, z, 12)] = h2[gpu_fieldn_index(x, yp1, zp1, 12)];
h1[gpu_fieldn_index(x, y, z, 13)] = h2[gpu_fieldn_index(xm1, yp1, z, 13)];
h1[gpu_fieldn_index(x, y, z, 14)] = h2[gpu_fieldn_index(xp1, ym1, z, 14)];
h1[gpu_fieldn_index(x, y, z, 15)] = h2[gpu_fieldn_index(xm1, y, zp1, 15)];
h1[gpu_fieldn_index(x, y, z, 16)] = h2[gpu_fieldn_index(xp1, y, zm1, 16)];
h1[gpu_fieldn_index(x, y, z, 17)] = h2[gpu_fieldn_index(x, ym1, zp1, 17)];
h1[gpu_fieldn_index(x, y, z, 18)] = h2[gpu_fieldn_index(x, yp1, zm1, 18)];
h1[gpu_fieldn_index(x, y, z, 19)] = h2[gpu_fieldn_index(xm1, ym1, zm1, 19)];
h1[gpu_fieldn_index(x, y, z, 20)] = h2[gpu_fieldn_index(xp1, yp1, zp1, 20)];
h1[gpu_fieldn_index(x, y, z, 21)] = h2[gpu_fieldn_index(xm1, ym1, zp1, 21)];
h1[gpu_fieldn_index(x, y, z, 22)] = h2[gpu_fieldn_index(xp1, yp1, zm1, 22)];
h1[gpu_fieldn_index(x, y, z, 23)] = h2[gpu_fieldn_index(xm1, yp1, zm1, 23)];
h1[gpu_fieldn_index(x, y, z, 24)] = h2[gpu_fieldn_index(xp1, ym1, zp1, 24)];
h1[gpu_fieldn_index(x, y, z, 25)] = h2[gpu_fieldn_index(xp1, ym1, zm1, 25)];
h1[gpu_fieldn_index(x, y, z, 26)] = h2[gpu_fieldn_index(xm1, yp1, zp1, 26)];
// temperature
temp1[gpu_fieldn_index(x, y, z, 1)] = temp2[gpu_fieldn_index(xm1, y, z, 1)];
temp1[gpu_fieldn_index(x, y, z, 2)] = temp2[gpu_fieldn_index(xp1, y, z, 2)];
temp1[gpu_fieldn_index(x, y, z, 3)] = temp2[gpu_fieldn_index(x, ym1, z, 3)];
temp1[gpu_fieldn_index(x, y, z, 4)] = temp2[gpu_fieldn_index(x, yp1, z, 4)];
temp1[gpu_fieldn_index(x, y, z, 5)] = temp2[gpu_fieldn_index(x, y, zm1, 5)];
temp1[gpu_fieldn_index(x, y, z, 6)] = temp2[gpu_fieldn_index(x, y, zp1, 6)];
temp1[gpu_fieldn_index(x, y, z, 7)] = temp2[gpu_fieldn_index(xm1, ym1, z, 7)];
temp1[gpu_fieldn_index(x, y, z, 8)] = temp2[gpu_fieldn_index(xp1, yp1, z, 8)];
temp1[gpu_fieldn_index(x, y, z, 9)] = temp2[gpu_fieldn_index(xm1, y, zm1, 9)];
temp1[gpu_fieldn_index(x, y, z, 10)] = temp2[gpu_fieldn_index(xp1, y, zp1, 10)];
temp1[gpu_fieldn_index(x, y, z, 11)] = temp2[gpu_fieldn_index(x, ym1, zm1, 11)];
temp1[gpu_fieldn_index(x, y, z, 12)] = temp2[gpu_fieldn_index(x, yp1, zp1, 12)];
temp1[gpu_fieldn_index(x, y, z, 13)] = temp2[gpu_fieldn_index(xm1, yp1, z, 13)];
temp1[gpu_fieldn_index(x, y, z, 14)] = temp2[gpu_fieldn_index(xp1, ym1, z, 14)];
temp1[gpu_fieldn_index(x, y, z, 15)] = temp2[gpu_fieldn_index(xm1, y, zp1, 15)];
temp1[gpu_fieldn_index(x, y, z, 16)] = temp2[gpu_fieldn_index(xp1, y, zm1, 16)];
temp1[gpu_fieldn_index(x, y, z, 17)] = temp2[gpu_fieldn_index(x, ym1, zp1, 17)];
temp1[gpu_fieldn_index(x, y, z, 18)] = temp2[gpu_fieldn_index(x, yp1, zm1, 18)];
temp1[gpu_fieldn_index(x, y, z, 19)] = temp2[gpu_fieldn_index(xm1, ym1, zm1, 19)];
temp1[gpu_fieldn_index(x, y, z, 20)] = temp2[gpu_fieldn_index(xp1, yp1, zp1, 20)];
temp1[gpu_fieldn_index(x, y, z, 21)] = temp2[gpu_fieldn_index(xm1, ym1, zp1, 21)];
temp1[gpu_fieldn_index(x, y, z, 22)] = temp2[gpu_fieldn_index(xp1, yp1, zm1, 22)];
temp1[gpu_fieldn_index(x, y, z, 23)] = temp2[gpu_fieldn_index(xm1, yp1, zm1, 23)];
temp1[gpu_fieldn_index(x, y, z, 24)] = temp2[gpu_fieldn_index(xp1, ym1, zp1, 24)];
temp1[gpu_fieldn_index(x, y, z, 25)] = temp2[gpu_fieldn_index(xp1, ym1, zm1, 25)];
temp1[gpu_fieldn_index(x, y, z, 26)] = temp2[gpu_fieldn_index(xm1, yp1, zp1, 26)];
}
|
1,632
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise addition of two nx-by-ny matrices: c = a + b.
// Expects a 2D launch whose grid covers at least nx x ny threads;
// out-of-range threads in the grid tail do nothing.
__global__ void addKernel(float *c, const float *a, const float *b,int nx,int ny)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < ny && col < nx)
    {
        int idx = row * nx + col;
        c[idx] = a[idx] + b[idx];
    }
}
// Helper function for using CUDA to add vectors in parallel.
// mat_a/mat_b/mat_c are DEVICE pointers to nx*ny floats; mat_c receives a+b.
void addWithCuda(float* mat_a,float* mat_b,float* mat_c,int nx,int ny)
{
    dim3 block(32, 32);
    // ceil-div so the grid fully covers the nx x ny domain
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    addKernel<<<grid, block>>>(mat_c,mat_a,mat_b,nx,ny);
    // Kernel launches are asynchronous and return no status directly:
    // check for launch-configuration errors, then synchronize to surface
    // any execution errors before the caller reads mat_c.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "addWithCuda failed: %s\n", cudaGetErrorString(err));
}
|
1,633
|
//optimization homework #4 cs 677 Theodore Jagodits
#include <stdio.h>
#include <stdlib.h>
#include "string.h"
#include <iostream>
#define DEFAULT_SIZE 128
#define DEFAULT_WIDTH 128
#define DEFAULT_HEIGHT 128
#define TILE_SIZE 16
// One thread per row 'id' (0 <= id < width):
//  - writes the running prefix sum of inp2's row into result, and
//  - adds inp1[j] * sum_k(inp1[k]) to each element (k over 'height' items).
// inp1 holds 'height' floats; inp2/result are width x height, row-major.
__global__ void unknown_algo(float *inp1, float *inp2, float *result, int width, int height){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: guard the grid tail. ceil(width/block) blocks can launch more
    // threads than rows; unguarded threads read/write out of bounds.
    if (id >= width) return;
    float temp, k_loop_temp, inpt1_s;
    temp = 0.0f;
    for(int j = 0; j < height; j++){
        temp += inp2[id * width + j];
        result[id * width + j] = temp;
        k_loop_temp = 0.0f;
        inpt1_s = inp1[j];
        // inner accumulation: inp1[j] * sum of all inp1 entries
        for(int k = 0; k < height; k++){
            k_loop_temp += inpt1_s * inp1[k];
        }
        result[id * width + j] += k_loop_temp;
    }
}
// Driver: allocates inputs, runs unknown_algo on the GPU, prints the result
// matrix and the measured kernel time.
int main( int argc, char **argv ){
    int width = DEFAULT_WIDTH;
    int height = DEFAULT_HEIGHT;
    if(argc == 3){
        width = atoi(argv[1]);
        height = atoi(argv[2]);
    }
    // buffer sizes in bytes: inp1 holds 'height' floats, the matrices width*height
    int input1_bytes = height * sizeof(float);
    int num_bytes = width * height * sizeof(float);
    // event timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // device pointers -- allocated by cudaMalloc only. (BUGFIX: the original
    // malloc'ed host blocks into these pointers and then leaked them by
    // overwriting the pointers with cudaMalloc.)
    float *d_input1 = NULL;
    float *d_input2 = NULL;
    float *d_result = NULL;
    cudaMalloc(&d_input1, input1_bytes);
    cudaMalloc(&d_input2, num_bytes);
    cudaMalloc(&d_result, num_bytes);
    // host buffers
    float *h_input1 = (float *) malloc(input1_bytes);
    float *h_input2 = (float *) malloc(num_bytes);
    float *h_result = (float *) malloc(num_bytes);
    // fill inputs with ones. (BUGFIX: h_input1 has 'height' entries, so fill
    // it over height -- the original wrote 'width' entries and overflowed
    // the buffer whenever width > height.)
    for(int o = 0; o < height; o++){
        h_input1[o] = 1;
    }
    for(int o = 0; o < width; o++){
        for(int p = 0; p < height; p++){
            h_input2[width * o + p] = 1;
        }
    }
    // copy inputs to the device
    cudaMemcpy(d_input1, h_input1, input1_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_input2, h_input2, num_bytes, cudaMemcpyHostToDevice);
    // one thread per row, ceil-div grid
    int block_size = 128;
    int grid_size = (width + block_size - 1) / block_size;
    // start timer
    cudaEventRecord(start);
    // run kernel
    unknown_algo<<< grid_size, block_size >>> (d_input1, d_input2, d_result, width, height);
    // end timer
    cudaEventRecord(stop);
    // Copy result back to host (blocking, so the kernel has finished after this)
    cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // print output
    for(int o = 0; o < width; o++){
        for(int p = 0; p < height; p++){
            printf("%d ", (int)h_result[o*width + p]);
        }
        printf("\n");
    }
    printf("time for execution: %lf ms\n", milliseconds);
    // release all resources (events were previously leaked too)
    free(h_input1);
    free(h_input2);
    free(h_result);
    cudaFree(d_input1);
    cudaFree(d_input2);
    cudaFree(d_result);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
1,634
|
/*
Author: Su, Ming Yi
Date: 11/18/2018
Goal: use cuda to reverse matrix
How to compile it:
module load cuda
nvcc -o example_4 example_4.cu
How to run it:
./example_4
*/
#include "stdio.h"
// Kernel: each thread writes its linearized global id into A at that index,
// so after the launch A[t] == t for every thread t.
// Also logs per-thread coordinates via device printf (debugging only; slow).
__global__ void initArray(int *A)
{
    printf("blockIdx.x = %d, blockDim.x = %d, threadIdx.x = %d\n", blockIdx.x, blockDim.x, threadIdx.x);
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    A[tid] = tid;
}
// Host driver: initializes a 64-int device array with thread ids and prints
// it as an 8x8 table (indexed column-major: h_a[i + j*8]).
int main()
{
    printf("Hello World.\n");
    int *h_a; // pointer for host memory
    int *d_a; // pointer for device memory
    // define thread hierarchy: 8 blocks x 8 threads = 64 data items
    int num_blocks= 8;
    int num_th_per_blk = 8;
    // one thread per data item
    size_t memSize;
    memSize = num_blocks*num_th_per_blk * sizeof(int);
    h_a= (int*)malloc(memSize);
    cudaMalloc((void **) &d_a, memSize);
    // launch kernel
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_th_per_blk);
    initArray<<<dimGrid, dimBlock>>>(d_a);
    // retrieve results (cudaMemcpy blocks, so no explicit sync is needed)
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
    for (int i=0;i<num_blocks;i++)
    {
        for(int j=0;j<num_th_per_blk;j++)
        {
            printf("h_a[%d][%d] = %d ", i, j, h_a[i+j*num_th_per_blk]);
        }
        printf("\n");
    }
    // BUGFIX: release host and device memory (previously leaked)
    free(h_a);
    cudaFree(d_a);
    return 0;
}
|
1,635
|
// Author: Ulises Olivares
// uolivares@unam.mx
// Oct 22, 2020
#include<iostream>
#include<stdio.h>
#include<time.h>
#include<cstdlib>
#include<math.h>
#define n 900000
#define m 10000
using namespace std;
//Global variables
long long int sizeN = n * sizeof(float);
long long int sizeM = m * sizeof(float);
float h_N[n] , h_M[m], h_P[n];
int threads = 512;
int blocks = ceil(float(n)/float(threads));
__constant__ float c_M[m];
// GPU timers using CUDA events
float globalMemTimer = 0, constantMemTimer = 0;
// Method definition
void generateRandom(float *h_a, int size);
void parallelConvolution1D();
void parallelConvolutionConstant1D();
template <typename vec>
void printVector(vec *V, int size);
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width);
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width);
// Entry point: fills N (signal, n floats) and M (mask, m floats) with random
// values, then runs the 1D convolution twice -- with the mask in global
// memory and in constant memory -- printing the elapsed time of each.
int main(){
    //init N and M with random numbers
    generateRandom(h_N, n);
    generateRandom(h_M, m);
    // Parallel convolution 1D kernel (mask in global memory)
    parallelConvolution1D();
    // Parallel convolution 1D (mask in constant memory)
    parallelConvolutionConstant1D();
    return 0;
}
// 1D convolution of signal N (length Width) with mask M (length Mask_Width),
// mask read from GLOBAL memory; one output element P[i] per thread.
// Out-of-range signal positions contribute 0 (zero padding at the borders).
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUGFIX: guard the grid tail -- blocks*threads can exceed Width
    // (e.g. 1758*512 = 900096 > 900000) and the unguarded P[i] write
    // was out of bounds.
    if (i >= Width) return;
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point + j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point + j]*M[j];
        }
    }
    P[i] = Pvalue;
}
// Same 1D convolution as CUDAConvolution1D, but the mask is read from the
// file-scope __constant__ array c_M (broadcast-cached on chip).
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUGFIX: guard the grid tail -- blocks*threads can exceed Width and the
    // unguarded P[i] write was out of bounds.
    if (i >= Width) return;
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point + j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point + j]*c_M[j];
        }
    }
    P[i] = Pvalue;
}
// Print the first 'size' elements of V to stdout, space-separated,
// terminated by a newline.
template <typename vec>
void printVector(vec *V, int size){
    for(int idx = 0; idx < size; ++idx)
        std::cout << V[idx] << " ";
    std::cout << std::endl;
}
// Fill h_a with 'size' pseudo-random floats drawn from {1, 2, ..., 10}.
// Reseeds the C RNG from the wall clock on every call.
void generateRandom(float *h_a, int size){
    srand(time(NULL));
    int i = 0;
    while(i < size){
        h_a[i] = (float)(rand() % 10 + 1);
        ++i;
    }
}
// Runs the 1D convolution with the mask in CONSTANT memory and reports the
// elapsed kernel time. Uses the file-scope buffers h_N/h_M/h_P, the
// __constant__ mask c_M, and the launch configuration in blocks/threads.
void parallelConvolutionConstant1D(){
    float *d_N, *d_P;
    cudaMalloc((void **)&d_N, sizeN);
    cudaMalloc((void **)&d_P, sizeN);
    // copy signal from host to device
    cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
    // transfer the mask to constant memory
    cudaMemcpyToSymbol(c_M, h_M, sizeM);
    // timing events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //Launch kernel
    CUDAConvolutionConstant1D<<<blocks, threads>>>(d_N, d_P, m, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&constantMemTimer, start, stop);
    cudaDeviceSynchronize();
    // BUGFIX: the seconds figure previously printed globalMemTimer
    cout<< "Elapsed parallel 1D convolution (Constant Mem) : " << constantMemTimer << " ms, " << constantMemTimer / 1000 << " secs" <<endl;
    cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
    // BUGFIX: release device memory and events (previously leaked)
    cudaFree(d_N); cudaFree(d_P);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Runs the 1D convolution with the mask in GLOBAL memory and reports the
// elapsed kernel time. Uses the file-scope buffers h_N/h_M/h_P and the
// launch configuration in blocks/threads.
void parallelConvolution1D(){
    float *d_N, *d_M, *d_P;
    // device allocations
    cudaMalloc((void **)&d_N, sizeN);
    cudaMalloc((void **)&d_M, sizeM);
    cudaMalloc((void **)&d_P, sizeN);
    // host -> device transfers
    cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
    cudaMemcpy(d_M, h_M, sizeM, cudaMemcpyHostToDevice);
    // timing events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //Launch kernel
    CUDAConvolution1D<<<blocks, threads>>>(d_N, d_M, d_P, m, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&globalMemTimer, start, stop);
    cout<< "Elapsed parallel 1D convolution (Global Mem) : " << globalMemTimer << " ms, " << globalMemTimer / 1000 << " secs" <<endl;
    cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
    // BUGFIX: destroy the timing events (previously leaked)
    cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
1,636
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
typedef short WORD;
typedef int DWORD;
typedef int LONG;
static unsigned char s_box[256] = {
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, // 0
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, // 1
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, // 2
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, // 3
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, // 4
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, // 5
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, // 6
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, // 7
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, // 8
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, // 9
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, // a
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, // b
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, // c
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, // d
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, // e
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};// f
static unsigned char inv_s_box[256] = {
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, // 0
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, // 1
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, // 2
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, // 3
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, // 4
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, // 5
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, // 6
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, // 7
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, // 8
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, // 9
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, // a
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, // b
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, // c
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, // d
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, // e
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d};// f
int Nb = 4;
// AES ShiftRows: cyclically rotate row r of the 4x4 state LEFT by r bytes.
// The state is row-major: row r occupies state[Nb*r .. Nb*r + Nb-1].
void shift_rows(unsigned char *state) {
    for (unsigned char row = 1; row < 4; row++) {
        // rotate left one byte, 'row' times
        for (unsigned char step = 0; step < row; step++) {
            unsigned char first = state[Nb*row + 0];
            for (unsigned char col = 1; col < Nb; col++) {
                state[Nb*row + col - 1] = state[Nb*row + col];
            }
            state[Nb*row + Nb - 1] = first;
        }
    }
}
// Inverse of ShiftRows: cyclically rotate row r of the 4x4 state RIGHT by
// r bytes. Row r occupies state[Nb*r .. Nb*r + Nb-1].
void inv_shift_rows(unsigned char *state) {
    for (unsigned char row = 1; row < 4; row++) {
        // rotate right one byte, 'row' times
        for (unsigned char step = 0; step < row; step++) {
            unsigned char last = state[Nb*row + Nb - 1];
            for (unsigned char col = Nb - 1; col > 0; col--) {
                state[Nb*row + col] = state[Nb*row + col - 1];
            }
            state[Nb*row + 0] = last;
        }
    }
}
// Multiply two bytes in GF(2^8) using the AES reduction polynomial
// x^8 + x^4 + x^3 + x + 1 (0x11b), via shift-and-add ("Russian peasant")
// multiplication. See FIPS-197 section 4.2.
unsigned char gmult(unsigned char a, unsigned char b) {
    unsigned char product = 0;
    for (int bit = 0; bit < 8; bit++) {
        if (b & 1) {
            product ^= a;
        }
        unsigned char high_bit = a & 0x80;
        a <<= 1;
        if (high_bit) {
            a ^= 0x1b; // reduce modulo the AES polynomial
        }
        b >>= 1;
    }
    return (unsigned char)product;
}
// Multiply the coefficient word a(x) by the column word b(x) modulo x^4 + 1
// (the AES MixColumns word product); the 4 result bytes are written to d.
// Equivalent to d[i] = sum over n of a[(i - n) mod 4] * b[n] in GF(2^8).
void coef_mult(unsigned char *a, unsigned char *b, unsigned char *d) {
    for (int i = 0; i < 4; i++) {
        d[i] = (unsigned char)(gmult(a[i], b[0])
             ^ gmult(a[(i + 3) % 4], b[1])
             ^ gmult(a[(i + 2) % 4], b[2])
             ^ gmult(a[(i + 1) % 4], b[3]));
    }
}
// AES MixColumns: multiply each column of the 4xNb state by the fixed
// polynomial a(x) = {03}x^3 + {01}x^2 + {01}x + {02}.
void mix_columns(unsigned char *state) {
    unsigned char a[] = {0x02, 0x01, 0x01, 0x03}; // a(x) = {02} + {01}x + {01}x2 + {03}x3
    unsigned char column[4], mixed[4];
    for (unsigned char c = 0; c < Nb; c++) {
        // gather column c
        for (unsigned char r = 0; r < 4; r++) {
            column[r] = state[Nb*r + c];
        }
        coef_mult(a, column, mixed);
        // scatter the mixed column back
        for (unsigned char r = 0; r < 4; r++) {
            state[Nb*r + c] = mixed[r];
        }
    }
}
// Inverse MixColumns: multiply each column of the 4xNb state by
// a^-1(x) = {0b}x^3 + {0d}x^2 + {09}x + {0e}.
void inv_mix_columns(unsigned char *state) {
    unsigned char a[] = {0x0e, 0x09, 0x0d, 0x0b}; // a(x) = {0e} + {09}x + {0d}x2 + {0b}x3
    unsigned char column[4], mixed[4];
    for (unsigned char c = 0; c < Nb; c++) {
        // gather column c
        for (unsigned char r = 0; r < 4; r++) {
            column[r] = state[Nb*r + c];
        }
        coef_mult(a, column, mixed);
        // scatter the un-mixed column back
        for (unsigned char r = 0; r < 4; r++) {
            state[Nb*r + c] = mixed[r];
        }
    }
}
//Round Keys
unsigned char key[16] = {
0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f};
// AES AddRoundKey: XOR the 16-byte state block with the file-scope round
// key array 'key'. XOR is its own inverse, so this both adds and removes
// the key.
void key_xor(unsigned char *state){
    for(int i = 0; i < 16; i++)
        state[i] ^= key[i];
}
#pragma pack(push, 1)
// BMP file header (packed to match the on-disk layout).
// NOTE(review): WORD/DWORD are typedef'ed to short/int above; the standard
// header defines bfSize as a 4-byte field, which matches DWORD==int here.
typedef struct tagBITMAPFILEHEADER
{
    WORD bfType;  //specifies the file type; 0x4D42 ('BM') for bitmaps
    DWORD bfSize;  //specifies the size in bytes of the bitmap file
    WORD bfReserved1;  //reserved; must be 0
    WORD bfReserved2;  //reserved; must be 0
    DWORD bOffBits;  //specifies the offset in bytes from the file header to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
#pragma pack(push, 1)
// BMP info header (packed to match the on-disk layout).
typedef struct tagBITMAPINFOHEADER
{
    DWORD biSize;  //specifies the number of bytes required by the struct
    LONG biWidth;  //specifies width in pixels
    LONG biHeight;  //specifies height in pixels
    WORD biPlanes;  //specifies the number of color planes, must be 1
    WORD biBitCount;  //specifies the number of bits per pixel
    DWORD biCompression; //specifies the type of compression
    DWORD biSizeImage;  //size of image in bytes
    LONG biXPelsPerMeter;  //number of pixels per meter in x axis
    LONG biYPelsPerMeter;  //number of pixels per meter in y axis
    DWORD biClrUsed;  //number of colors used by the bitmap
    DWORD biClrImportant;  //number of colors that are important
}BITMAPINFOHEADER;
#pragma pack(pop)
// Read a BMP file into memory.
//   filename          - path of the bitmap to open.
//   bitmapInfoHeader  - out-param, filled from the file's info header.
//   bitmapFileHeader  - out-param, filled from the file's file header.
// Returns a malloc'ed pixel buffer (caller must free) with the B and R
// channels swapped so the data is RGB, or NULL on open/format/allocation
// failure.
// NOTE(review): the swap loop steps by 3 bytes, which assumes 24-bit pixels
// and no row padding -- confirm against the input images used.
// NOTE(review): the BGR->RGB loop runs on the CPU but is timed with CUDA
// events; the events serve only as a wall clock here.
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader, BITMAPFILEHEADER *bitmapFileHeader)
{
    FILE *filePtr; //our file pointer
    unsigned char *bitmapImage;  //store image data
    int imageIdx=0;  //image index counter
    unsigned char tempRGB;  //our swap variable
    //open filename in read binary mode
    filePtr = fopen(filename,"rb");
    if (filePtr == NULL)
        return NULL;
    //read the bitmap file header
    fread(bitmapFileHeader, sizeof(BITMAPFILEHEADER),1,filePtr);
    //verify that this is a bmp file by checking the bitmap id ('BM')
    if (bitmapFileHeader->bfType !=0x4D42)
    {
        fclose(filePtr);
        return NULL;
    }
    //read the bitmap info header
    fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,filePtr);
    //move file pointer to the beginning of bitmap data
    fseek(filePtr, bitmapFileHeader->bOffBits, SEEK_SET);
    //allocate enough memory for the bitmap image data
    bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
    //verify memory allocation
    if (!bitmapImage)
    {
        free(bitmapImage);
        fclose(filePtr);
        return NULL;
    }
    //read in the bitmap image data
    fread(bitmapImage,1,bitmapInfoHeader->biSizeImage,filePtr);
    //make sure bitmap image data was read
    //NOTE(review): this check is dead code -- bitmapImage was already
    //verified non-NULL above and fread never nulls the pointer; a short
    //read would need to be detected from fread's return value instead.
    if (bitmapImage == NULL)
    {
        fclose(filePtr);
        return NULL;
    }
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t end;
    cudaEventCreate(&end);
    float swapTime;
    cudaEventRecord(start, 0);
    //swap the r and b values to get RGB (bitmap is BGR)
    for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage;imageIdx+=3)
    {
        tempRGB = bitmapImage[imageIdx];
        bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
        bitmapImage[imageIdx + 2] = tempRGB;
    }
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&swapTime, start, end);
    printf("Load Swap Time: %fms\n",swapTime);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    //close file and return bitmap image data
    fclose(filePtr);
    return bitmapImage;
}
// Write a pixel buffer back out as a BMP file.
//   filename          - output path (created/overwritten in binary mode).
//   bitmapImage       - RGB pixel data; swapped back to BGR IN PLACE before
//                       writing (the caller's buffer is modified).
//   bitmapFileHeader  - file header to write verbatim.
//   bitmapInfoHeader  - info header to write verbatim.
// Exits the process with status 1 if the file cannot be opened.
void ReloadBitmapFile(char *filename, unsigned char *bitmapImage, BITMAPFILEHEADER *bitmapFileHeader, BITMAPINFOHEADER *bitmapInfoHeader)
{
    FILE *filePtr; //our file pointer
    int imageIdx=0;  //image index counter
    unsigned char tempRGB;  //our swap variable
    //open filename in write binary mode
    filePtr = fopen(filename,"wb");
    if (filePtr == NULL)
    {
        printf("\nERROR: Cannot open file %s", filename);
        exit(1);
    }
    //write the bitmap file header
    fwrite(bitmapFileHeader, sizeof(BITMAPFILEHEADER),1,filePtr);
    //write the bitmap info header
    fwrite(bitmapInfoHeader, sizeof(BITMAPINFOHEADER),1,filePtr);
    //swap the r and b values back to BGR (bitmap files store BGR)
    for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage;imageIdx+=3)
    {
        tempRGB = bitmapImage[imageIdx];
        bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
        bitmapImage[imageIdx + 2] = tempRGB;
    }
    //write in the bitmap image data
    fwrite(bitmapImage,bitmapInfoHeader->biSizeImage,1,filePtr);
    //close file
    fclose(filePtr);
}
// Encrypt the image buffer with one AES-style round applied to independent
// 16-byte blocks: SubBytes (s_box), ShiftRows, MixColumns, AddRoundKey.
//   bitmapImage - buffer encrypted IN PLACE.
//   size        - buffer length in bytes; assumed a multiple of 16 -- TODO confirm.
//   key         - NOTE(review): unused; key_xor() reads the file-scope
//                 'key' array instead. Confirm before removing.
void encrypt(unsigned char *bitmapImage, int size, int key)
{
    int i;
    //byte substitution (SubBytes) over the whole buffer
    for(i=0;i < size;i++)
    {
        bitmapImage[i] = s_box[bitmapImage[i]];
    }
    //shift rows, one 16-byte block at a time
    unsigned char * p = bitmapImage;
    for(i=0;i < size;i+=16)
    {
        shift_rows(p + i);
    }
    //mix columns, per block
    p = bitmapImage;
    for(i=0;i < size;i+=16)
    {
        mix_columns(p + i);
    }
    //Add key (XOR with the fixed round key), per block
    p = bitmapImage;
    for(i=0;i < size;i+=16)
    {
        key_xor(p + i);
    }
}
// Decrypt a buffer produced by encrypt(): applies the inverse operations in
// reverse order (AddRoundKey, InvMixColumns, InvShiftRows, InvSubBytes) on
// independent 16-byte blocks.
//   bitmapImage - buffer decrypted IN PLACE.
//   size        - buffer length in bytes; assumed a multiple of 16 -- TODO confirm.
//   key         - NOTE(review): unused; key_xor() reads the file-scope
//                 'key' array instead.
void decrypt(unsigned char *bitmapImage, int size, int key)
{
    int i;
    unsigned char * p = bitmapImage;
    //Add key (XOR is self-inverse), per block
    for(i=0;i < size;i+=16)
    {
        key_xor(p + i);
    }
    //inverse mix columns, per block
    p = bitmapImage;
    for(i=0;i < size;i+=16)
    {
        inv_mix_columns(p + i);
    }
    //inverse shift rows, per block
    p = bitmapImage;
    for(i=0;i < size;i+=16)
    {
        inv_shift_rows(p + i);
    }
    //inverse byte substitution over the whole buffer
    for(i=0;i < size;i++)
    {
        bitmapImage[i] = inv_s_box[bitmapImage[i]];
    }
}
// Loads lena.bmp, encrypts it to encrypted.bmp, then reloads and decrypts
// that file to Decrypted.bmp, timing both stages with CUDA events.
int main()
{
    BITMAPINFOHEADER bitmapInfoHeader;
    BITMAPFILEHEADER bitmapFileHeader;
    unsigned char *bitmapData;
    bitmapData = LoadBitmapFile("lena.bmp",&bitmapInfoHeader, &bitmapFileHeader);
    // BUGFIX: LoadBitmapFile returns NULL on failure; the original
    // dereferenced the result unchecked.
    if (bitmapData == NULL)
    {
        fprintf(stderr, "ERROR: could not load lena.bmp\n");
        return 1;
    }
    printf("%d\n",bitmapInfoHeader.biSizeImage);
    int key = 251;
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t end;
    cudaEventCreate(&end);
    float encryptionTime, decryptionTime;
    //Encryption
    cudaEventRecord(start, 0);
    encrypt(bitmapData, bitmapInfoHeader.biSizeImage, key);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&encryptionTime, start, end);
    printf("Encryption Time: %fms\n",encryptionTime);
    ReloadBitmapFile("encrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader);
    // load encrypted image to array
    // BUGFIX: free the previous buffer before overwriting the pointer
    free(bitmapData);
    bitmapData = LoadBitmapFile("encrypted.bmp",&bitmapInfoHeader, &bitmapFileHeader);
    if (bitmapData == NULL)
    {
        fprintf(stderr, "ERROR: could not load encrypted.bmp\n");
        return 1;
    }
    //Decryption
    cudaEventRecord(start, 0);
    decrypt(bitmapData, bitmapInfoHeader.biSizeImage, key);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&decryptionTime, start, end);
    printf("Decryption Time: %fms\n",decryptionTime);
    ReloadBitmapFile("Decrypted.bmp", bitmapData, &bitmapFileHeader, &bitmapInfoHeader);
    // BUGFIX: release the pixel buffer (previously leaked)
    free(bitmapData);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return 0;
}
|
1,637
|
/*
* fast_recommender.cu
* --------------------
* Movie recommender based in closest neighbor using Cuda.
* Computes the euclidean distance between a client and
* a group of users. Chooses the closest in resemblance
* based in the lowest Euclidean Distance in the ratings
* of movies. Once we have our closest user it finds
* which movies the client has not seen, and recommends
* the top ones based on the ratings of the closest neighbor.
* This version optimizes data reading as it doesn't give
* the perfect match but stops when the resemblance is good
* enough
*
* @author: Miguel Angel Velázquez Ramos
* 2017
*
*/
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/gather.h>
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include <stdio.h>
#include "data.cuh"
using namespace thrust::placeholders;
#define INF 999
/*
* Operator: power_difference
* --------------------
* computes the square power difference of two numbers using:
* pow(a - b, 2) only if both numbers are different to 0.
* This is a special power_difference as we only want to
* compute when both elements in our vector have a value.
* Meaning only when two users have seen the movie for
* our recommender.
*
* a: first number to compute the power difference
* b: second number to compute the power difference
*
* returns: 0 when one of the input values is 0
* power difference of the two values in any other case
*/
// Functor: squared difference of two ratings, or 0 when either rating is 0
// (i.e. at least one of the two users has not rated the movie).
struct power_difference {
    __host__ __device__ float operator()(const char& a, const char& b) const {
        const int ra = (int)a;
        const int rb = (int)b;
        return (ra == 0 || rb == 0) ? 0.0f : powf((float)(ra - rb), 2);
    }
};
/*
* Operator: weight_division
* --------------------
 * computes the division of two numbers a and b using the formula:
* (a + (0.00001 - 0.000001 * b)) / b
 * This is a special division used to rank matches. With it,
 * even if two divisions would be the same, we favor a lower value
* by the weight of the dividend
* Example
* Normal divison Weighted division
* a b result a b result
* --- --- -------- --- --- --------
* 1 2 0.5 1 2 0.5000040
* 2 4 0.5 2 4 0.5000015
*
* This way if the quotients are sorted, we favor values with larger
* number of matches (b).
*
* a: dividend
 * b: divisor
*
* returns: Weighted quotient of two numbers
*/
// Functor: weighted quotient a/b that slightly favors larger divisors, so
// ties between equal ratios are broken toward more shared movies. Returns
// the sentinel 999 (the file-scope INF value) when b == 0.
struct weight_division {
    __host__ __device__ float operator()(const float& a, const float& b) const {
        if (b == 0) {
            return 999; // same value as the file-level INF macro
        }
        return (a + (0.00001f - b * 0.000001f)) / b;
    }
};
/*
* Operator: one_if_not_zeros
* --------------------
* this operator return 1 when both inputs are different to 0
*
* a: number
* b: number
*
* returns: 1 when the two input values are different to 0
* 0 otherwise
*/
// Functor: 1 when both ratings are positive (both users rated the movie),
// otherwise 0. Used to count movies two users have in common.
struct one_if_not_zeros {
    __host__ __device__ char operator()(const char& a, const char& b) const {
        return (a > 0 && b > 0) ? 1 : 0;
    }
};
/*
* Operator: not_in_common
* --------------------
* this operator returns the rating of a movie if the client
* has not seen it yet. Otherwise 0
*
* a: user movie rating
* b: client movie rating
*
* returns: a when b is 0
* 0 otherwise
*/
// Functor: pass through the peer's rating 'a' only for movies the client
// has not rated (b == 0); returns 0 for movies the client already saw.
struct not_in_common {
    __host__ __device__ char operator()(const char& a, const char& b) const {
        return (b == 0) ? a : (char)0;
    }
};
/*
* Iterator: make_matrix_index
* --------------------
* creates an iterator that is a one dimension representation of a two
* dimentional matrix. Where all rows have the same value.
* Example: In a 4 x 3 the content will be:
* (1, 1, 1, 1
* 2, 2, 2, 2
* 3, 3, 3, 3)
*
* first1: Beginning of fist range
* last1: End of first range
* fist2: Beginning of the second range
* output: where to store the output
*
* returns: An iterator with an indexed row matrix
*/
// Thrust "expand" idiom: emit first2[i] exactly first1[i] times, so with
// counts all equal to N and first2 = 0..rows-1 this yields the row-index
// matrix (0 0 .. 0, 1 1 .. 1, ...) used for reduce_by_key.
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator make_matrix_index(InputIterator1 first1, InputIterator1 last1,
                InputIterator2 first2, OutputIterator output) {
    typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
    difference_type input_size = thrust::distance(first1, last1);
    // total output length = sum of all repeat counts
    difference_type output_size = thrust::reduce(first1, last1);
    // scan the counts to obtain output offsets for each input element
    thrust::device_vector<difference_type> output_offsets(input_size, 0);
    thrust::exclusive_scan(first1, last1, output_offsets.begin());
    // scatter the nonzero counts into their corresponding output positions
    thrust::device_vector<difference_type> output_indices(output_size, 0);
    thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
        thrust::counting_iterator<difference_type>(input_size), output_offsets.begin(),
        first1, output_indices.begin());
    // compute max-scan over the output indices, filling in the holes
    thrust::inclusive_scan(output_indices.begin(), output_indices.end(), output_indices.begin(),
        thrust::maximum<difference_type>());
    // gather input values according to index array (output = first2[output_indices])
    OutputIterator output_end = output;
    thrust::advance(output_end, output_size);
    thrust::gather(output_indices.begin(), output_indices.end(), first2, output);
    // return output + output_size (one-past-the-end of the written range)
    thrust::advance(output, output_size);
    return output;
}
/*
* Function: print_matrix
* --------------------
* print a vector as a formated 2D matrix
*
* matrix: vector of size x * y
* x: Number of rows
 * y: Number of columns
* label: Label to display above the matrix
*
*/
// Print 'matrix' (flattened x-by-y, row-major) to stdout as a labeled table;
// each printed row is prefixed "u[row+offset]".
// Debug helper only: every element access copies one value device->host.
template <class T>
void print_matrix (thrust::device_vector<T>& matrix, const int x, const int y, const char* label, int offset) {
    std::cout << "\n\n " << label << "\n";
    std::cout << " ----------------------\n";
    for(int i = 0; i < x; i++) {
        std::cout << " u[" << i+offset << "] ";
        for(int j = 0; j < y; j++) {
            std::cout << matrix[i * y + j] << " ";
        }
        std::cout << "\n";
    }
    std::cout << "\n";
}
/*
* Function: print_matrix_char
* --------------------
* print a vector as a formated 2D matrix where it content are chars
* it converts each char to int
*
* matrix: vector of size x * y
* x: Number of rows
 * y: Number of columns
* label: Label to display above the matrix
*
*/
// Like print_matrix, but for char-valued vectors: each byte is widened to
// int before printing so ratings appear as numbers, not characters.
// Debug helper only: every element access copies one value device->host.
void print_char_matrix (thrust::device_vector<char>& matrix, const int x, const int y, const char* label, int offset) {
    std::cout << "\n\n " << label << "\n";
    std::cout << " ----------------------\n";
    for(int i = 0; i < x; i++) {
        std::cout << " u[" << i+offset << "] ";
        for(int j = 0; j < y; j++) {
            int n = (int)matrix[i * y + j];
            std::cout << n << " ";
        }
        std::cout << "\n";
    }
    std::cout << "\n";
}
/*
* Function: main
* --------------------
* compute which user has the lowest euclidean distance for Client
* provides 3 recommendations of movies based on the closest neighbor
*
* amount_of_users_in_dataset: Number of users to select from our initial data. Max 943
* amount_of_movies_in_dataset: Number movies to select from our initial data. Max 1682
* client_id: An user_id we want to find a closes match
* verbose: Print additional steps along the way
* block_size: How many users to evaluate per iteration
*
*/
int main(int argc, char** argv) {
/*
* Read the input parameters
* --------------------
*/
const int amount_of_users_in_dataset = atoi(argv[1]); //Users in our initial dataset
const int amount_of_movies_in_dataset = atoi(argv[2]); //Movies in our initial dataset
int client_id = atoi(argv[3]); //user_id of the person we want to find similar users for
int verbose = atoi(argv[4]); //verbose 0 - Print only results, verbose 1 - print steps
int block_size = atoi(argv[5]); //block_size - Number of users to evaluate in each iteration
int N_users = block_size;
int N_movies = amount_of_movies_in_dataset;
float distance = 99.99f;
int block_id = 0;
int closest_peer = 0;
int closest_peer_offset_in_block = 0;
thrust::device_vector<char> user_ratings_dataset(block_size * N_movies);
thrust::device_vector<char> client_ratings_dataset(block_size * N_movies);
load_char(client_ratings_dataset, block_size, N_movies, client_id);
// Show original ratings dataset
if(verbose) {
print_char_matrix (client_ratings_dataset, block_size,
N_movies, "client_ratings_dataset", 0);
}
/*
* Create index matrix for reduction
* --------------------
* create a vector that will help us to reduce by rows in next step
* E.g. In a 3 x 2
* (1 1 1
* 2 2 2)
*/
thrust::device_vector<int> seq(block_size);
thrust::device_vector<int> reduce_by_key_index(block_size * N_movies);
thrust::device_vector<int> reps(block_size, N_movies);
thrust::sequence(seq.begin(), seq.begin() + block_size);
make_matrix_index(reps.begin(), reps.end(), seq.begin(), reduce_by_key_index.begin());
thrust::device_vector<float> dev_null(block_size);
thrust::device_vector<float> squared_differences(block_size * N_movies);
thrust::device_vector<float> squared_differences_sum(block_size);
thrust::device_vector<char> common_movies(block_size * N_movies);
thrust::device_vector<float> common_movies_count(block_size);
thrust::device_vector<float> euclidean_distance(block_size);
thrust::device_vector<int> user_index(block_size);
thrust::sequence(user_index.begin(), user_index.end(), 0, 1);
/*
* Find lowest euclidean distance by blocks
* --------------------
* In order to save time compute only euclidean distance for block_size amount of users
* if we have a good enough correspaondce, stop and recommend a movie. Continue until
* the similarity of an user is below the threshold. This does not find the closest
* match to our client, but as we load the data little by little it saves time in all
* the cuda mallocs
*/
while(distance > 0.1f) {
std::cout << "Start Iteration: " << block_id << "\n";
N_users = block_size;
int offset_start = block_size * block_id;
load_char_from(user_ratings_dataset, block_size, amount_of_movies_in_dataset, block_size * block_id);
// Show original ratings dataset
if(verbose) {
print_char_matrix (user_ratings_dataset, block_size,
N_movies, "user_ratings_dataset", offset_start);
}
/*
* Compute Euclidean distance
* --------------------
*/
thrust::transform(user_ratings_dataset.begin(), user_ratings_dataset.end(),
client_ratings_dataset.begin(), squared_differences.begin(), power_difference());
// Show squared differences dataset
if(verbose) {
print_matrix (squared_differences, N_users, N_movies, "squared_differences", offset_start);
}
thrust::reduce_by_key(reduce_by_key_index.begin(), reduce_by_key_index.end(), squared_differences.begin(),
dev_null.begin(), squared_differences_sum.begin());
thrust::transform(user_ratings_dataset.begin(), user_ratings_dataset.end(),
client_ratings_dataset.begin(), common_movies.begin(), one_if_not_zeros());
if(verbose) {
print_char_matrix (common_movies, N_users, N_movies, "common_movies", offset_start);
}
thrust::reduce_by_key(reduce_by_key_index.begin(), reduce_by_key_index.end(), common_movies.begin(),
dev_null.begin(), common_movies_count.begin());
thrust::transform(squared_differences_sum.begin(), squared_differences_sum.end(), common_movies_count.begin(),
euclidean_distance.begin(), weight_division());
// Show Euclidean distance
if(verbose) {
std::cout << "\n\n similarity \n";
std::cout << " ----------------------\n";
for(int i = 0; i < N_users; i++) {
std::cout << " u[" << i+offset_start << "] " << squared_differences_sum[i] << " / " << common_movies_count[i] << "="
<< euclidean_distance[i] << " \n";
}
}
/*
* Find lowest distance in data set
* --------------------
*/
thrust::sort_by_key(euclidean_distance.begin(), euclidean_distance.end(),
user_index.begin());
// Show Euclidean distance
if(verbose) {
std::cout << "\n\n sorted similarity \n";
std::cout << " ----------------------\n";
for(int i = 0; i < N_users; i++) {
std::cout << " u[" << user_index[i]+offset_start << "] " << euclidean_distance[i]
<< " \n";
}
}
int answer = 0;
closest_peer = user_index[answer] + offset_start ;
closest_peer_offset_in_block = user_index[answer];
if (client_id == closest_peer) {
closest_peer = user_index[answer+1] + offset_start;
closest_peer_offset_in_block = user_index[answer+1];
answer++;
}
std::cout << "End of Iteration: " << block_id << "\n";
std::cout << "Lowest Euclidean Distance: " << euclidean_distance[answer]
<< " from user: " << closest_peer << " \n\n";
distance = euclidean_distance[answer];
block_id++;
} //end while
/*
* Recommend a movie
* --------------------
*/
int offset=closest_peer_offset_in_block * N_movies;
thrust::device_vector<char> possible_movies(N_movies);
thrust::transform(user_ratings_dataset.begin() + offset, user_ratings_dataset.begin()
+ offset + N_movies, client_ratings_dataset.begin(), possible_movies.begin(),
not_in_common());
// Show movies in common
if(verbose) {
std::cout << "\n\n possible_movie_ratings \n";
std::cout << " ----------------------\n";
std::cout << " u[" << closest_peer << "] \n";
std::cout << " movie rating \n";
for(int i = 0; i < N_movies; i++) {
int rating = (int) possible_movies[i];
std::cout << " " << i<< " " << rating << "\n";
}
std::cout << "\n";
}
thrust::device_vector<int> movie_index(N_movies);
thrust::sequence(movie_index.begin(), movie_index.end(), 0, 1);
thrust::sort_by_key(possible_movies.begin(), possible_movies.end(),
movie_index.begin());
std::cout << "Recommended Movies: " << movie_index[N_movies-1] << ", "
<< movie_index[N_movies-2] << ", "
<< movie_index[N_movies-3] << ", "
<< " \n";
return 0;
}
|
1,638
|
#include <stdio.h>
#define N 1
int *a;
__global__ void uninit(int *a) {
/* a[0] = 42; */
printf("a[0]: %d\n", a[0]);
}
void run_uninit() {
uninit<<<1,1>>>(a);
cudaGetErrorString(cudaGetLastError());
printf("Sync: %s\n", cudaGetErrorString(cudaThreadSynchronize()));
}
int main() {
cudaMalloc(&a, N*sizeof(a[0]));
run_uninit();
cudaDeviceReset();
cudaFree(a);
return 0;
}
|
1,639
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
void init_array(float *a,int n);
__global__
void sum(float* input)
{
int tid=threadIdx.x;
int no_threads=blockDim.x;
int step_size=1;
while(no_threads>0)
{
//printf("\n tid:%d no_threads:%d step_size:%d \n",tid,no_threads,step_size);
if(tid<no_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
input[fst]+=input[snd];
}
step_size <<= 1;
no_threads >>=1;
}
}
__global__
void max(float* input)
{
int tid=threadIdx.x;
int no_threads=blockDim.x;
int step_size=1;
while(no_threads>0)
{
if(tid<no_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
if(input[fst]<input[snd])
input[fst]=input[snd];
}
step_size <<= 1;
no_threads >>=1;
}
}
__global__
void min(float* input)
{
int tid=threadIdx.x;
int no_threads=blockDim.x;
int step_size=1;
while(no_threads>0)
{
if(tid<no_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
if(input[fst]>input[snd])
input[fst]=input[snd];
}
step_size <<= 1;
no_threads >>=1;
}
}
__global__
void std_(float* input,float avg)
{
int tid=threadIdx.x;
int no_threads=blockDim.x;
int step_size=1;
while(no_threads>0)
{
if(tid<no_threads)
{
int fst=tid*step_size*2;
int snd=fst+step_size;
if(step_size==1){
input[fst] = (input[fst]-avg)*(input[fst]-avg);
input[snd] = (input[snd]-avg)*(input[snd]-avg);
input[fst] += input[snd];}
else{
input[fst] += input[snd];
}
}
step_size <<= 1;
no_threads >>=1;
}
}
int main()
{
int n=4;
float *a,*d_a;
float SUM,MAX,MIN,STD_,avg;
a=(float*)malloc(sizeof(float)*n);
cudaMalloc(&d_a,n*sizeof(float));
init_array(a,n);
for(int i=0;i<n;i++)
printf("%f ",a[i]);
float m;
// for(int i=0;i<n;i++) //sequential sum
// m=m+a[i];
/**********************************************************************************************/
cudaMemcpy(d_a,a,n*sizeof(float),cudaMemcpyHostToDevice);
sum<<<1,n/2>>>(d_a);
cudaMemcpy(&SUM,d_a,sizeof(float),cudaMemcpyDeviceToHost);
printf("SUM:%f",SUM);
/**********************************************************************************************/
/**********************************************************************************************/
cudaMemcpy(d_a,a,n*sizeof(float),cudaMemcpyHostToDevice);
max<<<1,n/2>>>(d_a);
cudaMemcpy(&MAX,d_a,sizeof(float),cudaMemcpyDeviceToHost);
printf("\nMax:%f",MAX);
/**********************************************************************************************/
/**********************************************************************************************/
cudaMemcpy(d_a,a,n*sizeof(float),cudaMemcpyHostToDevice);
min<<<1,n/2>>>(d_a);
cudaMemcpy(&MIN,d_a,sizeof(float),cudaMemcpyDeviceToHost);
printf("\nMin:%f",MIN);
/**********************************************************************************************/
/**********************************************************************************************/
avg=SUM/n;
cudaMemcpy(d_a,a,n*sizeof(float),cudaMemcpyHostToDevice);
std_<<<1,n/2>>>(d_a,avg);
cudaMemcpy(&STD_,d_a,sizeof(float),cudaMemcpyDeviceToHost);
STD_ = STD_/n;
STD_ = sqrt(STD_);
printf("\nSTD:%f",STD_);
/**********************************************************************************************/
cudaFree(d_a);
delete[] a;
return 0;
}
void init_array(float*a,int n)
{
for(int i=0;i<n;i++)
a[i] = rand()%n + 1;
}
|
1,640
|
/************************************************************************
File : lcsCollectActiveParticlesForNewRun.cu
Author : Mingcheng Chen
Last Update : January 29th, 2013
*************************************************************************/
#include <stdio.h>
#define BLOCK_SIZE 1024
__global__ void InitializeScanArrayKernel(int *exitCells, int *oldActiveParticles, int *scanArray, int length) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID < length)
scanArray[globalID] = exitCells[oldActiveParticles[globalID]] < 0 ? 0 : 1;
}
__global__ void CollectActiveParticlesKernel(int *exitCells, int *oldActiveParticles, int *scanArray,
int *newActiveParticles, int length) {
int globalID = blockDim.x * blockIdx.x + threadIdx.x;
if (globalID < length)
if (exitCells[oldActiveParticles[globalID]] >= 0)
newActiveParticles[scanArray[globalID]] = oldActiveParticles[globalID];
}
extern "C"
void InitializeScanArray2(int *exitCells, int *oldActiveParticles, int *scanArray, int length) {
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);
InitializeScanArrayKernel<<<dimGrid, dimBlock>>>(exitCells, oldActiveParticles, scanArray, length);
cudaError_t err = cudaDeviceSynchronize();
if (err) {
cudaGetErrorString(err);
exit(0);
}
}
extern "C"
void CollectActiveParticles2(int *exitCells, int *oldActiveParticles, int *scanArray,
int *newActiveParticles, int length) {
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);
CollectActiveParticlesKernel<<<dimGrid, dimBlock>>>(exitCells, oldActiveParticles, scanArray,
newActiveParticles, length);
cudaError_t err = cudaDeviceSynchronize();
if (err) {
cudaGetErrorString(err);
exit(0);
}
}
|
1,641
|
#include <stdio.h>
#include <time.h>
const int M = 1024 * 1024;
const int thread_per_block = 512;
#define time_record_begin(start){ \
cudaEventCreate(&start); \
cudaEventRecord(start, 0); \
}
#define time_record_end(start, stop, time){ \
cudaEventCreate(&stop); \
cudaEventRecord(stop, 0); \
cudaEventSynchronize(stop); \
cudaEventElapsedTime(&time, start, stop);\
}
__global__ void findMin(int *A){
__shared__ int sdata[512];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = A[i];
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 32; s /= 2)
{
if(tid < s)
{
if(sdata[tid] > sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
if(tid < 32)
{
if(sdata[tid] > sdata[tid + 32])
sdata[tid] = sdata[tid + 32];
if(sdata[tid] > sdata[tid + 16])
sdata[tid] = sdata[tid + 16];
if(sdata[tid] > sdata[tid + 8])
sdata[tid] = sdata[tid + 8];
if(sdata[tid] > sdata[tid + 4])
sdata[tid] = sdata[tid + 4];
if(sdata[tid] > sdata[tid + 2])
sdata[tid] = sdata[tid + 2];
if(sdata[tid] > sdata[tid + 1])
sdata[tid] = sdata[tid + 1];
}
if(tid == 0)
A[blockIdx.x] = sdata[0];
}
__global__ void findMax(int *A){
__shared__ int sdata[512];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = A[i];
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 32; s /= 2)
{
if(tid < s)
{
if(sdata[tid] < sdata[tid + s])
sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
if(tid < 32)
{
if(sdata[tid] < sdata[tid + 32])
sdata[tid] = sdata[tid + 32];
if(sdata[tid] < sdata[tid + 16])
sdata[tid] = sdata[tid + 16];
if(sdata[tid] < sdata[tid + 8])
sdata[tid] = sdata[tid + 8];
if(sdata[tid] < sdata[tid + 4])
sdata[tid] = sdata[tid + 4];
if(sdata[tid] < sdata[tid + 2])
sdata[tid] = sdata[tid + 2];
if(sdata[tid] < sdata[tid + 1])
sdata[tid] = sdata[tid + 1];
}
__syncthreads();
if(tid == 0)
A[blockIdx.x] = sdata[0];
}
void random_number_generator(int *A, int size){
time_t t;
srand((unsigned) time(&t));
for(int i = 0; i < size; i++){
A[i] = rand();
}
}
int main(int argc, char ** argv){
int *A;
int *d_A_MAX, *d_A_MIN;
int SIZE[3] = {2, 8 ,32};
int max_gpu = INT_MIN;
int min_gpu = INT_MAX;
int max_cpu = INT_MIN;
int min_cpu = INT_MAX;
for(int i = 0; i < 3; i++){
int size = SIZE[i] * M;
A = (int*) malloc(size * sizeof(int));
random_number_generator(A, size);
float total_max_average = 0;
float total_min_average = 0;
cudaMalloc((void**)&d_A_MAX, size * sizeof(int));
cudaMalloc((void**)&d_A_MIN, size * sizeof(int));
cudaMemcpy(d_A_MAX, A, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_MIN, A, size * sizeof(int), cudaMemcpyHostToDevice);
dim3 dimGrid1(size / thread_per_block);
dim3 dimBlock1(thread_per_block);
dim3 dimGrid2(size / thread_per_block / thread_per_block);
dim3 dimBlock2(512);
dim3 dimGrid3(1);
dim3 dimBlock3(size / thread_per_block / thread_per_block);
cudaEvent_t start,stop;
float time_findMax, time_findMin;
float total_max_average_gpu = 0;
float total_min_average_gpu = 0;
for(int j = 0; j < 10; j++){
int max = INT_MIN, min = INT_MAX;
clock_t start_t, end_t;
start_t = clock();
for(int i = 0; i < size; i++){
if(A[i] > max)
max = A[i];
}
max_cpu = max;
end_t = clock();
total_max_average += (float)(end_t - start_t) / CLOCKS_PER_SEC;
start_t = clock();
for(int i = 0; i < size; i++){
if(A[i] < min)
min = A[i];
}
min_cpu = min;
end_t = clock();
total_min_average += (float)(end_t - start_t) / CLOCKS_PER_SEC;
cudaMemcpy(d_A_MAX, A, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_A_MIN, A, size * sizeof(int), cudaMemcpyHostToDevice);
time_record_begin(start);
findMax<<<dimGrid1, dimBlock1>>>(d_A_MAX);
findMax<<<dimGrid2, dimBlock2>>>(d_A_MAX);
findMax<<<dimGrid3, dimBlock3>>>(d_A_MAX);
time_record_end(start, stop, time_findMax);
total_max_average_gpu += time_findMax / 1000;
cudaMemcpy(&max_gpu, d_A_MAX, sizeof(int), cudaMemcpyDeviceToHost);
time_record_begin(start);
findMin<<<dimGrid1, dimBlock1>>>(d_A_MIN);
findMin<<<dimGrid2, dimBlock2>>>(d_A_MIN);
findMin<<<dimGrid3, dimBlock3>>>(d_A_MIN);
time_record_end(start, stop, time_findMin);
total_min_average_gpu += time_findMin / 1000;
cudaMemcpy(&min_gpu, d_A_MIN, sizeof(int), cudaMemcpyDeviceToHost);
}
printf("N: %dM, GPUmax: %d, CPUmax: %d GPUtime: %f, CPUtime: %f, GPUSpeedup: %f ", SIZE[i], max_gpu, max_cpu, total_max_average_gpu/10, total_max_average/10, total_max_average/total_max_average_gpu);
printf("N: %dM, GPUmin: %d, CPUmin: %d GPUtime: %f, CPUtime: %f, GPUSpeedup: %f\n", SIZE[i], min_gpu, min_cpu, total_min_average_gpu/10, total_min_average/10, total_min_average/total_min_average_gpu);
cudaFree(d_A_MAX);
cudaFree(d_A_MIN);
free(A);
}
}
|
1,642
|
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
// A simple macro to divide and round-up
#define DIVUP(A,B) ( (A)%(B) == 0 ? (A)/(B) : ((A) / (B) + 1) )
// macro to clamp to min & max value:
#define CLAMP(A,B,C) ( (A) < (B) ? (B) : (A) > (C) ? (C) : (A) )
// Declare constant memory for our convolution filter
__constant__ float FilterCoeff[9];
// Here's the CPU data for our filter which we'll copy into the constant memory
float FilterCoeff_CPU[9] = { -1.0f, -2.0f, -1.0f,
-2.0f, 12.0f, -2.0f,
-1.0f, -2.0f, -1.0f};
#define BLOCK_W 16
#define BLOCK_H 16
#define TILE_W (BLOCK_W * 4 + 16) // need 8 pixel aprons because we are doing 64-bit reads)
#define TILE_H (BLOCK_H + 2)
__global__ void Convolution3x3Kernel(uchar4 * in_image, uchar4 * out_image, int width, int height, int pitch32)
{
// This is a pointer to the shared memory we use for the image tile
__shared__ unsigned char smem[TILE_W*TILE_H];
int2 * smem_64bit = (int2*)smem;
// Compute the thread index overall
int X = __mul24(blockDim.x,blockIdx.x) + threadIdx.x;
int Y = __mul24(blockDim.y,blockIdx.y) + threadIdx.y;
// variables to hold the addresses we are going to write & read in GMEM & SMEM
int smem_idx;
int gmem_idx;
// Perform 64-bit reads, so only some of the threads need to participate.
// we'll cast the source image to a 64-bit data type (e.g. int2) and them read into a the smem with the same
// 64-bit casting
if(threadIdx.x < BLOCK_W / 2 + 2)
{
int row = CLAMP(Y-1,0,height-1);
int col = __mul24(blockDim.x>>1,blockIdx.x) + threadIdx.x - 1; // reading 8 bytes / thread, but only 1/2 of the block is used so divide blockDim / 2
col = CLAMP(col,0,width>>3-1);
gmem_idx = __mul24(row,pitch32>>1) + col; // must divide the pitch32 & X by 2 since it's a 64-bit address
smem_idx = __mul24(threadIdx.y,TILE_W/8) + threadIdx.x; // the index into the 64-bit casted smem
// Perform the read
smem_64bit[smem_idx] = ((int2*)in_image)[gmem_idx];
// two rows need to read again for the lower apron rows
if(threadIdx.y < 2)
{
row = CLAMP(Y + BLOCK_H - 1,0,height-1);
gmem_idx = __mul24(row,pitch32>>1) + col;
smem_idx += TILE_W/8 * BLOCK_H;
smem_64bit[smem_idx] = ((int2*)in_image)[gmem_idx];
}
}
// Now we need to wait until all threads in this block have finished reading
// their respective pixels into SMEM
__syncthreads();
if(X < width && Y < height)
{
// Now for the convolutions.
uchar4 out; // variable to store our output
unsigned int f_idx = 0; // index into the filter coefficients
float sum;
// compute the location of the pixel in smem we are going to start to process.
// this will be the pixel to the top-left of the active pixel
smem_idx = __mul24(threadIdx.y,TILE_W) + (threadIdx.x<<2) +7;
// now do the math. We'll use floating point since it's fastest
// Pixel 1
sum = FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx] * (float)smem[smem_idx+2];
out.x = (unsigned char)CLAMP(sum,0,255.0f);
// Pixel 2
f_idx = 0;
smem_idx = __mul24(threadIdx.y,TILE_W) + (threadIdx.x<<2) +8;
sum = FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx] * (float)smem[smem_idx+2];
out.y = (unsigned char)CLAMP(sum,0,255.0f);
// Pixel 3
f_idx = 0;
smem_idx = __mul24(threadIdx.y,TILE_W) + (threadIdx.x<<2) + 9;
sum = FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx] * (float)smem[smem_idx+2];
out.z = (unsigned char)CLAMP(sum,0,255.0f);
// Pixel 4
f_idx = 0;
smem_idx = __mul24(threadIdx.y,TILE_W) + (threadIdx.x<<2) +10;
sum = FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+2];
smem_idx += TILE_W;
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx];
sum += FilterCoeff[f_idx++] * (float)smem[smem_idx+1];
sum += FilterCoeff[f_idx] * (float)smem[smem_idx+2];
out.w = (unsigned char)CLAMP(sum,0,255.0f);
// **DEBUG**
// The following lines of code are useful for debugging
// They simply copy the value of the primary pixels back to the output rather
// than performing the convolution. Thus, it's easy to tell if the GMEM is loaded
// into SMEM properly. It's also useful to shift the offsets to insure the aprons are correct
//out.x = smem[(threadIdx.y+1)*TILE_W + threadIdx.x*4+7];
//out.y = smem[(threadIdx.y+1)*TILE_W + threadIdx.x*4+8];
//out.z = smem[(threadIdx.y+1)*TILE_W + threadIdx.x*4+9];
//out.w = smem[(threadIdx.y+1)*TILE_W + threadIdx.x*4+10];
// Finally, write out the result!
gmem_idx = __mul24(Y,pitch32) + X;
out_image[gmem_idx] = out;
}
}
// Function to preform an arbitrary 3x3 convolution on a grayscale 8-bit image
extern "C" cudaError_t Convolution3x3(unsigned char * src, unsigned char * dest, int width, int height, size_t pitch)
{
cudaError_t cerr;
// First, copy data for the filter coefficients into constant memory
cerr = cudaMemcpyToSymbol(FilterCoeff,FilterCoeff_CPU,9*sizeof(float));
if(cerr != cudaSuccess) return(cerr);
// Compute our block & grid dimensions
dim3 BlockSz(BLOCK_W,BLOCK_H,1);
dim3 GridSz(DIVUP(width,BlockSz.x*4),DIVUP(height,BlockSz.y),1);
Convolution3x3Kernel<<<GridSz,BlockSz>>>((uchar4*)src, (uchar4*)dest,
width, height, (int)pitch/4);
// Wait for this function to complete before we return
cerr = cudaThreadSynchronize();
return(cerr);
}
|
1,643
|
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define GIG 1000000000
#define PRINT_TIME 1
#define BLOCK 32 //blockDim
#define THREAD 32
#define SM_ARR_LEN 1024 //size of the matrix
#define THREADS_PER_BLOCK 64
#define THREADS_PER_DIM 16
#define TILING_DIM 20
#define MAX_MATRIX_SIZE_FOR_OUTPUT 64
#define QRD_SIMPLE_CALC_NORM2 128
#define QRD_SIMPLE_SCALE_COLUMN 128
#define QRD_SIMPLE_TRANSFORM_COLUMNS 128
#define QRD_OPTIMISED_NORM2_BLOCKSIZE 128
#define QRD_OPTIMISED_SCALE_COLUMN 128
#define QRD_OPTIMISED_TRANSFORM_COLUMNS_DIM 512
#define IMUL(a, b) __mul24(a, b)
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef struct
{
float *n; // Length: size = width*height
float *d; // Length: height
unsigned int width;
unsigned int height;
unsigned int size;
bool hasPivot;
int *pivot; // Length: height
} QRmatrix;
__global__ void qrd_optimised_calc_norm2(float *out_norms, float *qr, int k, int m, int n)
{
// Shared memory
extern __shared__ float s_qr[];
unsigned int r = blockIdx.x * blockDim.x + threadIdx.x + k; // Get row index
// Clear cache for threads that exceeds max + they should not influence result
s_qr[threadIdx.x] = 0;
if (r < m)
{
// Read value to shared memory
float val = qr[r * n + k];
s_qr[threadIdx.x] = val * val;
// Sync threads to make sure all other also have loaded values
__syncthreads();
// Do the actual pivot finding
for(unsigned int stride = blockDim.x/2; stride>0; stride>>=1)
{
if (threadIdx.x < stride && (stride+threadIdx.x+k) < m)
{
s_qr[threadIdx.x] += s_qr[threadIdx.x + stride]; // Update value
}
// Sync threads
__syncthreads();
}
// The first thread should write result from block to output
if (threadIdx.x == 0)
{
out_norms[blockIdx.x] = s_qr[0]; // Load sum to output
}
}
}
__global__ void qrd_optimised_calc_norm2_L2(float *val_norms, int max)
{
// Shared memory
extern __shared__ float s_qr[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Clear cache for threads that exceeds max + they should not influence result
s_qr[threadIdx.x] = 0;
if (tid < max)
{
// Read value to shared memory
s_qr[threadIdx.x] = val_norms[tid];
// Sync threads to make sure all other also have loaded values
__syncthreads();
// Do the actual pivot finding
for(unsigned int stride = blockDim.x/2; stride>0; stride>>=1)
{
if (threadIdx.x < stride)
{
s_qr[threadIdx.x] += s_qr[threadIdx.x + stride]; // Update value
}
// Sync threads
__syncthreads();
}
// The first thread should write result from block to output
if (threadIdx.x == 0)
{
val_norms[blockIdx.x] = s_qr[0]; // Load sum to output
}
}
}
__global__ void qrd_optimised_calc_norm2_FINISH(float *val_norms, float *qr, float *qr_diag, float *qr_norms, int k, int n)
{
// Square root to get raw norm
float nrm = sqrtf(val_norms[0]);
// Flip sign for norm depending on the value of kth row kth column
nrm = qr[k * n + k] < 0 ? -nrm : nrm;
// Save the actual norm
val_norms[0] = nrm;
qr_norms[k] = nrm;
// Flip sign for norm and save to QR diagonale
qr_diag[k] = -nrm;
}
float calc_norm2_optimised(float *d_qr, float *d_diag, float *d_norms, int k, int m, int n)
{
int threads = m-k;
int blocks = (threads + QRD_OPTIMISED_NORM2_BLOCKSIZE-1) / QRD_OPTIMISED_NORM2_BLOCKSIZE;
dim3 dimBlock(QRD_OPTIMISED_NORM2_BLOCKSIZE, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = QRD_OPTIMISED_NORM2_BLOCKSIZE*sizeof(float);
float *d_out;
cudaMalloc( (void**) &d_out, sizeof(float) * dimGrid.x);
// First run on a, subsequential will be on d_out
qrd_optimised_calc_norm2<<< dimGrid, dimBlock, smemSize >>>(d_out, d_qr, k, m, n);
while(blocks > 1)
{
// Adjust the number of required blocks, for the second round
threads = blocks;
blocks = threads > QRD_OPTIMISED_NORM2_BLOCKSIZE
? (threads + QRD_OPTIMISED_NORM2_BLOCKSIZE-1) / QRD_OPTIMISED_NORM2_BLOCKSIZE
: 1;
dimGrid.x = blocks;
qrd_optimised_calc_norm2_L2<<< dimGrid, dimBlock, smemSize >>>(d_out, threads);
}
qrd_optimised_calc_norm2_FINISH<<< 1, 1 >>>(d_out, d_qr, d_diag, d_norms, k, n);
cudaThreadSynchronize();
float norm = 0.0;
cudaMemcpy( &norm, d_out, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree( d_out );
return norm;
}
void output_matrix(float *m, int height, int width) {
int size = height*width;
if (size > MAX_MATRIX_SIZE_FOR_OUTPUT)
{
printf("Matrix to big to be outputted...");
printf("M: %dx%d (%d)\n", height, width, size);
return;
}
int row = 0;
for(int i = 0; i < size; i++)
{
int curRow = i/width;
//int curCol = i%m->width;
if (curRow > row)
{
printf("\n");
row = curRow;
}
else if (i > 0)
{
printf(" ");
}
printf("%f", m[i] );
}
printf("\n");
}
void output_qr_matrix(QRmatrix *m)
{
output_matrix(m->n, m->height, m->width);
printf("\nD:\n");
if (m->size > MAX_MATRIX_SIZE_FOR_OUTPUT)
{
printf("Total diagonale elements %d.", m->height);
}
else
{
for(unsigned int i = 0; i < m->height; i++)
{
if (i > 0) printf(", ");
printf("%f", m->d[i] );
}
}
printf("\n");
}
__global__ void qrd_optimised_scale_column(float *qr, float *qr_norms, int k, int m, int n)
{
float norm = qr_norms[k];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int r = k + tid;
if (r < m)
{
qr[r * n + k] /= norm;
}
if (tid == 0)
{
qr[k * n + k] += 1.0;
}
}
__global__ void qrd_optimised_transform_columns(float *qr, int k, int m, int n)
{
// Declare cache
extern __shared__ float v[];
// Apply transformation to remaining columns.
int c = blockIdx.x * blockDim.x + threadIdx.x + k + 1;
if (c < n)
{
float s = 0.0, knk = qr[k * n + k];
for (int i = k; i < m;)
{
// Guard for not exceeding height of QR matrix + load k column values to shared memory
v[threadIdx.x] = i+threadIdx.x < m ? qr[(i+threadIdx.x) * n + k] : 0;
__syncthreads();
// For all k values loaded, calc s
for(int vi = 0; vi < blockDim.y; vi++, i++)
s += v[vi] * qr[i * n + c];
}
s = (-s) / knk;
for (int i = k; i < m;)
{
// Guard for not exceeding height of QR matrix + load k column values to shared memory
v[threadIdx.x] = i+threadIdx.x < m ? qr[(i+threadIdx.x) * n + k] : 0;
__syncthreads();
// For all k values loaded, calc s
for(int vi = 0; vi < blockDim.y; vi++, i++)
qr[i * n + c] += s * v[vi];
}
}
}
void scale_column_optimised(float *d_qr, float *d_norms, int k, int m, int n)
{
int threads = m-k;
int blocks = (threads + QRD_OPTIMISED_SCALE_COLUMN-1) / QRD_OPTIMISED_SCALE_COLUMN;
qrd_optimised_scale_column<<< blocks, QRD_OPTIMISED_SCALE_COLUMN >>>( d_qr, d_norms, k, m, n );
}
void transform_columns_optimised(float *d_qr, int k, int m, int n)
{
int threads = n-(k+1);
int blocks = (threads + QRD_OPTIMISED_TRANSFORM_COLUMNS_DIM-1) / QRD_OPTIMISED_TRANSFORM_COLUMNS_DIM;
dim3 dimBlock(QRD_OPTIMISED_TRANSFORM_COLUMNS_DIM, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = QRD_OPTIMISED_TRANSFORM_COLUMNS_DIM*sizeof(float);
qrd_optimised_transform_columns<<< dimGrid, dimBlock, smemSize >>>( d_qr, k, m, n );
}
void randomInit(float* data, int size, float min, float max)
{
const float range = (max - min);
for (int i = 0; i < size; ++i)
{
float rnd = rand()/(float)RAND_MAX; // Generate random value from 0.0 to 1.0
data[i] = rnd*range + min;
}
}
/**
 * Compute the QR decomposition of matrix `a` on the GPU using Householder
 * reflections, column by column. Returns a freshly malloc'd QRmatrix whose
 * `n` buffer holds the packed QR factors and `d` the diagonal values copied
 * back from the device; the caller owns the returned struct and its buffers.
 *
 * NOTE(review): the `version` and `usePivot` parameters are accepted but
 * never read in this function — confirm whether they are intentionally
 * unused in the optimised path.
 */
QRmatrix* gpu_qrd_optimised(QRmatrix *a, int blockDimension, int version, bool usePivot)
{
// GPU Timing variables
cudaEvent_t start, stop;
float elapsed_gpu;
// Debug printing is only enabled for tiny matrices (< 50 elements total).
bool output = a->size < 50;
int n = a->width;
int m = a->height;
// Result container: A is copied to the device and overwritten in place.
QRmatrix *qr = (QRmatrix*) malloc (sizeof(QRmatrix));
qr->height = a->height;
qr->size = a->size;
qr->width = a->width;
qr->n = (float*) malloc (sizeof(float)*a->size);
qr->d = (float*) malloc (sizeof(float)*a->height);
qr->pivot = (int*) malloc (sizeof(int)*a->height);
qr->hasPivot = false;
// Check if block dimension is too big
if (blockDimension > (int)a->width) blockDimension = a->width;
// Declare kernel pointers
float *d_qr, *d_diag, *d_norms; // QR + Diagonale + Norms
// Allocate memory on GPU for LU matrix
cudaMalloc( (void**) &d_qr, sizeof(float) * qr->size );
cudaMalloc( (void**) &d_diag, sizeof(float) * qr->width );
cudaMalloc( (void**) &d_norms, sizeof(float) * qr->width );
// Copy matrix A vector values to the QR pointer on the GPU/device = A copy to QR (will be modified)
cudaMemcpy( d_qr, a->n, sizeof(float) * a->size, cudaMemcpyHostToDevice);
#if PRINT_TIME
// Create the cuda events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record event on the default stream
cudaEventRecord(start, 0);
#endif
// Execute kernels
// Optimised QR
// Core algorithm for QR Decomposition
for (int k = 0; k < n; k++) // For each column
{
// Compute 2-norm of k-th column
float nrm = calc_norm2_optimised(d_qr, d_diag, d_norms, k, m, n);
// Skip columns that are already zero (no reflection needed).
if (nrm != 0.0)
{
// Form k-th Householder vector. (Performed on device)
scale_column_optimised(d_qr, d_norms, k, m, n);
if (output)
{
// Debug dump of the partially-decomposed matrix after the scale step.
cudaMemcpy( qr->n, d_qr, qr->size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( qr->d, d_diag, qr->width * sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\nDecompose Partly Column=%d\n--------------\n", k);
output_qr_matrix(qr);
}
// Apply transformation to remaining columns.
transform_columns_optimised(d_qr, k, m, n);
}
if (output)
{
// Debug dump after the column transformation.
cudaMemcpy( qr->n, d_qr, qr->size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( qr->d, d_diag, qr->width * sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\nDecompose Partly Column=%d\n--------------\n", k);
output_qr_matrix(qr);
}
}
// NOTE(review): cudaThreadSynchronize/cudaThreadExit below are deprecated
// aliases in modern CUDA (cudaDeviceSynchronize/cudaDeviceReset) — and
// cudaThreadExit here tears down the device context before the caller runs;
// confirm that is intended.
cudaThreadSynchronize();
#if PRINT_TIME
// Stop and destroy the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("\nGPU time: %f (msec)\n", elapsed_gpu);
cudaEventDestroy(start);
cudaEventDestroy(stop);
#endif
// Copy matrix members + value vector to host;
cudaMemcpy( qr->n, d_qr, qr->size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( qr->d, d_diag, qr->width * sizeof(float), cudaMemcpyDeviceToHost);
// Release memory on device
cudaFree( d_qr );
cudaFree( d_diag );
cudaFree( d_norms );
cudaThreadExit();
return qr;
}
/**
 * Build a random aHeight x aWidth matrix, print it, and run the optimised
 * GPU QR decomposition on it.
 *
 * Fix: the input matrix `a` and the result `qr` (including their buffers)
 * were previously leaked; they are now freed before returning. The unused
 * local `pivot` was removed.
 *
 * NOTE(review): `type` and `cpu_calc` are accepted but unused here —
 * presumably selectors for other code paths; confirm.
 */
void run_cuda_qr_decomposition(int aHeight, int aWidth, int type, int blockDimension, bool cpu_calc)
{
    // Allocate and randomly initialise the input matrix A.
    QRmatrix *a = (QRmatrix*) malloc (sizeof(QRmatrix));
    a->size = aHeight*aWidth;
    a->width = aWidth;
    a->height = aHeight;
    a->n = (float*) malloc (sizeof(float)*a->size);
    a->d = (float*) malloc (sizeof(float)*a->height);
    a->hasPivot = false;
    randomInit(a->n, a->size, 0, 9);
    printf("Running QR Decomposition\n\n");
    printf("A: %dx%d (%d)\n", a->height, a->width, a->size);
    output_matrix(a->n, a->height, a->width);
    printf("\n");
    // Run QR decomposition on the GPU.
    printf("Running on GPU...");
    QRmatrix *qr = gpu_qrd_optimised(a, blockDimension, 1, false);
    // Release host allocations (previously leaked).
    free(a->n);
    free(a->d);
    free(a);
    free(qr->n);
    free(qr->d);
    free(qr->pivot);
    free(qr);
}
/**
 * Driver: allocate host/device scratch buffers, run the QR decomposition
 * demo, then release everything.
 *
 * Fixes: all device and host buffers were previously leaked (no cudaFree /
 * free); the unused dim3 locals globalGrid/globalBlock were removed.
 */
int main(int argc, char **argv)
{
    int type = 1;
    bool cpu = true;
    // Arrays on GPU global memory
    float *d_a, *d_b, *d_result;
    // Arrays on the host memory
    float *h_a, *h_b, *h_result, *h_judge;
    int arrLen = SM_ARR_LEN;
    printf("Length of the array = %d\n", arrLen);
    // Allocate GPU memory
    size_t allocSize = arrLen * arrLen * sizeof(float);
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_a, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_b, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize));
    // Allocate arrays on host memory
    h_a = (float *) malloc(allocSize);
    h_b = (float *) malloc(allocSize);
    h_result = (float *) malloc(allocSize);
    h_judge = (float *) malloc(allocSize);
    // NOTE(review): h_a/h_b are copied to the device without being
    // initialised first — confirm whether the contents matter here.
    CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, allocSize, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, allocSize, cudaMemcpyHostToDevice));
    run_cuda_qr_decomposition(SM_ARR_LEN, SM_ARR_LEN, type, 20, cpu);
    cudaDeviceSynchronize();
    // Check for errors during launch
    CUDA_SAFE_CALL(cudaPeekAtLastError());
    // Release device and host allocations (previously leaked).
    CUDA_SAFE_CALL(cudaFree(d_a));
    CUDA_SAFE_CALL(cudaFree(d_b));
    CUDA_SAFE_CALL(cudaFree(d_result));
    free(h_a);
    free(h_b);
    free(h_result);
    free(h_judge);
    return 0;
}
|
1,644
|
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#define MARGIN 1e-6
#define ARRAY_SIZE 100000
// Current wall-clock time in seconds, with microsecond resolution.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Grid-stride SAXPY: y = a*x + y over ARRAY_SIZE elements, so any launch
// configuration covers the whole array.
__global__ void SAXPY (double *x,double *y, double a) {
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < ARRAY_SIZE; i += step) {
        y[i] += a * x[i];
    }
}
/**
 * Compute SAXPY on both CPU and GPU, time each, and compare the results.
 *
 * Fix: the comparison used abs() on a double difference; with only
 * <stdlib.h> included that resolves to the integer overload, truncating any
 * difference below 1.0 to zero and silently hiding mismatches. The absolute
 * value is now computed directly in double precision.
 */
int main(int argc, char *argv[]) {
    double *x, *y, a = 2.2, *r;
    // Ceil-divide so every element is covered by the launch.
    unsigned int gridsize = (ARRAY_SIZE + 256 - 1) / 256;
    unsigned int nBytes = sizeof(double) * ARRAY_SIZE;
    x = (double*)malloc(nBytes);
    y = (double*)malloc(nBytes);
    r = (double*)malloc(nBytes);
    for (int i = 0; i < ARRAY_SIZE; i++) {
        x[i] = i;
        y[i] = 2 * i;
    }
    double *d_x, *d_y;
    cudaMalloc(&d_x, nBytes);
    cudaMalloc(&d_y, nBytes);
    // Copy the pristine inputs before the CPU loop mutates y in place.
    cudaMemcpy(d_x, x, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, nBytes, cudaMemcpyHostToDevice);
    printf("Computing SAXPY on the CPU...\n");
    double start = cpuSecond();
    for (int i = 0; i < ARRAY_SIZE; i++) {
        y[i] += a * x[i];
        r[i] = y[i]; // keep the CPU reference result
    }
    printf("Done! CPU costs %lf\n", cpuSecond() - start);
    printf("Computing SAXPY on the GPU...\n");
    start = cpuSecond();
    SAXPY<<<gridsize,256>>>(d_x, d_y, a);
    cudaDeviceSynchronize();
    printf("Done! GPU costs %lf\n", cpuSecond() - start);
    printf("Comparing the output for each implementation...\n");
    cudaMemcpy(y, d_y, nBytes, cudaMemcpyDeviceToHost);
    int c = 0;
    for(int i = 0; i < ARRAY_SIZE; i++) {
        // Absolute difference in double precision (the former abs() call
        // truncated to int).
        double diff = y[i] - r[i];
        if (diff < 0) diff = -diff;
        if (diff > MARGIN) {
            c += 1;
            printf("The %d-th element doesn't match. The difference is %lf\n", i, diff);
        }
    }
    printf("Totally %d mismatch(es)\n", c);
    if (c == 0)
        printf("Correct!\n");
    cudaFree(d_x);
    cudaFree(d_y);
    free(x);
    free(y);
    free(r);
    return 0;
}
|
1,645
|
#include "includes.h"
// Element-wise square: d_out[i] = d_in[i]^2, one thread per element
// (single-block launch assumed: indexing uses threadIdx.x only).
__global__ void square(float * d_out, float * d_in) {
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v;
}
|
1,646
|
// compile with:
//
// nvcc -gencode arch=compute_20,code=compute_20 -ptx jitlink.cu -o jitlink.ptx
//
//
extern "C"{
// JIT-linkable device helper: stores a*2 into *out and returns 0 as a
// status code. extern "C" keeps the symbol name unmangled for the linker.
__device__
int bar(int* out, int a) {
    *out = 2 * a;
    return 0;
}
}
|
1,647
|
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// Write each element's own global index: a[i] = i for all i < n.
__global__ void fillKernel(int *a, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        a[idx] = idx;
    }
}
// Host wrapper: launch enough 512-thread blocks to cover all n elements.
void fill(int* d_a, int n)
{
    const int threadsPerBlock = 512;
    // Ceil-divide so a partial block covers the tail.
    int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    fillKernel <<< blocks, threadsPerBlock >>> (d_a, n);
}
// Fill a device vector with 0..N-1 via a raw CUDA kernel, reduce it with
// Thrust, and verify against a host-side reference sum.
int main(){
    const int N=50000;
    thrust::device_vector<int> data(N);
    fill(thrust::raw_pointer_cast(&data[0]), N);
    int deviceSum = thrust::reduce(data.begin(), data.end(), 0);
    // Host reference: sum of 0..N-1.
    int hostSum = 0;
    for(int i = 0; i < N; i++) hostSum += i;
    cout << deviceSum << endl;
    cout << hostSum << endl;
    if(deviceSum == hostSum) cout << "Test succeeded!!!" << endl;
    else { cerr << "Test FAILED!!!!!" << endl; return(1); }
    return (0);
}
|
1,648
|
/**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 32
int size;
typedef struct
{
float **element;
} matrix;
// Current wall-clock time in nanoseconds (clock_gettime on Linux,
// gettimeofday elsewhere).
long long wall_clock_time()
{
#ifdef __linux__
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (long long)ts.tv_nsec + (long long)ts.tv_sec * 1000000000ll;
#else
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)(tv.tv_usec * 1000) + (long long)tv.tv_sec * 1000000000ll;
#endif
}
// Read A[row][col] on the device via the matrix's row-pointer table.
__device__ float getElement(matrix A, int row, int col) {
    float *rowData = A.element[row];
    return rowData[col];
}
// Write `value` into A[row][col] on the device.
__device__ void setElement(matrix A, int row, int col, float value) {
    float *rowData = A.element[row];
    rowData[col] = value;
}
// Build a BLOCK_SIZE x BLOCK_SIZE view of A starting at block coordinates
// (blockRow, blockCol): each row pointer aliases into A rather than copying.
// NOTE(review): the device-side malloc for the row-pointer table is never
// freed by any visible caller — confirm whether this leak matters.
__device__ matrix getSubMatrix(matrix A, int blockRow, int blockCol) {
    int rowBase = BLOCK_SIZE * blockRow;
    int colBase = BLOCK_SIZE * blockCol;
    matrix sub;
    sub.element = (float**)malloc(sizeof(float*) * BLOCK_SIZE);
    for (int r = 0; r < BLOCK_SIZE; r++) {
        sub.element[r] = A.element[rowBase + r] + colBase;
    }
    return sub;
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
/**
 * Allocate a size x size matrix with CUDA managed memory: one managed array
 * of row pointers, then one managed row array per row. Exits the program
 * with an error message on any allocation failure.
 */
void allocate_matrix(matrix* m)
{
    cudaError_t status;
    // Row-pointer table first.
    status = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
        exit(1);
    }
    // Then one managed buffer per row.
    for (int row = 0; row < size; row++)
    {
        status = cudaMallocManaged((void**)&(m->element[row]), sizeof(float) * size);
        if (status != cudaSuccess)
        {
            fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
            exit(1);
        }
    }
}
/**
* Free the memory allocated for a matrix.
**/
/**
 * Release each row of the matrix, then the row-pointer table itself.
 */
void free_matrix(matrix* m) {
    for (int row = 0; row < size; row++)
        cudaFree(m->element[row]);
    cudaFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
/**
 * Fill the size x size matrix with random integer values in [0, 9].
 */
void init_matrix(matrix m)
{
    for (int r = 0; r < size; r++)
        for (int c = 0; c < size; c++)
            m.element[r][c] = rand() % 10;
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
/**
 * Set every element of the size x size matrix to 0.0.
 */
void init_matrix_zero(matrix m)
{
    for (int r = 0; r < size; r++)
        for (int c = 0; c < size; c++)
            m.element[r][c] = 0.0;
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
/**
 * Sequential O(n^3) matrix multiplication: result += a * b.
 * Note: this accumulates into `result`, so the caller must ensure it starts
 * at zero.
 */
void mm(matrix a, matrix b, matrix result)
{
    for (int row = 0; row < size; row++)
        for (int col = 0; col < size; col++)
            for (int inner = 0; inner < size; inner++)
                result.element[row][col] += a.element[row][inner] * b.element[inner][col];
}
/**
* Each kernel computes the result element (i,j).
*/
/**
 * Tiled matrix multiplication: each thread computes result[i][j] using
 * BLOCK_SIZE x BLOCK_SIZE shared-memory tiles of a and b.
 * Expects a 2D launch with blockDim = (BLOCK_SIZE, BLOCK_SIZE).
 *
 * Fixes: the original iterated size/BLOCK_SIZE tiles with no bounds checks,
 * reading and writing out of bounds whenever size is not a multiple of
 * BLOCK_SIZE (the grid is rounded up by the caller); the inner loop variable
 * also shadowed the row index `i`. Tiles are now zero-padded at the edges
 * and the final store is guarded, with __syncthreads() kept outside any
 * divergent branch so all threads in the block reach it.
 */
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0;
    // Ceil-divide so the last partial tile is processed too.
    int numTiles = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (int t = 0; t < numTiles; t++) {
        __shared__ float sharedA[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float sharedB[BLOCK_SIZE][BLOCK_SIZE];
        int aCol = t * BLOCK_SIZE + threadIdx.x;
        int bRow = t * BLOCK_SIZE + threadIdx.y;
        // Zero-pad out-of-range entries so edge tiles contribute nothing.
        sharedA[threadIdx.y][threadIdx.x] =
            (row < size && aCol < size) ? a.element[row][aCol] : 0.0f;
        sharedB[threadIdx.y][threadIdx.x] =
            (bRow < size && col < size) ? b.element[bRow][col] : 0.0f;
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; k++) {
            acc += sharedA[threadIdx.y][k] * sharedB[k][threadIdx.x];
        }
        __syncthreads();
    }
    if (row < size && col < size)
        result.element[row][col] = acc;
}
// Print the size x size matrix, one labeled row per line.
void print_matrix(matrix m)
{
    for (int r = 0; r < size; r++)
    {
        printf("row %4d: ", r);
        for (int c = 0; c < size; c++)
            printf("%6.2f ", m.element[r][c]);
        printf("\n");
    }
}
/**
 * End-to-end benchmark: multiply two random size x size matrices on the CPU
 * (mm) and on the GPU (mm_kernel), time both, and compare the results
 * element by element.
 *
 * NOTE(review): result1/result2 are never explicitly zeroed before mm()'s
 * "+=" accumulation — this appears to rely on the managed allocations
 * starting as zero; confirm.
 */
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
cudaError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// print_matrix(a);
// printf("\n");
// print_matrix(b);
// printf("\n");
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result1);
// Perform CUDA matrix multiplication
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // a block of 32 x 32 CUDA threads
// Round the grid up so a partial block covers any remainder rows/columns.
dim = (size % BLOCK_SIZE == 0) ? size / BLOCK_SIZE : size / BLOCK_SIZE + 1;
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
mm_kernel<<<grid, block>>>(a, b, result2, size);
cudaDeviceSynchronize();
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result2);
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// Compare the results
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
printf("correct: %f, actual: %f\n", result1.element[i][j], result2.element[i][j]);
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
// Entry point: read the matrix dimension from argv (default 1024), seed the
// RNG deterministically, and run the CPU-vs-GPU multiplication benchmark.
int main(int argc, char ** argv)
{
    srand(0);
    printf("Usage: %s <size>\n", argv[0]);
    // Matrix dimension from the command line, falling back to 1024.
    size = (argc >= 2) ? atoi(argv[1]) : 1024;
    fprintf(stderr,"Sequential matrix multiplication of size %d\n", size);
    work();
    return 0;
}
|
1,649
|
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
// Initialise each vertex's pagerank to the uniform value 1/n_vertices.
// Fix: the literal 1.0 forced a double-precision divide followed by a
// truncating store; 1.0f keeps the computation in float as intended for
// this float kernel.
__global__ void initializePagerankArray(float * pagerank_d, int n_vertices) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < n_vertices) {
        pagerank_d[i] = 1.0f / (float)n_vertices;
    }
}
// Zero the next-iteration pagerank scratch array, one thread per vertex.
__global__ void setPagerankNextArray(float * pagerank_next_d, int n_vertices) {
    int v = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (v >= n_vertices) return;
    pagerank_next_d[v] = 0.0;
}
// Scatter phase of one pagerank iteration: each vertex spreads 85% of its
// current rank evenly across its successors; dangling vertices (no
// successors) contribute their damped rank to a shared dangling accumulator.
__global__ void addToNextPagerankArray(float * pagerank_d, float * pagerank_next_d, int * n_successors_d, int * successors_d, int * successor_offset_d, float * dangling_value2, int n_vertices) {
    int v = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (v < n_vertices) {
        int outDegree = n_successors_d[v];
        if (outDegree > 0) {
            // Successor ids live at successors_d[base .. base+outDegree-1].
            int base = successor_offset_d[v];
            for (int s = 0; s < outDegree; s++) {
                int succ = successors_d[base + s];
                atomicAdd(&(pagerank_next_d[succ]), 0.85*(pagerank_d[v])/outDegree);
            }
        } else {
            // Dangling vertex: rank mass is redistributed later.
            atomicAdd(dangling_value2, 0.85*pagerank_d[v]);
        }
    }
}
// Finish one iteration: spread the accumulated dangling mass plus the
// (1 - damping) teleport term uniformly over all vertices.
__global__ void finalPagerankArrayForIteration(float * pagerank_next_d, int n_vertices, float dangling_value2) {
    int v = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (v >= n_vertices) return;
    pagerank_next_d[v] += (dangling_value2 + (1-0.85))/((float)n_vertices);
}
// Promote the next-iteration ranks into pagerank_d, clear the scratch
// array, and accumulate the total absolute change into *diff for the host's
// convergence test.
__global__ void setPagerankArrayFromNext(float * pagerank_d, float * pagerank_next_d, int n_vertices, float *diff) {
    int v = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (v < n_vertices) {
        float previous = pagerank_d[v];
        pagerank_d[v] = pagerank_next_d[v];
        pagerank_next_d[v] = 0.0;
        // |previous - new| folded into the global L1 difference.
        float delta = previous - pagerank_d[v];
        atomicAdd(diff, (delta >= 0) ? delta : -delta);
    }
}
/**
 * PageRank driver: reads an edge-list file ("from to" pairs), builds a CSR
 * successor representation, iterates the damped pagerank kernels on the GPU
 * until the L1 change drops below epsilon (or 30 iterations), writes the
 * final ranks to file "rg", and prints timings.
 */
int main(int argc, char ** args) {
if (argc != 2) {
fprintf(stderr,"Wrong number of args. Provide input graph file.\n");
exit(EXIT_FAILURE);
}
cudaError_t err = cudaSuccess;
clock_t time_to_build, time_to_calc;
// Force CUDA context creation up front so later timings exclude init cost.
err = cudaFree(0);
if (err != cudaSuccess){
fprintf(stderr, "[ERROR] Fail to initialize and free device, error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
///////////////////////// Step01. Build Graph //////////////////////
// vars for grahp
int i;
unsigned int n_vertices = 0;
unsigned int n_edges = 0;
unsigned int vertex_from = 0, vertex_to = 0, vertex_prev = 0;
// vars for pagerank
float * pagerank_h, *pagerank_d;
float *pagerank_next_d;
int * n_successors_h, *n_successors_d;
int * successors_h, *successors_d;
int * successor_offset_h;
int *successor_offset_d;
FILE * fp;
if ((fp = fopen(args[1], "r")) == NULL) {
fprintf(stderr,"[ERROR] Could not open input file.\n");
exit(EXIT_FAILURE);
}
// parse file to count the number of vertices and edges
// (vertex ids are assumed dense; the max id seen determines n_vertices)
while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF){
if (vertex_from > n_vertices) {
n_vertices = vertex_from;
}
else if (vertex_to > n_vertices) {
n_vertices = vertex_to;
}
n_edges++;
}
n_vertices++;
clock_t start = clock();
// allocate memory for device and host
pagerank_h = (float *) malloc(n_vertices * sizeof(*pagerank_h));
err = cudaMalloc((void **)&pagerank_d, n_vertices*sizeof(float));
err = cudaMalloc((void **)&pagerank_next_d, n_vertices*sizeof(float));
n_successors_h = (int *) calloc(n_vertices, sizeof(*n_successors_h));
err = cudaMalloc((void **)&n_successors_d, n_vertices*sizeof(int));
successor_offset_h = (int *) malloc(n_vertices * sizeof(*successor_offset_h));
err = cudaMalloc((void **)&successor_offset_d, n_vertices*sizeof(int));
successors_h = (int *) malloc(n_edges * sizeof(*successors_h));
err = cudaMalloc((void **)&successors_d, n_edges*sizeof(int));
// NOTE(review): only the last cudaMalloc's status is checked here; earlier
// failures would be silently overwritten — confirm whether that is acceptable.
if (err != cudaSuccess){
fprintf(stderr, "[ERROR] Fail to allocate device memeory for Pagerank, Graph and Successor, error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// parse file to count the number of successors for each vertex
// (second pass: fill out-degrees, CSR offsets, and the successor list;
// appears to assume edges are grouped by source vertex)
fseek(fp, 0L, SEEK_SET);
int offset = 0, edges = 0;
i = 0;
while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
n_successors_h[vertex_from] += 1;
successor_offset_h[i] = offset;
if(edges != 0 && vertex_prev != vertex_from) {
i = vertex_from;
offset = edges;
successor_offset_h[i] = offset;
vertex_prev = vertex_from;
}
successors_h[edges] = vertex_to;
edges++;
}
// NOTE(review): storing `edges - 1` as the final offset looks inconsistent
// with the `offset = edges` convention used above — confirm intended.
successor_offset_h[i] = edges - 1;
fclose(fp);
// get build time and restart clock() for calculate time
time_to_build = clock() - start;
start = clock();
///////////////////////// Step02. Calculate Pagerank //////////////////////
// copy memory from host to device
err = cudaMemcpy(n_successors_d, n_successors_h, n_vertices*sizeof(int), cudaMemcpyHostToDevice);
err = cudaMemcpy(successors_d, successors_h, n_edges*sizeof(int), cudaMemcpyHostToDevice);
err = cudaMemcpy(successor_offset_d, successor_offset_h, n_vertices*sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "[ERROR] Fail to copy memeory from Host to Device for Successor, error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// vars for cuda setting and loop control
int n_iterations = 30;
int iteration = 0;
int numOfBlocks = 1;
int threadsPerBlock = 1000;
// One thread per vertex, capped at 1024 threads per block.
if(n_vertices <= 1024) {
threadsPerBlock = n_vertices;
numOfBlocks = 1;
} else {
threadsPerBlock = 1024;
numOfBlocks = (n_vertices + 1023)/1024;
}
// vars for dangling point values
float dangling_value_h = 0;
float dangling_value_h2 = 0;
// NOTE(review): reduced_sums_d and n_blocks are allocated/computed but
// never used in this function — presumably left over from a reduction
// variant; confirm.
float *dangling_value2, *reduced_sums_d;
int n_blocks = (n_vertices + 2048 - 1)/2048;
if (n_blocks == 0){
n_blocks = 1;
}
float epsilon = 0.000001;
float * d_diff;
float h_diff = epsilon + 1;
err = cudaMalloc((void **)&d_diff, sizeof(float));
err = cudaMalloc((void **)&reduced_sums_d, 1024 * sizeof(float));
err = cudaMalloc((void **)&dangling_value2, sizeof(float));
err = cudaMemcpy(dangling_value2, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "[ERROR] Fail to allocate and copy memeory for Dangling Point, error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
initializePagerankArray<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, n_vertices);
//cudaDeviceSynchronize();
setPagerankNextArray<<<numOfBlocks,threadsPerBlock>>>(pagerank_next_d, n_vertices);
//cudaDeviceSynchronize();
// Iterate until the total rank change falls below epsilon or the
// iteration cap is reached.
while(epsilon < h_diff && iteration < n_iterations) { //was 23
// set the dangling value to 0
dangling_value_h = 0;
err = cudaMemcpy(dangling_value2, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
// initial parallel pagerank_next computation
addToNextPagerankArray<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, pagerank_next_d, n_successors_d, successors_d, successor_offset_d, dangling_value2, n_vertices);
//cudaDeviceSynchronize();
// get the dangling value
err = cudaMemcpy(&dangling_value_h2, dangling_value2, sizeof(float), cudaMemcpyDeviceToHost);
// final parallel pagerank_next computation
finalPagerankArrayForIteration<<<numOfBlocks,threadsPerBlock>>>(pagerank_next_d, n_vertices, dangling_value_h2);
//cudaDeviceSynchronize();
// Get difference to compare to epsilon
cudaMemset(d_diff, 0, sizeof(float) );
setPagerankArrayFromNext<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, pagerank_next_d, n_vertices, d_diff);
cudaMemcpy(&h_diff, d_diff, sizeof(float), cudaMemcpyDeviceToHost);
printf("probe2: %f\n", h_diff);
cudaDeviceSynchronize();
iteration++;
}
err = cudaMemcpy(pagerank_h, pagerank_d, n_vertices*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "[ERROR] Failed to copy memory from Device to Host! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
time_to_calc = clock() - start;
int build_milli = time_to_build * 1000 / CLOCKS_PER_SEC;
int calc_milli = time_to_calc * 1000 / CLOCKS_PER_SEC;
// Write the final ranks to file "rg".
// NOTE(review): f_result is never fclose'd — output may not be flushed on
// some platforms if the process ends abnormally; confirm.
FILE *f_result;
f_result=fopen("rg","w");
for (i=0;i<n_vertices;i++) {
fprintf(f_result,"Vertex %u:\tpagerank = %.18f\n", i, pagerank_h[i]);
}
// free host memory
free(pagerank_h);
free(successors_h);
free(n_successors_h);
free(successor_offset_h);
printf("[DEBUG] before printf");
// free device memory and reset device
// (cudaDeviceReset releases all device allocations made above)
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "[ERROR] Failed to clean and reset device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// print results and exit()
printf("Time to build: %d seconds, %d milliseconds\n",build_milli/1000, build_milli%1000);
printf("Time to calc: %d seconds, %d milliseconds\n",calc_milli/1000, calc_milli%1000);
printf("Number of iteration: %d\n", iteration);
printf("[FINISH] Pagerank with atomicAdd()");
return 0;
}
|
1,650
|
/************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
/**
* @brief pagerank 1
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
// Scatter phase: each vertex pushes an equal share of its rank to every
// CSR neighbor. The last vertex's neighbor list is terminated by num_edges.
__global__ void
pagerank1(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
          const int num_nodes, const int num_edges)
{
    // Get my workitem id
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes) return;
    // Neighbor list spans [first, last) in the CSR column array.
    int first = row[tid];
    int last = (tid + 1 < num_nodes) ? row[tid + 1] : num_edges;
    for (int edge = first; edge < last; edge++) {
        int nid = col[edge];
        // Transfer an equal share of this vertex's rank to the neighbor.
        atomicAdd(&page_rank2[nid], page_rank1[tid] / (float)(last - first));
    }
}
/**
* @brief pagerank 2
* @param row csr pointer array
* @param col csr column array
* @param data weight array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
* @param num_edges number of edges
*/
// Damping phase: combine the 15% teleport term with 85% of the gathered
// rank, then clear the scratch buffer for the next iteration.
__global__ void
pagerank2(int *row, int *col, int *data, float *page_rank1, float *page_rank2,
          const int num_nodes, const int num_edges)
{
    // Get my workitem id
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes) return;
    page_rank1[tid] = 0.15 / (float)num_nodes + 0.85 * page_rank2[tid];
    page_rank2[tid] = 0.0f;
}
/**
* @brief inibuffer
* @param row csr pointer array
* @param page_rank1 pagerank array 1
* @param page_rank2 pagerank array 2
* @param num_nodes number of vertices
*/
// Initialise the pagerank buffers: uniform rank 1/num_nodes in the primary
// array, zero in the scratch array.
__global__ void
inibuffer(int *row, float *page_rank1, float *page_rank2, const int num_nodes,
          const int num_edges)
{
    // Get my thread id
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_nodes) return;
    page_rank1[tid] = 1 / (float)num_nodes;
    page_rank2[tid] = 0.0f;
}
|
1,651
|
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define NUM_BLOCKS 1
#define BLOCK_SIZE 256
#define NUM_MEM 32768
//stage one
//stage one: per-thread inclusive prefix sum over its contiguous segment.
// out_array[tid*unsize + j] = sum of in_array[tid*unsize .. tid*unsize + j].
// Fix: the original recomputed each prefix from scratch with an inner loop
// (O(unsize^2) per thread, and it relied on out_array being pre-zeroed);
// a running accumulator produces the same values in O(unsize) without that
// dependency.
__global__ void prefixOne(int *in_array, int *out_array, int unsize, int size)
{
    int base = threadIdx.x * unsize;
    int running = 0;
    for (int j = 0; j < unsize; j++) {
        running += in_array[base + j];
        out_array[base + j] = running;
    }
}
//stage two
//stage two: tree-combination pass `idx`. Each participating thread adds the
// last element of its left half-block onto every element of the right
// half-block (span doubles each pass).
// Fixes: the start index and the carry value in_array[startIdx - 1] were
// loop-invariant but recomputed/reloaded every iteration (startIdx - 1 is
// outside every thread's write range for this pass, so hoisting the load is
// safe), and powf(2, idx-1) is replaced by an exact integer shift.
__global__ void prefixTwo(int *in_array, int unsize, int maxid, int idx)
{
    int tid = threadIdx.x;
    if (tid <= maxid) {
        // Half-block length for this pass: unsize * 2^(idx-1).
        int halfSpan = unsize << (idx - 1);
        // First element of the right half handled by this thread.
        int startIdx = halfSpan * (1 + 2 * tid);
        int carry = in_array[startIdx - 1];
        for (int j = 0; j < halfSpan; j++) {
            in_array[startIdx + j] += carry;
        }
    }
}
// Host driver: inclusive prefix sum of array_h (length `size`) on the GPU.
// Stage 1 builds independent per-thread segment prefix sums; stage 2 runs
// `steps` tree-combination passes to merge them. The result overwrites
// array_h.
void prefixsum(int blocks, int threads, int steps, int *array_h, int size)
{
    int *d_input;
    int *d_partial;
    // Length of the contiguous segment each thread owns.
    int segLen = size / (blocks * threads);
    dim3 gridDims(blocks, 1, 1);
    dim3 blockDims(threads, 1, 1);
    // Device buffers: input copy plus a zero-filled workspace.
    cudaMalloc((void **)&d_partial, size * sizeof(int));
    cudaMalloc((void **)&d_input, size * sizeof(int));
    cudaMemcpy(d_input, array_h, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_partial, 0, size * sizeof(int));
    // Stage 1: per-segment prefix sums.
    prefixOne<<<gridDims, blockDims>>> (d_input, d_partial, segLen, size);
    if (steps != 0) {
        // Stage 2: pairwise merges, halving the number of active threads
        // each pass while doubling the merged span.
        for (int pass = 1; pass <= steps; pass++) {
            int lastTid = (int)pow(2, steps - pass) - 1;
            prefixTwo<<<gridDims, blockDims>>>(d_partial, segLen, lastTid, pass);
        }
    }
    cudaMemcpy(array_h, d_partial, size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_partial);
}
// Allocate an int array of length `count` filled with the constant 1 and
// hand ownership back through *array (caller frees).
void prepare_numbers(int **array, int count)
{
    int *buf = (int *)malloc(count * sizeof(int));
    for (int i = 0; i < count; i++)
        buf[i] = 1;
    *array = buf;
}
// Print `count` ints separated by tabs, followed by a newline.
void print_array(int *array, int count)
{
    for (int i = 0; i < count; i++)
        printf("%d\t", array[i]);
    printf("\n");
}
/**
 * Driver: build an all-ones array, run the two-stage GPU prefix sum, and
 * report the elapsed time measured with CUDA events.
 *
 * Fix: the CUDA events were created but never destroyed (resource leak);
 * they are now released before exit.
 */
int main()
{
    int blocks, threads, max, stepTwo;
    int *array;
    float calTime;
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    blocks = NUM_BLOCKS;
    threads = BLOCK_SIZE;
    stepTwo = 8;
    max = NUM_MEM;
    // pre-init numbers: all ones, so the prefix sum should be 1..max
    array = NULL;
    prepare_numbers(&array, max);
    cudaEventRecord(start, 0);
    prefixsum(blocks, threads, stepTwo, array, max);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&calTime, start, end);
    // print array
    // print_array(array, max);
    printf("the elapsed time with %d threads is %.10f\n", threads, calTime);
    // Release the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    free(array);
    return 0;
}
|
1,652
|
///
/// vecAddKernel00.cu
/// For CSU CS575 Spring 2011
/// Instructor: Wim Bohm
/// Based on code from the CUDA Programming Guide
/// By David Newman
/// Created: 2011-02-16
/// Last Modified: 2011-02-16 DVN
///
/// This Kernel adds two Vectors A and B in C on GPU
/// without using coalesced memory access.
///
/// Adds vectors A and B into C. Each thread handles its own contiguous run
/// of N elements, so accesses across a warp are deliberately NOT coalesced
/// (this is the non-coalesced demonstration kernel).
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
    // First/one-past-last element of this thread's contiguous run.
    int first = (blockIdx.x * blockDim.x + threadIdx.x) * N;
    int last = first + N;
    for (int i = first; i < last; ++i) {
        C[i] = A[i] + B[i];
    }
}
|
1,653
|
// Busy-wait kernel: spin incrementing a counter `clock_count` times, then
// store the final count. The volatile qualifiers keep the compiler from
// collapsing the delay loop.
__global__ void clock_block(clock_t* d_o, volatile long clock_count)
{
    volatile long elapsed = 0;
    long target = clock_count;
    while (elapsed < target)
    {
        elapsed = elapsed + 1;
    }
    d_o[0] = elapsed;
}
/*
__global__ void clock_block(clock_t* d_o, clock_t clock_count)
{
clock_t start_clock = clock64();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
d_o[0] = clock_offset;
}
*/
|
1,654
|
// REQUIRES: clang-driver
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// REQUIRES: zlib
// RUN: %clang -### -target x86_64-linux-gnu -c %s -g -gz 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf -fdebug-info-for-profiling 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-2 -gsplit-dwarf 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-3 -glldb 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-4 -gcodeview 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-5 -gmodules 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -ggdb1 -fdebug-macro 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -ggdb2 -ggnu-pubnames 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -ggdb3 -gdwarf-aranges 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -g -gcolumn-info -fdebug-types-section 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN,COMMON
// Same tests for OpenMP
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -g -gz 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf -fdebug-info-for-profiling 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf-2 -gsplit-dwarf 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf-3 -glldb 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf-4 -gcodeview 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf-5 -gmodules 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -ggdb1 -fdebug-macro 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -ggdb2 -ggnu-pubnames 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -ggdb3 -gdwarf-aranges 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -g -gcolumn-info -fdebug-types-section 2>&1 | FileCheck %s --check-prefixes WARN,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -gdwarf-5 -gembed-source 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN-GES,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -c %s -ggdb -gembed-source -gdwarf-5 2>&1 \
// RUN: | FileCheck %s --check-prefixes WARN-GES,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -gdwarf-5 -gembed-source 2>&1 | FileCheck %s --check-prefixes WARN-GES,COMMON
// RUN: %clang -### -target x86_64-linux-gnu -fopenmp=libomp -fopenmp-targets=nvptx64-nvidia-cuda -c %s \
// RUN: -ggdb -gembed-source -gdwarf-5 2>&1 | FileCheck %s --check-prefixes WARN-GES,COMMON
// COMMON: warning: debug information option '{{-gz|-fdebug-info-for-profiling|-gsplit-dwarf|-glldb|-gcodeview|-gmodules|-gembed-source|-fdebug-macro|-ggnu-pubnames|-gdwarf-aranges|-fdebug-types-section}}' is not supported
// WARN-SAME: for target 'nvptx64-nvidia-cuda' [-Wunsupported-target-opt]
// WARN-GES-SAME: requires DWARF-5 but target 'nvptx64-nvidia-cuda' only provides DWARF-2 [-Wunsupported-target-opt]
// COMMON-NOT: debug information option '{{.*}}' is not supported for target 'x86
// COMMON: "-triple" "nvptx64-nvidia-cuda"
// COMMON-NOT: {{-compress-debug|-fdebug-info-for-profiling|lldb|codeview|module-format|embed-source|debug-info-macro|gnu-pubnames|generate-arange-section|generate-type-units}}
// COMMON: "-triple" "x86_64
// COMMON-SAME: {{-compress-debug|-fdebug-info-for-profiling|split-dwarf|lldb|codeview|module-format|embed-source|debug-info-macro|gnu-pubnames|generate-arange-section|generate-type-units}}
|
1,655
|
#include<stdio.h>
#define N 30
// CPU reference: element-wise sum of two N x N matrices stored row-major.
void add(int *X,int *Y,int *Z)
{
    for (int idx = 0; idx < N * N; ++idx)
        Z[idx] = X[idx] + Y[idx];
}
// GPU element-wise sum of two N x N matrices.
// Fixed: the original indexed with threadIdx alone, so any launch whose
// block was smaller than N x N left elements uncomputed while every block
// redundantly rewrote the same top-left tile.  Use a global index and a
// bounds guard so any grid/block shape covering N x N is correct.
__global__ void add_kernel(int *X,int *Y,int *Z)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N)
        Z[i*N+j] = X[i*N+j] + Y[i*N+j];
}
// Build two 30x30 matrices (all -1 and all +1), add them on the GPU,
// print the (all-zero) result and the kernel time.
int main()
{
    int X[N*N];
    int Y[N*N];
    for(int i=0;i<N;i++)
        for(int j=0;j<N;j++) {
            X[i*N+j]=-1;
            Y[i*N+j]= 1;
        }
    // Output matrix
    int Z[N*N];
    int *d_X,*d_Y,*d_Z;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void**) &d_X, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Y, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Z, (N*N)*sizeof(int));
    cudaMemcpy(d_X, X, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    // Fixed: the original launched a 13x13 grid of 22x22 blocks, but the
    // kernel indexes with threadIdx only, so just a 22x22 corner of the
    // 30x30 matrix was computed and all 169 blocks raced on it.  A single
    // N x N block (900 threads <= 1024) covers every element exactly once.
    dim3 dimGrid(1,1,1);
    dim3 dimBlock(N,N,1);
    cudaEventRecord(start);
    add_kernel<<<dimGrid, dimBlock>>>(d_X,d_Y,d_Z);
    cudaEventRecord(stop);
    cudaMemcpy(Z, d_Z, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_X);
    cudaFree(d_Y);
    cudaFree(d_Z);
    cudaEventSynchronize(stop);
    float Timeused = 0;
    cudaEventElapsedTime(&Timeused, start, stop);
    for(int i=0;i<N;i++)
        for(int j=0;j<N;j++){
            printf("%d ",Z[i*N+j]);
        }
    printf("\n");
    printf("Time used:%f ",Timeused);
    return 0;  // fixed: the original returned -1 even on success
}
|
1,656
|
#include "cuda_runtime.h"
#include "help_functions.cuh"
// Fast inverse square root (Quake III trick) with one Newton-Raphson step.
// Fixed: the original punned the float through `long`, which is 8 bytes on
// LP64 platforms, so `*(long*)&y` read 4 bytes past the float and corrupted
// the magic seed.  CUDA's 32-bit bit-cast intrinsics are exact and legal.
__device__ float help_functions::fisqrt(const float number)
{
    const float threehalfs = 1.5F;
    const float x2 = number * 0.5F;
    int bits = __float_as_int(number);        // reinterpret the 32 float bits
    bits = 0x5f3759df - (bits >> 1);          // magic initial estimate
    float y = __int_as_float(bits);
    y = y * (threehalfs - (x2 * y * y));      // one Newton iteration
    return y;
}
// Return the larger of two values; on ties, t1 is returned.
template<class T>
__device__ T help_functions::max(T t1, T t2)
{
    if (t1 < t2)
        return t2;
    return t1;
}
// Return the smaller of two values; on ties, t2 is returned.
template<class T>
__device__ T help_functions::min(T t1, T t2)
{
    if (t1 < t2)
        return t1;
    return t2;
}
typedef unsigned char uchar;
// Touch every template specialisation once so this translation unit emits
// the out-of-class definitions for the types used elsewhere.
__device__ void dummyHelpTemplate()
{
    help_functions::max<int>(0, 0);
    help_functions::min<int>(0, 0);
    help_functions::max<float>(0.0f, 0.0f);
    help_functions::min<float>(0.0f, 0.0f);
    help_functions::max<uchar>(0, 0);
    help_functions::min<uchar>(0, 0);
}
|
1,657
|
#include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/tabulate.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
const int MAXN = 16777216;
// Pass 1: mark word characters with -1 and each whitespace/control byte
// with its own index (seed values for the running-max scan).
__global__ void stage_first(int n, char *str, int32_t *pos) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        pos[i] = (str[i] > ' ') ? -1 : i;
}
// Pass 2: block-local Blelloch-style tree scan that turns the markers from
// stage_first into a running MAX of whitespace indices within the block.
// Each block handles 2*blockDim.x elements; the caller must supply
// (2*blockDim.x + 1) int32_t of dynamic shared memory (tmp[2*m] is used as
// scratch for the inclusive-shift at the end).  Cross-block propagation is
// completed afterwards by stage_fixed.
__global__ void stage_second(int n, int32_t *pos) {
    extern __shared__ int32_t tmp[];
    int m = blockDim.x;
    int tid = threadIdx.x;
    int base = (blockDim.x*blockIdx.x)*2;   // first global index of this block's span
    int off = 1;
    // Each thread loads its two elements into shared memory.
    tmp[2*tid] = pos[2*tid+base];
    tmp[2*tid+1] = pos[2*tid+1+base];
    // Up-sweep: build a max-reduction tree in place.
    for (int d = m; d > 0; d >>= 1, off <<= 1) {
        __syncthreads();
        if (tid < d) {
            int32_t ai = off*(2*tid+1)-1;
            int32_t bi = off*(2*tid+2)-1;
            tmp[bi] = tmp[bi] > tmp[ai] ? tmp[bi] : tmp[ai];
        }
    }
    // Stash the block total one slot past the end, then clear the root
    // (identity for max over indices is -1) for the down-sweep.
    if (tid == 0) {
        tmp[2*m] = tmp[2*m-1];
        tmp[2*m-1] = -1;
    }
    // Down-sweep: produces an exclusive max-scan in tmp[0 .. 2*m-1].
    for (int d = 1; d < 2*m; d <<= 1) {
        off >>= 1;
        __syncthreads();
        if (tid < d) {
            int32_t ai = off*(2*tid+1)-1;
            int32_t bi = off*(2*tid+2)-1;
            int32_t t = tmp[ai];
            tmp[ai] = tmp[bi];
            tmp[bi] = t > tmp[bi] ? t : tmp[bi];
        }
    }
    __syncthreads();
    // Shift by one (using the stashed total) to write back the INCLUSIVE scan.
    pos[2*tid+base] = tmp[2*tid+1];
    pos[2*tid+1+base] = tmp[2*tid+2];
}
// Pass 3: propagate the last whitespace index of the preceding block span
// into slots this block's local scan could not see (still -1).
// Fixed: `__global__` is a function qualifier in CUDA and is invalid on a
// pointer parameter (that is OpenCL syntax); a plain `int32_t*` parameter
// already refers to device global memory here.
__global__ void stage_fixed(int n, int32_t *pos, int bsz) {
    int idx = blockDim.x*blockIdx.x+threadIdx.x;
    if (idx >= n || idx/bsz == 0)
        return ;
    if (pos[idx] == -1)
        pos[idx] = pos[idx/bsz*bsz-1];
}
// Pass 4: convert each "index of the most recent whitespace" into the
// length of the word-character run ending at this position.
__global__ void stage_third(int n, int32_t *pos) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    pos[i] = i - pos[i];
}
// Run the hand-written four-kernel pipeline over the n-byte device string
// cuStr, leaving in cuPos[i] the length of the word-character run ending
// at i.  stage_second scans 2*bsz elements per block (hence the grid of
// (n+1)/2 pairs and the bsz*2 block span passed to stage_fixed).
// NOTE(review): stage_fixed applies only one level of cross-block
// propagation -- presumably runs never span more than one preceding
// block span in the intended inputs; confirm for adversarial data.
void custom_sol(int n, char *cuStr, int32_t *cuPos) {
    const int bsz = 256;   // threads per scan block (512 elements each)
    const int tsz = 512;   // threads per block for the elementwise passes
    dim3 bb(bsz);
    dim3 gg(((n+1)/2+bsz-1)/bsz);
    stage_first<<<(n+tsz-1)/tsz, tsz>>>(n, cuStr, cuPos);
    // +1 extra shared slot: stage_second stashes the block total there.
    stage_second<<<gg, bb, (bsz*2+1)*sizeof(int32_t)>>>(n, cuPos);
    stage_fixed<<<(n+tsz-1)/tsz, tsz>>>(n, cuPos, bsz*2);
    stage_third<<<(n+tsz-1)/tsz, tsz>>>(n, cuPos);
}
// thrust::tabulate functor: map an index to -1 for word characters and to
// the index itself for whitespace (mirrors stage_first).
template<class T> struct MM {
    char *base;
    MM(char *base): base(base) {}
    __host__ __device__ T operator()(const T& index) const {
        if (base[index] > ' ')
            return -1;
        return index;
    }
};
// thrust::tabulate functor: turn a scanned whitespace index into the length
// of the word run ending at `index` (mirrors stage_third).
template<class T> struct NN {
    int32_t *base;
    NN(int32_t *base): base(base) {}
    __host__ __device__ T operator()(const T& index) const {
        return index - base[index];
    }
};
// Thrust reference implementation of custom_sol: mark whitespace positions,
// inclusive max-scan to carry the latest whitespace index forward, then
// convert each carried index into a run length.
void thrust_sol(int n, char *cuStr, int32_t *cuPos) {
    thrust::tabulate(thrust::device, cuPos, cuPos+n, MM<int32_t>(cuStr));
    thrust::inclusive_scan(thrust::device, cuPos, cuPos+n, cuPos, thrust::maximum<int>());
    thrust::tabulate(thrust::device, cuPos, cuPos+n, NN<int32_t>(cuPos));
}
// Benchmark driver: for each stdin line, run the word-run pipeline ten
// times on the GPU, then verify the result against a sequential scan.
int main() {
    static char *cuStr;
    static int32_t *cuPos;
    cudaMalloc(&cuStr, sizeof(char)*MAXN);
    cudaMalloc(&cuPos, sizeof(int32_t)*MAXN);
    static char line[MAXN];
    static int gpuOut[MAXN];
    int caseNo = 0;
    while (fgets(line, MAXN, stdin)) {
        const int n = strlen(line);
        cudaMemcpy(cuStr, line, sizeof(char)*n, cudaMemcpyHostToDevice);
        clock_t t0 = clock();
        for (int rep = 0; rep < 10; rep++) {
            custom_sol(n, cuStr, cuPos);
            // thrust_sol(n, cuStr, cuPos);
        }
        // The copy-back (and its implicit sync) is included in the timing.
        cudaMemcpy(gpuOut, cuPos, sizeof(int32_t)*n, cudaMemcpyDeviceToHost);
        clock_t dt = clock() - t0;
        printf ("It took me %lf seconds.\n", ((float) dt)/CLOCKS_PER_SEC/10);
        puts("Checking");
        t0 = clock();
        // Sequential reference: running count of consecutive word characters.
        int run = 0;
        for (int i = 0; i < n; i++) {
            run = (line[i] > ' ') ? run + 1 : 0;
            assert(run == gpuOut[i]);
        }
        dt = clock() - t0;
        printf ("It took me %lf seconds.\n", ((float) dt)/CLOCKS_PER_SEC);
        printf("Case #%d: PASS\n", ++caseNo);
    }
    cudaFree(cuPos);
    cudaFree(cuStr);
    return 0;
}
|
1,658
|
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS
#define checkCudaErrors(func) \
{ \
cudaError_t E = func; \
if(E != cudaSuccess) \
{ \
printf( "\nError at line: %d ", __LINE__); \
printf( "\nError: %s ", cudaGetErrorString(E)); \
} \
}
// Report the wall-clock duration between two clock() samples, in seconds.
void print_elapsed(clock_t start, clock_t stop)
{
    printf("Elapsed time: %.3fs\n",
           (double)(stop - start) / CLOCKS_PER_SEC);
}
// GPU helper function
// calculate the id of the current thread
// GPU helper function
// Linearise the 3-D thread and block coordinates into a unique global id.
// Fixed two defects in the original single-expression version:
//  1. it modified `x` several times between unsequenced `+` operands,
//     which is undefined behaviour in C++;
//  2. the block strides were mismatched -- the blockIdx.y term used
//     blocks->z and the blockIdx.z term used blocks->y, which is only
//     correct for a 1-D grid.  The stride for blockIdx.y is one row of
//     blocks (threads-per-block * blocks->x); blockIdx.z advances a full
//     plane of blocks.
__device__ unsigned int getIdx(dim3* threads, dim3* blocks) {
    unsigned int idx = threadIdx.x;
    unsigned int stride = threads->x;
    idx += threadIdx.y * stride;
    stride *= threads->y;
    idx += threadIdx.z * stride;
    stride *= threads->z;            // stride == threads per block
    idx += blockIdx.x * stride;
    stride *= blocks->x;             // stride == threads per row of blocks
    idx += blockIdx.y * stride;
    stride *= blocks->y;             // stride == threads per plane of blocks
    idx += blockIdx.z * stride;
    return idx;
}
//
// Perform a full mergesort on our section of the data.
//
//
// Merge the two sorted runs [start, middle) and [middle, end) from
// `source` into `dest`.  On equal keys the right run's element is
// emitted first (strict `<` comparison).
//
__device__ void gpu_bottomUpMerge(long* source, long* dest, long start, long middle, long end) {
    long left = start;
    long right = middle;
    for (long out = start; out < end; out++) {
        bool takeLeft = (left < middle) &&
                        (right >= end || source[left] < source[right]);
        dest[out] = takeLeft ? source[left++] : source[right++];
    }
}
// One merge pass: each thread merges up to `slices` adjacent width-sized
// windows of `source` into `dest`.  `threads`/`blocks` are device-resident
// copies of the launch configuration, used by getIdx to recover a global
// thread id.
__global__ void gpu_mergesort(long* source, long* dest, long size, long width, long slices, dim3* threads, dim3* blocks) {
    unsigned int idx = getIdx(threads, blocks);
    long start = width*idx*slices,   // first window owned by this thread
         middle,
         end;
    for (long slice = 0; slice < slices; slice++) {
        if (start >= size)
            break;
        // Clamp both run boundaries to the array end (last window may be short).
        middle = min(start + (width >> 1), size);
        end = min(start + width, size);
        gpu_bottomUpMerge(source, dest, start, middle, end);
        start += width;
    }
}
//
// Finally, sort something
// gets called by gpu_mergesort() for each slice
//
//
// Host driver: iterative bottom-up mergesort of `data` on the GPU,
// ping-ponging between two device buffers while doubling the merge width
// each pass.  `data` is sorted in place on return.
//
void mergesort(long* data, long size, dim3 threadsPerBlock, dim3 blocksPerGrid) {
    long* D_data;
    long* D_swp;
    dim3* D_threads;
    dim3* D_blocks;
    // Two device arrays: source and destination of each merge pass.
    checkCudaErrors(cudaMalloc((void**) &D_data, size * sizeof(long)));
    checkCudaErrors(cudaMalloc((void**) &D_swp, size * sizeof(long)));
    checkCudaErrors(cudaMemcpy(D_data, data, size * sizeof(long), cudaMemcpyHostToDevice));
    // The launch geometry is also needed on the device (getIdx reads it).
    checkCudaErrors(cudaMalloc((void**) &D_threads, sizeof(dim3)));
    checkCudaErrors(cudaMalloc((void**) &D_blocks, sizeof(dim3)));
    checkCudaErrors(cudaMemcpy(D_threads, &threadsPerBlock, sizeof(dim3), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(D_blocks, &blocksPerGrid, sizeof(dim3), cudaMemcpyHostToDevice));
    long* A = D_data;
    long* B = D_swp;
    long nThreads = threadsPerBlock.x * threadsPerBlock.y * threadsPerBlock.z *
                    blocksPerGrid.x * blocksPerGrid.y * blocksPerGrid.z;
    // Merge runs of width/2 into runs of width, doubling until the whole
    // array is one sorted run.  NOTE(review): `width` is an int, so this
    // assumes size < 2^30 -- confirm before sorting larger arrays.
    for (int width = 2; width < (size << 1); width <<= 1) {
        long slices = size / ((nThreads) * width) + 1;
        gpu_mergesort<<<blocksPerGrid, threadsPerBlock>>>(A, B, size, width, slices, D_threads, D_blocks);
        A = A == D_data ? D_swp : D_data;
        B = B == D_data ? D_swp : D_data;
    }
    // After the final swap, A points at the buffer holding the result.
    checkCudaErrors(cudaMemcpy(data, A, size * sizeof(long), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(A));
    checkCudaErrors(cudaFree(B));
    // Fixed: the launch-geometry buffers were leaked on every call.
    checkCudaErrors(cudaFree(D_threads));
    checkCudaErrors(cudaFree(D_blocks));
}
// Load NUM_VALS longs from reverse_dataset.txt, sort them on the GPU and
// report the elapsed wall-clock time.
int main(int argc, char const *argv[])
{
    clock_t start, stop;
    long *values = (long*)malloc(NUM_VALS * sizeof(long));
    FILE *f = fopen("reverse_dataset.txt", "r");
    // Fixed: bail out instead of dereferencing a NULL FILE* (or NULL
    // buffer) when the dataset is missing or allocation fails.
    if (f == NULL || values == NULL) {
        fprintf(stderr, "failed to open reverse_dataset.txt or allocate input\n");
        return 1;
    }
    for(int i=0;i< NUM_VALS; i++) {
        // Fixed: "%d" with a long* is undefined behaviour; "%ld" matches.
        fscanf(f, "%ld\n", &values[i]);
    }
    fclose(f);
    dim3 blocks(BLOCKS,1);
    dim3 threads(THREADS,1);
    start = clock();
    mergesort(values, NUM_VALS, threads, blocks);
    stop = clock();
    print_elapsed(start, stop);
    free(values);   // fixed: the input buffer was leaked
    return 0;
}
|
1,659
|
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// One block evaluates the transition log-probability for the land-use pair
// (blockIdx.x, blockIdx.y).  The block is nLandUse x nLandUse threads
// (nLandUse <= 32, so at most 1024 threads and 1024 shared slots), and
// prevPower2 is the largest power of two not exceeding nLandUse*nLandUse;
// it drives the unrolled shared-memory tree reduction below.
// NOTE(review): the tid < 32 reduction tail relies on implicit warp-level
// synchrony (no __syncwarp between steps); on Volta+ independent thread
// scheduling this needs explicit warp syncs -- confirm target architecture.
template <unsigned int prevPower2>
__global__ void transitionKernel(
    size_t nPixels,
    size_t nn1Pixels,
    float mu1Constant,
    float mu2Constant,
    float logNLandUse,
    unsigned int const* dkCounts,
    unsigned int const* dlCounts,
    float const* dPhis,
    float* probsOut
) {
    float
        klt,
        mu1,
        mu2,
        kll_l(0.0f),
        kkl_l(0.0f);
    unsigned int k = threadIdx.x;
    unsigned int l = threadIdx.y;
    unsigned int nLandUse = blockDim.x;
    unsigned int tid = k * nLandUse + l;
    unsigned int bid = blockIdx.x * nLandUse + blockIdx.y;
    // counts in local memory
    __shared__ int kCounts[32];
    __shared__ int lCounts[32];
    // the (log) transition probabilities
    __shared__ float phis[1024];
    __shared__ float kl1[1024];
    __shared__ float kl2[1024];
    __shared__ float kll_kkl[1024];
    // copy the counts to local memory
    phis[tid] = dPhis[tid];
    if(l == 0) {
        // remember the block is always square
        kCounts[k] = dkCounts[k];
        lCounts[k] = dlCounts[k];
    }
    __syncthreads();
    // do the sums: each thread contributes one (k,l) term to three
    // block-wide accumulators that are reduced below.
    klt = (kCounts[k] + 1) * (lCounts[l] + 1) * phis[tid];
    kl1[tid] = klt;
    kl2[tid] = klt * phis[tid];
    for(int korl(0); korl < k; ++korl) kkl_l += 2.0 * klt * kCounts[korl] * phis[korl*nLandUse + l];
    kkl_l += klt * (kCounts[k] + 1) * phis[tid];
    for(int korl(0); korl < l; ++korl) kll_l += 2.0 * klt * lCounts[korl] * phis[k*nLandUse + korl];
    kll_l += klt * (lCounts[l] + 1) * phis[tid];
    kll_kkl[tid] = kkl_l + kll_l;
    __syncthreads();
    // write rows beyond the largest power of two that fits into the first lastPower2 rows
    if(tid >= prevPower2) {
        kl1[tid - prevPower2] += kl1[tid];
        kl2[tid - prevPower2] += kl2[tid];
        kll_kkl[tid - prevPower2] += kll_kkl[tid];
    }
    __syncthreads();
    // now run the reduction (classic halving tree over prevPower2 slots)
    if(prevPower2 >= 1024) {
        if(tid < 512) {
            kl1[tid] += kl1[tid + 512];
            kl2[tid] += kl2[tid + 512];
            kll_kkl[tid] += kll_kkl[tid + 512];
        }
        __syncthreads();
    }
    if(prevPower2 >= 512) {
        if(tid < 256) {
            kl1[tid] += kl1[tid + 256];
            kl2[tid] += kl2[tid + 256];
            kll_kkl[tid] += kll_kkl[tid + 256];
        }
        __syncthreads();
    }
    if(prevPower2 >= 256) {
        if(tid < 128) {
            kl1[tid] += kl1[tid + 128];
            kl2[tid] += kl2[tid + 128];
            kll_kkl[tid] += kll_kkl[tid + 128];
        }
        __syncthreads();
    }
    if(prevPower2 >= 128) {
        if(tid < 64) {
            kl1[tid] += kl1[tid + 64];
            kl2[tid] += kl2[tid + 64];
            kll_kkl[tid] += kll_kkl[tid + 64];
        }
        __syncthreads();
    }
    if(tid < 32) { // if we are in the first warp: finish without barriers
        if(prevPower2 >= 64) {
            kl1[tid] += kl1[tid + 32];
            kl2[tid] += kl2[tid + 32];
            kll_kkl[tid] += kll_kkl[tid + 32];
        }
        if(prevPower2 >= 32) {
            kl1[tid] += kl1[tid + 16];
            kl2[tid] += kl2[tid + 16];
            kll_kkl[tid] += kll_kkl[tid + 16];
        }
        if(prevPower2 >= 16) {
            kl1[tid] += kl1[tid + 8];
            kl2[tid] += kl2[tid + 8];
            kll_kkl[tid] += kll_kkl[tid + 8];
        }
        if(prevPower2 >= 8) {
            kl1[tid] += kl1[tid + 4];
            kl2[tid] += kl2[tid + 4];
            kll_kkl[tid] += kll_kkl[tid + 4];
        }
        if(prevPower2 >= 4) {
            kl1[tid] += kl1[tid + 2];
            kl2[tid] += kl2[tid + 2];
            kll_kkl[tid] += kll_kkl[tid + 2];
        }
        if(prevPower2 >= 2) {
            kl1[tid] += kl1[tid + 1];
            kl2[tid] += kl2[tid + 1];
            kll_kkl[tid] += kll_kkl[tid + 1];
        }
    }
    __syncthreads();
    // Thread 0 combines the reduced moments into the output probability.
    if(tid == 0) {
        mu1 = mu1Constant + kl1[0] / nPixels;
        mu2 = mu2Constant
            + kl2[0] / nPixels
            + kl2[0] / nn1Pixels
            - kll_kkl[0] / nn1Pixels
            + (kl1[0] * kl1[0]) / nn1Pixels
            - 2.0f * logNLandUse * kl1[0];
        probsOut[bid] = mu1 + (mu2 - mu1*mu1) / 2.0f;
    }
}
// Host launcher for transitionKernel: computes the launch-time constants,
// finds the largest power of two <= nLandUse*nLandUse (the reduction size
// must be known at compile time), and dispatches the matching template
// instantiation.  Both the grid and the block are nLandUse x nLandUse, so
// one block runs per (k,l) land-use pair.
// NOTE(review): all pointers are assumed to be device pointers sized
// nLandUse (counts), nLandUse^2 (dPhis, probsOut) -- confirm at call site.
void getTProbs(
    unsigned int nLandUse,
    unsigned int nPixels,
    unsigned int const* dkCounts,
    unsigned int const* dlCounts,
    float const* dPhis,
    float* probsOut
) {
    unsigned int
        prevPower2(1),
        nn1Pixels(nPixels * (nPixels - 1));
    float
        logNLandUse(static_cast<float>(std::log(nLandUse))),
        mu1Constant(-(nPixels * logNLandUse)),
        mu2Constant(mu1Constant * mu1Constant);
    dim3
        blockDim(nLandUse, nLandUse);
    // Largest power of two that does not exceed the thread count.
    while(prevPower2 <= nLandUse*nLandUse) prevPower2 <<= 1;
    prevPower2 >>= 1;
    switch(prevPower2) {
    case 1024:
        transitionKernel<1024> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 512:
        transitionKernel<512> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 256:
        transitionKernel<256> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 128:
        transitionKernel<128> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 64:
        transitionKernel<64> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 32:
        transitionKernel<32> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 16:
        transitionKernel<16> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 8:
        transitionKernel<8> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 4:
        transitionKernel<4> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 2:
        transitionKernel<2> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    case 1:
        transitionKernel<1> <<<blockDim, blockDim>>>(nPixels, nn1Pixels, mu1Constant, mu2Constant, logNLandUse, dkCounts, dlCounts, dPhis, probsOut);
        break;
    }
}
|
1,660
|
#include <iostream>
#include <iterator>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <algorithm>
const int size=10;
// Demo: element-wise sum of two sequences on the device via thrust.
int main() {
    // Host inputs: x = 0,1,2,...  and  y = 0.0,0.1,0.2,...
    thrust::host_vector<double> x(size), y(size);
    thrust::sequence(x.begin(), x.end());
    thrust::sequence(y.begin(), y.end(), 0.0, 0.1);
    // Copy to the device and add pairwise into d_z.
    thrust::device_vector<double> d_x(x), d_y(y), d_z(size);
    thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), d_z.begin(),
                      thrust::plus<double>());
    // Bring the result back and print it space-separated.
    thrust::host_vector<double> z(d_z);
    for (size_t i = 0; i < z.size(); ++i)
        std::cout << z[i] << " ";
    std::cout << std::endl;
    return 0;
}
|
1,661
|
// For each element, compute the difference between the windowed energy of
// `b` and of `a` (17-tap wrap-around window), scaled by 1/9.
// Fixed: __shared__ variables cannot be declared at namespace scope --
// shared memory exists only per thread block, so nvcc rejects file-scope
// __shared__ declarations.  The buffers now live inside the kernel.
// NOTE(review): the window index wraps modulo numFloats inside shared
// memory, so this assumes blockDim.x == numFloats <= 512 -- confirm the
// launch configuration used by subdiff_runSub's callers.
__global__ void subDiffKernel(const float * a, const float * b, float * res, const int numFloats)
{
    __shared__ float sha[512];
    __shared__ float shb[512];
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    sha[threadIdx.x] = a[index];
    shb[threadIdx.x] = b[index];
    __syncthreads();   // all of sha/shb must be filled before the window reads
    float temp1 = 0.0f, temp2 = 0.0f;
    for (int i = -8; i <= 8; ++i)
    {
        const int index2 = (threadIdx.x + i + numFloats) % numFloats;
        temp1 += sha[index2] * sha[index2];
        temp2 += shb[index2] * shb[index2];
    }
    res[index] = (temp2 - temp1) / 9.0f;
}
// Host-side wrapper: launch subDiffKernel with `gs` blocks of `bs` threads.
void subdiff_runSub(const int gs, const int bs, float * p0, float * p1, float * p2, int p3)
{
    dim3 grid(gs);
    dim3 block(bs);
    subDiffKernel<<<grid, block>>>(p0, p1, p2, p3);
}
|
1,662
|
/* This assignment requires two GPU program variants: 1) the simplest and shortest possible; and 2) a fast one that uses shared memory.
Generate random input data for the algorithm and verify the program's correctness automatically.
Give a theoretical performance estimate for both variants of the algorithm. State in the report how much the theoretical estimate differs from the measured one. */
/* Implement the multiplication of a long matrix, stored column-by-column, by a long vector */
#include <iostream>
#define N 16 //shortest dimension of A: 32
#define M 2*(102400*8) // 1
//Theoretical performance estimate of the parallel variant: 4*819200*32/18,000,000,000 (1.8e10 = DeviceToDevice*2, in GB)
using namespace std;
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
// Naive kernel: one thread per output row.  A is column-major (N rows,
// M columns), so element (row, col) lives at A[row + col*N].
__global__ void Multiply(int *A, int *B, int *C){
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    int acc = 0;
    // Dot product of matrix row `row` with vector B.
    for (int col = 0; col < M; ++col)
        acc += A[row + col*N] * B[col];
    C[row] = acc;
}
// One block per output row (launched as <<<N, blockDim>>>): each thread
// accumulates a strided partial dot product privately, then the block
// combines the partials in shared memory via atomicAdd.
// NOTE(review): threads with col >= M return before the __syncthreads()
// below -- safe only if M is a multiple of the launch width, confirm the
// launch geometry if M changes.
__global__ void Multiply_smart_string(int *A, int *B, int *C){
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (col >= M)
        return;
    int dev_private = 0;                 // this thread's partial sum
    __shared__ int dev_shared;           // block-wide accumulator
    // Thread t handles columns t, t+blockDim.x, t+2*blockDim.x, ...
    for (int j = 0; j < M/blockDim.x; ++j)
    {
        int addition = A[(j*blockDim.x+threadIdx.x)*N+blockIdx.x] * B[j*blockDim.x+threadIdx.x];
        dev_private += addition;
    }
    if (threadIdx.x == 0)
        dev_shared = 0;
    __syncthreads();                     // accumulator zeroed before adds
    atomicAdd(&dev_shared, dev_private);
    __syncthreads();                     // all partials folded in
    if (threadIdx.x == 0)
        C[blockIdx.x] = dev_shared;
}
// Grid-wide variant: all threads sweep the column-major matrix in a
// coalesced pattern (consecutive threads read consecutive rows of the
// same column), accumulate per-row partials in shared memory, then fold
// each block's partials into the global result with atomics.
// NOTE(review): assumes N <= blockDim.x and that blockDim.x*gridDim.x is
// a multiple of N, and that C was zeroed by the caller (main uses
// cudaMemset) -- confirm before reusing with other launch shapes.
__global__ void Multiply_smart_column(int *A, int *B, int *C){
    int global_id = blockIdx.x*blockDim.x + threadIdx.x;
    int global_trd_cnt = blockDim.x*gridDim.x;     // total threads in the grid
    __shared__ int dev_shared_res[N];              // per-row partial sums for this block
    int addition = 0;
    if (threadIdx.x < N)
        dev_shared_res[threadIdx.x] = 0;
    // Each pass covers global_trd_cnt consecutive matrix elements.
    for (int j = 0; j < M/(global_trd_cnt/N); ++j)
    {
        int super_global_id = global_id + j*global_trd_cnt;
        int row = super_global_id % N;
        int col = super_global_id / N;
        addition += A[col*N + row] * B[col];
    }
    __syncthreads();                               // shared zeros visible to all
    atomicAdd(&dev_shared_res[threadIdx.x % N], addition);
    __syncthreads();                               // block partials complete
    if (threadIdx.x < N)
        atomicAdd(&C[threadIdx.x], dev_shared_res[threadIdx.x]);
}
// Driver: build a random column-major N x M matrix and an M-vector,
// multiply them on CPU and GPU, compare the results and report timings.
int main(int argc, char **argv)
{
    srand(time(NULL));
    int *A = new int [N*M];
    int *b = new int [M];
    int *res_CPU = new int[N];
    int *res_GPU = new int[N];
    int i, j;
    for(i = 0; i < N; ++i)
        res_CPU[i] = 0;
    // A is column-major: element (row i, column j) lives at A[i + j*N].
    for(i = 0; i < N; ++i)
    {
        for(j = 0; j < M; ++j)
        {
            A[i + j*N] = rand() % 10;
        }
    }
    for(i = 0; i < M; ++i)
    {
        b[i] = rand() % 10;
    }
    // CPU reference product, timed for comparison.
    clock_t startCPU = clock();
    for(i = 0; i < N; ++i)
    {
        for(j = 0; j < M; ++j)
            res_CPU[i] += A[i + j*N]*b[j];
    }
    double elapsedTimeCPU = (double)(clock()-startCPU)/CLOCKS_PER_SEC;
    cout << "CPU product time = " << elapsedTimeCPU*1000 << " ms\n";
    int *aA, *aB, *aRes;
    cudaEvent_t startCUDA, stopCUDA;
    float elapsedTimeCUDA;
    cudaEventCreate(&startCUDA);
    cudaEventCreate(&stopCUDA);
    CHECK(cudaMalloc((void**)&aA, (N*M)*sizeof(int)));
    CHECK(cudaMalloc((void**)&aB, (M)*sizeof(int)));
    CHECK(cudaMalloc((void**)&aRes, (N)*sizeof(int)));
    CHECK(cudaMemcpy(aA, A, (N*M)*sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(aB, b, (M)*sizeof(int), cudaMemcpyHostToDevice));
    // The column kernel accumulates with atomics, so the result must start at 0.
    CHECK(cudaMemset(aRes, 0, (N)*sizeof(int)));
    cudaEventRecord(startCUDA,0);
    //Multiply<<<(N+511)/512, 512>>>(aA,aB,aRes);
    //Multiply_smart_string<<<N, 512>>>(aA,aB,aRes);
    Multiply_smart_column<<<8, 1024>>>(aA,aB,aRes); //N*M/1024
    cudaEventRecord(stopCUDA,0);
    cudaEventSynchronize(stopCUDA);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(res_GPU, aRes, N*sizeof(int), cudaMemcpyDeviceToHost));
    cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
    cout << "CUDA product time = " << elapsedTimeCUDA << " ms\n";
    cout << "CUDA memory throughput = " << 3*N*sizeof(float)/elapsedTimeCUDA/1024/1024/1.024 << " Gb/s\n";
    for (i = 0; i < N; i++) {
        if (res_CPU[i] != res_GPU[i])
        {
            cout << "Not equal. Try again, again." << endl;
            break;
        }
    }
    // Fixed: release device buffers, both CUDA events and all host
    // allocations (the original leaked every host array and both events).
    CHECK(cudaFree(aA));
    CHECK(cudaFree(aB));
    CHECK(cudaFree(aRes));
    cudaEventDestroy(startCUDA);
    cudaEventDestroy(stopCUDA);
    delete [] A;
    delete [] b;
    delete [] res_CPU;
    delete [] res_GPU;
    return 0;
}
|
1,663
|
#include <cuda.h>
#include <stdio.h>
#include <unistd.h>
// Debugging aid: print instructions for attaching cuda-gdb, spin until the
// attached debugger flips `dummy` (by name -- do not rename it), then
// initialise the CUDA driver API and report the device count.
int main(){
    printf("Press Ctrl+z to suspend program\n");
    printf("Type bg to send program to background\n");
    printf("Attach to process with cuda-gdb\n\n");
    printf("cuda-gdb --pid=%d\n\n", getpid());
    printf("Issue the following commands in (cuda-gdb)\n");
    printf(" set var dummy=1\n");
    printf(" continue\n");
    fflush(stdout);
    // `dummy` is volatile so each loop iteration re-reads it from memory,
    // letting the debugger's `set var dummy=1` break the spin.
    volatile int dummy=0;
    int sentinel=0;
    while(!dummy)
        sentinel=1;
    dummy=sentinel;
    // Initialize the driver API.
    cuInit(0);
    // Get number of devices supporting CUDA.
    int deviceCount = 0;
    cuDeviceGetCount(&deviceCount);
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        exit (0);
    }
    printf("There are %d CUDA devices\n", deviceCount);
    return 0;
}
|
1,664
|
#include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// One block per filter (bid): re-normalise the filter's spatial components
// U, store the resulting norm in mu, then circularly align the temporal
// waveform W so its (10x-upsampled) peak sits at sample `tmax`.
// Params layout (indices read below): [1]=Nfilt, [4]=nt0, [6]=Nrank,
// [9]=Nchan, [11]=tmax.  A is the nt0 x 10 upsampling matrix, B holds the
// nt0 x nt0 interpolation matrices for each of the 10 sub-sample shifts.
// NOTE(review): the unguarded `sW[tid + k*nt0] = W[tid + ...]` loops assume
// blockDim.x == nt0 (<= nt0max) and Nrank <= NrankMax, Nchan*Nrank within
// NchanMax*NrankMax -- confirm the launch configuration.
__global__ void reNormalize(const double *Params, const double *A, const double *B, double *W, double *U, double *mu){
    int Nfilt, nt0, tid, bid, Nchan,k, Nrank, imax, t, ishift, tmax;
    double x, xmax, xshift, sgnmax;
    volatile __shared__ double sW[NrankMax*nt0max], sU[NchanMax*NrankMax], sS[NrankMax+1],
            sWup[nt0max*10];
    nt0       = (int) Params[4];
    Nchan     = (int) Params[9];
    Nfilt     = (int) Params[1];
    Nrank     = (int) Params[6];
    tmax      = (int) Params[11];
    bid       = blockIdx.x;
    tid 		= threadIdx.x;
    // Stage this filter's temporal (sW) and spatial (sU) components.
    for(k=0;k<Nrank;k++)
        sW[tid + k*nt0] = W[tid + bid*nt0 + k*Nfilt*nt0];
    while (tid<Nchan*Nrank){
        sU[tid] = U[tid%Nchan + bid*Nchan + (tid/Nchan)*Nchan*Nfilt];
        tid += blockDim.x;
    }
    __syncthreads();
    tid 		= threadIdx.x;
    // Per-rank L2 norm of the spatial component.
    if (tid<Nrank){
        x = 0.0f;
        for (k=0; k<Nchan; k++)
            x += sU[k + tid*Nchan] * sU[k + tid*Nchan];
        sS[tid] = sqrt(x);
    }
    // no need to sync here
    // Combined norm across ranks (epsilon keeps it non-zero); this is mu.
    if (tid==0){
        x = 0.0000001f;
        for (k=0;k<Nrank;k++)
            x += sS[k] * sS[k];
        sS[Nrank] = sqrt(x);
        mu[bid] = sqrt(x);
    }
    __syncthreads();
    // now re-normalize U
    tid = threadIdx.x;
    while (tid<Nchan*Nrank){
        U[tid%Nchan + bid*Nchan + (tid/Nchan)*Nchan*Nfilt] = sU[tid] / sS[Nrank];
        tid += blockDim.x;
    }
    /////////////
    __syncthreads();
    // now align W: find the coarse peak of the rank-0 waveform ...
    xmax = 0.0f;
    imax = 0;
    for(t=0;t<nt0;t++)
        if (abs(sW[t]) > xmax){
            xmax = abs(sW[t]);
            imax = t;
        }
    tid = threadIdx.x;
    // ... and circularly shift every rank so the peak lands on tmax.
    // shift by imax - tmax
    for (k=0;k<Nrank;k++){
        ishift = tid + (imax-tmax);
        ishift = (ishift%nt0 + nt0)%nt0;   // wrap into [0, nt0)
        xshift = sW[ishift + k*nt0];
        W[tid + bid*nt0 + k*nt0*Nfilt] = xshift;
    }
    __syncthreads();
    // Reload the shifted waveforms for the sub-sample alignment below.
    for (k=0;k<Nrank;k++){
        sW[tid + k*nt0] = W[tid + bid*nt0 + k*nt0*Nfilt];
    }
    /////////////
    __syncthreads();
    // now align W. first compute 10x subsample peak
    tid = threadIdx.x;
    if (tid<10){
        sWup[tid] = 0;
        for (t=0;t<nt0;t++)
            sWup[tid] += A[tid + t*10] * sW[t];
    }
    __syncthreads();
    // Pick the sub-sample offset (and its sign) with the largest response.
    xmax = 0.0f;
    imax = 0;
    sgnmax = 1.0f;
    for(t=0;t<10;t++)
        if (abs(sWup[t]) > xmax){
            xmax = abs(sWup[t]);
            imax = t;
            sgnmax = copysign(1.0f, sWup[t]);
        }
    // interpolate by imax: apply the chosen shift's interpolation matrix;
    // rank 0 is also sign-flipped so the peak is negative-going.
    for (k=0;k<Nrank;k++){
        xshift = 0.0f;
        for (t=0;t<nt0;t++)
            xshift += B[tid + t*nt0 +nt0*nt0*imax] * sW[t + k*nt0];
        if (k==0)
            xshift = -xshift * sgnmax;
        W[tid + bid*nt0 + k*nt0*Nfilt] = xshift;
    }
}
|
1,665
|
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
//====================================================================================================100
//==================================================50
//========================================================================================================================================================================================================200
// UPDATE
//========================================================================================================================================================================================================200
// 14 APR 2011 Lukasz G. Szafaryn
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <stdbool.h> // (in path known to compiler) needed by true/false
#include <stdio.h> // (in path known to compiler) needed by printf
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <time.h>
#include <sys/time.h>
#define fp double
#define NUMBER_PAR_PER_BOX 100 // keep this low to allow more blocks that share shared memory to run concurrently, code does not work for larger than 110, more speedup can be achieved with larger number and no shared memory used
/* #define NUMBER_THREADS 128 // this should be roughly equal to NUMBER_PAR_PER_BOX for best performance */
// Parameterized work group size
#ifdef RD_WG_SIZE_0_0
#define NUMBER_THREADS RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define NUMBER_THREADS RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define NUMBER_THREADS RD_WG_SIZE
#else
#define NUMBER_THREADS 128
#endif
#define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z)) // STABLE
//===============================================================================================================================================================================================================200
// STRUCTURES
//===============================================================================================================================================================================================================200
typedef struct
{
fp x, y, z;
} THREE_VECTOR;
typedef struct
{
fp v, x, y, z;
} FOUR_VECTOR;
typedef struct nei_str
{
// neighbor box
int x, y, z;
int number;
long offset;
} nei_str;
typedef struct box_str
{
// home box
int x, y, z;
int number;
long offset;
// neighbor boxes
int nn;
nei_str nei[26];
} box_str;
typedef struct par_str
{
fp alpha;
} par_str;
typedef struct dim_str
{
// input arguments
int cur_arg;
int arch_arg;
int cores_arg;
int boxes1d_arg;
// system memory
long number_boxes;
long box_mem;
long space_elem;
long space_mem;
long space_mem2;
} dim_str;
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
// Returns 1 when str is a non-empty string of decimal digits only,
// 0 otherwise.  Used to validate numeric command-line values.
int isInteger(char *str) {
  if (str[0] == '\0') {
    return 0;  // empty string is not a valid integer
  }
  while (*str != '\0') {
    char c = *str++;
    if (c < '0' || c > '9') {  // any non-digit (incl. '.', '-') rejects
      return 0;
    }
  }
  return 1;
}
// Wall-clock time in microseconds since the Unix epoch.
long long get_time() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return now.tv_sec * 1000000 + now.tv_usec;
}
// Abort with a descriptive message if the most recent CUDA runtime call
// (or kernel launch) left a sticky error.  msg tags the call site.
void checkCUDAError(const char *msg)
{
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    fflush(NULL);  // make sure the diagnostic is emitted before exit
    exit(EXIT_FAILURE);
  }
}
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
//--------------------------------------------------------------------------
// kernel_gpu_cuda
//
// Pairwise particle interactions.  One thread block handles one "home" box
// (blockIdx.x indexes the box); threads stride over that box's
// NUMBER_PAR_PER_BOX particles.  For the home box and each of its neighbor
// boxes, positions and charges are staged in shared memory, then every home
// particle accumulates potential/force contributions from every particle of
// the current box.
//
// Expected launch configuration (set by the host wrapper):
//   grid = (number_boxes, 1), block = (NUMBER_THREADS, 1)
//--------------------------------------------------------------------------
__global__ void kernel_gpu_cuda(par_str d_par_gpu, dim_str d_dim_gpu,
                                box_str *d_box_gpu, FOUR_VECTOR *d_rv_gpu,
                                fp *d_qv_gpu, FOUR_VECTOR *d_fv_gpu) {
  int bx = blockIdx.x;  // one block per home box
  int tx = threadIdx.x;
  int wtx = tx;         // work-thread index, strided by NUMBER_THREADS

  if (bx < d_dim_gpu.number_boxes) {
    // precomputed 2*alpha^2 used in the Gaussian weight exp(-a2*r2)
    fp a2 = 2.0 * d_par_gpu.alpha * d_par_gpu.alpha;

    // home box: positions rA, force accumulators fA
    int first_i;
    FOUR_VECTOR *rA;
    FOUR_VECTOR *fA;
    // FIX: size shared buffers with the macro instead of a hard-coded 100,
    // so changing NUMBER_PAR_PER_BOX cannot silently overflow them.
    __shared__ FOUR_VECTOR rA_shared[NUMBER_PAR_PER_BOX];

    // neighbor box: positions rB, charges qB
    int pointer;
    int k = 0;
    int first_j;
    FOUR_VECTOR *rB;
    fp *qB;
    int j = 0;
    __shared__ FOUR_VECTOR rB_shared[NUMBER_PAR_PER_BOX];
    // FIX: charges have type fp; declaring this buffer as double was only
    // correct while fp happened to be double.
    __shared__ fp qB_shared[NUMBER_PAR_PER_BOX];

    // per-pair temporaries
    fp r2, u2, vij, fs, fxij, fyij, fzij;
    THREE_VECTOR d;

    //---- home box setup -------------------------------------------------
    first_i = d_box_gpu[bx].offset;
    rA = &d_rv_gpu[first_i];
    fA = &d_fv_gpu[first_i];

    // stage home-box positions into shared memory
    while (wtx < NUMBER_PAR_PER_BOX) {
      rA_shared[wtx] = rA[wtx];
      wtx = wtx + NUMBER_THREADS;
    }
    wtx = tx;
    __syncthreads();  // rA_shared must be complete before any thread reads it

    //---- loop over home box (k == 0) and its nn neighbor boxes ----------
    for (k = 0; k < (1 + d_box_gpu[bx].nn); k++) {
      if (k == 0) {
        pointer = bx;  // first iteration processes the home box itself
      } else {
        pointer = d_box_gpu[bx].nei[k - 1].number;
      }

      // stage the current box's positions and charges into shared memory
      first_j = d_box_gpu[pointer].offset;
      rB = &d_rv_gpu[first_j];
      qB = &d_qv_gpu[first_j];
      while (wtx < NUMBER_PAR_PER_BOX) {
        rB_shared[wtx] = rB[wtx];
        qB_shared[wtx] = qB[wtx];
        wtx = wtx + NUMBER_THREADS;
      }
      wtx = tx;
      __syncthreads();  // staged data must be visible before the pair loop

      // each home particle wtx accumulates contributions from every
      // particle j of the current box
      while (wtx < NUMBER_PAR_PER_BOX) {
        for (j = 0; j < NUMBER_PAR_PER_BOX; j++) {
          r2 = rA_shared[wtx].v + rB_shared[j].v -
               DOT(rA_shared[wtx], rB_shared[j]);
          u2 = a2 * r2;
          vij = exp(-u2);
          fs = 2 * vij;
          d.x = rA_shared[wtx].x - rB_shared[j].x;
          fxij = fs * d.x;
          d.y = rA_shared[wtx].y - rB_shared[j].y;
          fyij = fs * d.y;
          d.z = rA_shared[wtx].z - rB_shared[j].z;
          fzij = fs * d.z;
          fA[wtx].v += qB_shared[j] * vij;
          fA[wtx].x += qB_shared[j] * fxij;
          fA[wtx].y += qB_shared[j] * fyij;
          fA[wtx].z += qB_shared[j] * fzij;
        }
        wtx = wtx + NUMBER_THREADS;
      }
      wtx = tx;
      // protect shared buffers: the next iteration overwrites them
      __syncthreads();
    }
  }
}
//--------------------------------------------------------------------------
// kernel_gpu_cuda_wrapper
//
// Host-side driver: allocates device buffers, copies inputs to the GPU,
// launches kernel_gpu_cuda with one block per box, copies the accumulated
// forces back into fv_cpu, frees device memory, and prints a per-stage
// timing breakdown.
//--------------------------------------------------------------------------
void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str *box_cpu,
                             FOUR_VECTOR *rv_cpu, fp *qv_cpu,
                             FOUR_VECTOR *fv_cpu) {
  // stage timers (microseconds)
  long long time0;
  long long time1;
  long long time2;
  long long time3;
  long long time4;
  long long time5;
  long long time6;
  time0 = get_time();

  // force driver/context initialization so its cost is not charged to a
  // later stage
  cudaDeviceSynchronize();

  // device buffers
  box_str *d_box_gpu;
  FOUR_VECTOR *d_rv_gpu;
  fp *d_qv_gpu;
  FOUR_VECTOR *d_fv_gpu;

  // execution configuration: one block per home box
  dim3 threads;
  dim3 blocks;
  blocks.x = dim_cpu.number_boxes;
  blocks.y = 1;
  threads.x = NUMBER_THREADS;
  threads.y = 1;
  time1 = get_time();

  // device allocations
  cudaMalloc((void **)&d_box_gpu, dim_cpu.box_mem);
  cudaMalloc((void **)&d_rv_gpu, dim_cpu.space_mem);
  cudaMalloc((void **)&d_qv_gpu, dim_cpu.space_mem2);
  cudaMalloc((void **)&d_fv_gpu, dim_cpu.space_mem);
  // FIX: check allocations; a failed cudaMalloc previously surfaced only
  // as a crash inside the kernel
  checkCUDAError("cudaMalloc");
  time2 = get_time();

  // host -> device input copies (fv carries the zeroed accumulators)
  cudaMemcpy(d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice);
  cudaMemcpy(d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
  cudaMemcpy(d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice);
  cudaMemcpy(d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
  checkCUDAError("cudaMemcpy HtoD");  // FIX: previously unchecked
  time3 = get_time();

  // launch kernel over all boxes
  kernel_gpu_cuda<<<blocks, threads>>>(par_cpu, dim_cpu, d_box_gpu, d_rv_gpu,
                                       d_qv_gpu, d_fv_gpu);
  checkCUDAError("Start");  // launch-configuration errors
  cudaDeviceSynchronize();
  // FIX: asynchronous execution errors (e.g. illegal address) only appear
  // after the sync; previously they were silently dropped
  checkCUDAError("kernel_gpu_cuda");
  time4 = get_time();

  // copy results back
  cudaMemcpy(fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost);
  checkCUDAError("cudaMemcpy DtoH");  // FIX: previously unchecked
  time5 = get_time();

  // release device memory
  cudaFree(d_rv_gpu);
  cudaFree(d_qv_gpu);
  cudaFree(d_fv_gpu);
  cudaFree(d_box_gpu);
  time6 = get_time();

  // timing breakdown
  printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
  printf("%15.12f s, %15.12f %% : GPU: SET DEVICE / DRIVER INIT\n",
         (float)(time1 - time0) / 1000000,
         (float)(time1 - time0) / (float)(time6 - time0) * 100);
  printf("%15.12f s, %15.12f %% : GPU MEM: ALO\n",
         (float)(time2 - time1) / 1000000,
         (float)(time2 - time1) / (float)(time6 - time0) * 100);
  printf("%15.12f s, %15.12f %% : GPU MEM: COPY IN\n",
         (float)(time3 - time2) / 1000000,
         (float)(time3 - time2) / (float)(time6 - time0) * 100);
  printf("%15.12f s, %15.12f %% : GPU: KERNEL\n",
         (float)(time4 - time3) / 1000000,
         (float)(time4 - time3) / (float)(time6 - time0) * 100);
  printf("%15.12f s, %15.12f %% : GPU MEM: COPY OUT\n",
         (float)(time5 - time4) / 1000000,
         (float)(time5 - time4) / (float)(time6 - time0) * 100);
  printf("%15.12f s, %15.12f %% : GPU MEM: FRE\n",
         (float)(time6 - time5) / 1000000,
         (float)(time6 - time5) / (float)(time6 - time0) * 100);
  printf("Total time:\n");
  printf("%.12f s\n", (float)(time6 - time0) / 1000000);
}
//========================================================================================================================================================================================================200
// MAIN FUNCTION
//========================================================================================================================================================================================================200
//--------------------------------------------------------------------------
// main
//
// Parses the -boxes1d argument, builds the box neighbor topology and random
// particle data on the host, runs the GPU kernel wrapper, optionally dumps
// the resulting forces (when compiled with -DOUTPUT), and frees memory.
//--------------------------------------------------------------------------
int main(int argc, char *argv[]) {
  printf("thread block size of kernel = %d \n", NUMBER_THREADS);

  // counters
  int i, j, k, l, m, n;

  // system memory
  par_str par_cpu;
  dim_str dim_cpu;
  box_str *box_cpu;
  FOUR_VECTOR *rv_cpu;
  fp *qv_cpu;
  FOUR_VECTOR *fv_cpu;
  int nh;

  //========================================================================
  // check input arguments
  //========================================================================
  // default value
  dim_cpu.boxes1d_arg = 1;
  for (dim_cpu.cur_arg = 1; dim_cpu.cur_arg < argc; dim_cpu.cur_arg++) {
    if (strcmp(argv[dim_cpu.cur_arg], "-boxes1d") == 0) {
      // FIX: a value exists only when argc > cur_arg + 1; the original
      // ">=" test let argv[argc] (which is NULL) reach isInteger() and
      // dereference a null pointer.
      if (argc > dim_cpu.cur_arg + 1) {
        if (isInteger(argv[dim_cpu.cur_arg + 1]) == 1) {
          dim_cpu.boxes1d_arg = atoi(argv[dim_cpu.cur_arg + 1]);
          // FIX: reject 0 as well, matching the message; the original
          // "< 0" branch was dead because isInteger() never accepts '-'.
          if (dim_cpu.boxes1d_arg <= 0) {
            printf("ERROR: Wrong value to -boxes1d parameter, cannot be <=0\n");
            return 0;
          }
          dim_cpu.cur_arg = dim_cpu.cur_arg + 1;  // consume the value
        } else {
          // FIX: typo "in not" -> "is not"
          printf("ERROR: Value to -boxes1d parameter is not a number\n");
          return 0;
        }
      } else {
        printf("ERROR: Missing value to -boxes1d parameter\n");
        return 0;
      }
    } else {
      printf("ERROR: Unknown parameter\n");
      return 0;
    }
  }
  printf("Configuration used: boxes1d = %d\n", dim_cpu.boxes1d_arg);

  //========================================================================
  // inputs
  //========================================================================
  par_cpu.alpha = 0.5;

  //========================================================================
  // dimensions
  //========================================================================
  dim_cpu.number_boxes =
      dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg;
  dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX;
  dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR);
  dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(fp);
  dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);

  //========================================================================
  // box topology: record each home box's up-to-26 valid neighbor boxes
  //========================================================================
  box_cpu = (box_str *)malloc(dim_cpu.box_mem);
  // FIX: check allocation before use
  if (box_cpu == NULL) {
    printf("ERROR: out of memory\n");
    return 1;
  }
  nh = 0;
  for (i = 0; i < dim_cpu.boxes1d_arg; i++) {      // home boxes, z
    for (j = 0; j < dim_cpu.boxes1d_arg; j++) {    // home boxes, y
      for (k = 0; k < dim_cpu.boxes1d_arg; k++) {  // home boxes, x
        box_cpu[nh].x = k;
        box_cpu[nh].y = j;
        box_cpu[nh].z = i;
        box_cpu[nh].number = nh;
        box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX;
        box_cpu[nh].nn = 0;
        for (l = -1; l < 2; l++) {      // neighbors, z
          for (m = -1; m < 2; m++) {    // neighbors, y
            for (n = -1; n < 2; n++) {  // neighbors, x
              // neighbor must lie inside the grid and not be the home box
              if ((i + l) >= 0 && (j + m) >= 0 && (k + n) >= 0 &&
                  (i + l) < dim_cpu.boxes1d_arg &&
                  (j + m) < dim_cpu.boxes1d_arg &&
                  (k + n) < dim_cpu.boxes1d_arg &&
                  !(l == 0 && m == 0 && n == 0)) {
                nei_str *nei = &box_cpu[nh].nei[box_cpu[nh].nn];
                nei->x = (k + n);
                nei->y = (j + m);
                nei->z = (i + l);
                // linearize the neighbor's 3D coordinate into its box index
                nei->number =
                    (nei->z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) +
                    (nei->y * dim_cpu.boxes1d_arg) + nei->x;
                nei->offset = nei->number * NUMBER_PAR_PER_BOX;
                box_cpu[nh].nn = box_cpu[nh].nn + 1;
              }
            }
          }
        }
        nh = nh + 1;
      }
    }
  }

  //========================================================================
  // particle data: random positions/charges in [0.1, 1.0], zeroed forces
  //========================================================================
  srand(time(NULL));
  rv_cpu = (FOUR_VECTOR *)malloc(dim_cpu.space_mem);
  qv_cpu = (fp *)malloc(dim_cpu.space_mem2);
  fv_cpu = (FOUR_VECTOR *)malloc(dim_cpu.space_mem);
  // FIX: check allocations before use
  if (rv_cpu == NULL || qv_cpu == NULL || fv_cpu == NULL) {
    printf("ERROR: out of memory\n");
    free(rv_cpu);
    free(qv_cpu);
    free(fv_cpu);
    free(box_cpu);
    return 1;
  }
  // input distances: each component a random value in 0.1 .. 1.0
  for (i = 0; i < dim_cpu.space_elem; i = i + 1) {
    rv_cpu[i].v = (rand() % 10 + 1) / 10.0;
    rv_cpu[i].x = (rand() % 10 + 1) / 10.0;
    rv_cpu[i].y = (rand() % 10 + 1) / 10.0;
    rv_cpu[i].z = (rand() % 10 + 1) / 10.0;
  }
  // input charges: random value in 0.1 .. 1.0
  for (i = 0; i < dim_cpu.space_elem; i = i + 1) {
    qv_cpu[i] = (rand() % 10 + 1) / 10.0;
  }
  // output forces: zeroed, because the kernel accumulates into them
  for (i = 0; i < dim_cpu.space_elem; i = i + 1) {
    fv_cpu[i].v = 0;
    fv_cpu[i].x = 0;
    fv_cpu[i].y = 0;
    fv_cpu[i].z = 0;
  }

  //========================================================================
  // GPU kernel
  //========================================================================
  kernel_gpu_cuda_wrapper(par_cpu, dim_cpu, box_cpu, rv_cpu, qv_cpu, fv_cpu);

  //========================================================================
  // optional result dump, then cleanup
  //========================================================================
#ifdef OUTPUT
  FILE *fptr;
  fptr = fopen("result.txt", "w");
  for (i = 0; i < dim_cpu.space_elem; i = i + 1) {
    fprintf(fptr, "%f, %f, %f, %f\n", fv_cpu[i].v, fv_cpu[i].x, fv_cpu[i].y,
            fv_cpu[i].z);
  }
  fclose(fptr);
#endif
  free(rv_cpu);
  free(qv_cpu);
  free(fv_cpu);
  free(box_cpu);
  return 0;  // FIX: was "return 0.0" (double truncated to int)
}
// ===========================================================================
// (extraction artifact: stray table residue "| 1,666 |" removed — boundary
//  between the lavaMD benchmark above and the PRAND gm55 generator below)
// ===========================================================================
// (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gm55_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gm55_BLOCKS 128
#define gm55_THREADS 128
#define gm55_ARRAY_SECTIONS (gm55_BLOCKS*gm55_THREADS/8)
#define gm55_k 256
#define gm55_q 176
#define gm55_g 36028797018961904ULL
#define gm55_gdiv16 2251799813685119ULL
typedef unsigned long long lt;
typedef struct{
lt xN[8] __attribute__ ((aligned(16))),
xP[8] __attribute__ ((aligned(16)));
} gm55_state;
typedef gm55_state gm55_sse_state;
lt gm55_sse_Consts[8] __attribute__ ((aligned(16))) = {396316767208580944ULL,396316767208580944ULL,
2064ULL,2064ULL,36028792732385279ULL,36028792732385279ULL,36028797018961904ULL,36028797018961904ULL};
// SSE2 counterpart of the scalar gm55_generate_() below: advances all 8 lanes
// of the generator state (xN/xP) one step and packs their top bits into one
// 32-bit output.  The x86 assembly processes the lanes two at a time in four
// nearly identical blocks (xmm4..xmm7), loading reduction constants from
// gm55_sse_Consts.  Host-only (inline x86 asm cannot run on the device).
// NOTE(review): the asm is preserved byte-for-byte; verify equivalence
// against the scalar gm55_generate_() reference if modifying either.  The
// "pxor" line lacking the trailing backslash is harmless — adjacent string
// literals concatenate without it.
__host__ unsigned int gm55_sse_generate_(gm55_sse_state* state){
unsigned output;
asm volatile("movaps (%3),%%xmm0\n" \
"movaps (%2),%%xmm1\n" \
"movaps (%1),%%xmm4\n" \
"movaps %%xmm4,(%2)\n" \
"psllq $4,%%xmm4\n" \
"paddq %%xmm0,%%xmm4\n" \
"movaps %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"psllq $3,%%xmm1\n" \
"paddq %%xmm1,%%xmm2\n" \
"psubq %%xmm2,%%xmm4\n" \
"movaps %%xmm4,%%xmm2\n" \
"psrlq $51,%%xmm2\n" \
"movaps %%xmm2,%%xmm3\n" \
"psllq $7,%%xmm3\n" \
"paddq %%xmm2,%%xmm3\n" \
"psllq $51,%%xmm2\n" \
"psubq %%xmm2,%%xmm4\n" \
"paddq %%xmm3,%%xmm4\n" \
"psllq $4,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,(%1)\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%1),%%xmm5\n" \
"movaps %%xmm5,16(%2)\n" \
"psllq $4,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"movaps %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"psllq $3,%%xmm1\n" \
"paddq %%xmm1,%%xmm2\n" \
"psubq %%xmm2,%%xmm5\n" \
"movaps %%xmm5,%%xmm2\n" \
"psrlq $51,%%xmm2\n" \
"movaps %%xmm2,%%xmm3\n" \
"psllq $7,%%xmm3\n" \
"paddq %%xmm2,%%xmm3\n" \
"psllq $51,%%xmm2\n" \
"psubq %%xmm2,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"psllq $4,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,16(%1)\n" \
"movaps 32(%2),%%xmm1\n" \
"movaps 32(%1),%%xmm6\n" \
"movaps %%xmm6,32(%2)\n" \
"psllq $4,%%xmm6\n" \
"paddq %%xmm0,%%xmm6\n" \
"movaps %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"psllq $3,%%xmm1\n" \
"paddq %%xmm1,%%xmm2\n" \
"psubq %%xmm2,%%xmm6\n" \
"movaps %%xmm6,%%xmm2\n" \
"psrlq $51,%%xmm2\n" \
"movaps %%xmm2,%%xmm3\n" \
"psllq $7,%%xmm3\n" \
"paddq %%xmm2,%%xmm3\n" \
"psllq $51,%%xmm2\n" \
"psubq %%xmm2,%%xmm6\n" \
"paddq %%xmm3,%%xmm6\n" \
"psllq $4,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,32(%1)\n" \
"movaps 48(%2),%%xmm1\n" \
"movaps 48(%1),%%xmm7\n" \
"movaps %%xmm7,48(%2)\n" \
"psllq $4,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"movaps %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"paddq %%xmm1,%%xmm2\n" \
"psllq $3,%%xmm1\n" \
"paddq %%xmm1,%%xmm2\n" \
"psubq %%xmm2,%%xmm7\n" \
"movaps %%xmm7,%%xmm2\n" \
"psrlq $51,%%xmm2\n" \
"movaps %%xmm2,%%xmm3\n" \
"psllq $7,%%xmm3\n" \
"paddq %%xmm2,%%xmm3\n" \
"psllq $51,%%xmm2\n" \
"psubq %%xmm2,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"psllq $4,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,48(%1)\n" \
"psrlq $51,%%xmm4\n" \
"psrlq $51,%%xmm5\n" \
"psrlq $51,%%xmm6\n" \
"psrlq $51,%%xmm7\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packssdw %%xmm7,%%xmm6\n" \
"packssdw %%xmm6,%%xmm4\n" \
"packsswb %%xmm4,%%xmm4\n" \
"movaps %%xmm4,%%xmm0\n" \
"psrldq $4,%%xmm0\n" \
"pslld $4,%%xmm0\n" \
"pxor %%xmm0,%%xmm4\n"
"movd %%xmm4,%0\n" \
"":"=&r"(output):"r"(state->xN),"r"(state->xP),"r"(gm55_sse_Consts));
return output;
}
// Copy the plain generator state into the SSE state layout (the two
// structures have identical fields, so this is a field-by-field copy).
__device__ __host__ void gm55_get_sse_state_(gm55_state* state,gm55_sse_state* sse_state){
int j;
for(j=0;j<8;j++){
sse_state->xN[j]=state->xN[j];
sse_state->xP[j]=state->xP[j];
}
}
// Reduce x modulo gm55_g, exploiting 2^55 = gm55_g + 2064: the part of x
// above bit 55 is folded back in as 2064 per unit, then at most one final
// subtraction brings the result below gm55_g.
__device__ __host__ lt gm55_mod_g(lt x){
lt hi = x >> 55;
lt r = x - (hi << 55) + 2064 * hi;
if (r >= gm55_g) r -= gm55_g;
return r;
}
// Multiply A*B modulo gm55_g without 128-bit arithmetic, assuming A,B < g.
// Split A = A1*2^28 + A0 and B = B1*2^27 + B0 so every partial product fits
// in 64 bits; the reduction uses 2^55 = gm55_g + 2064 (and 2064 = 129*16).
__device__ __host__ lt gm55_MyMult(lt A,lt B){ // returns AB (mod gm55_g), where it is implied that A,B<gm55_g;
lt A1,A0,B1,B0,curr,x,m;
// high/low split: A1 < 2^27, A0 < 2^28, B1 < 2^28, B0 < 2^27
A1=A>>28; B1=B>>27; A0=A-(A1<<28); B0=B-(B1<<27);
// cross terms: A*B = A1*B1*2^55 + (2*A1*B0 + B1*A0)*2^27 + A0*B0.
// m carries the part of the cross sum at or above 2^28, i.e. m*2^55 overall,
// which reduces to 2064*m; x is the remainder, contributing x*2^27.
curr=2*A1*B0+B1*A0; m=curr>>28; x=curr-(m<<28);
// A1*B1*2^55 reduces to 2064*A1*B1 = (129*A1*B1)*16, hence the <<4
curr=(x<<27)+2064*m+(gm55_mod_g(129*A1*B1)<<4)+A0*B0;
return gm55_mod_g(curr);
}
// Generalized recurrence step: (myk*N - myq*P) mod gm55_g, with the
// subtraction kept non-negative by adding gm55_g when needed.
__device__ __host__ lt gm55_CNext2(lt N,lt P,lt myk,lt myq){
lt a = gm55_MyMult(myk,N);
lt b = gm55_MyMult(myq,P);
return (a >= b) ? (a - b) : (gm55_g + a - b);
}
// Single recurrence step with the fixed coefficients:
// (256*N - 176*P) mod gm55_g, written subtraction-free as
// N*2^8 + 176*(g - P) before one modular reduction.
__device__ __host__ lt gm55_CNext(lt N,lt P){
lt t = (N << 8) + 176 * (gm55_g - P);
return gm55_mod_g(t);
}
// Jump 2^n steps ahead: given consecutive sequence values x0 and x1, return
// the value 2^n steps past x1.  Uses repeated squaring of the recurrence
// coefficients: if x advances by step s via coefficients (k,q), the step-2s
// coefficients are k' = k*k - 2*q and q' = q*q, and
// x_{m+2s} = k_s*x_{m+s} - q_s*x_m with x_m (= x0) held fixed.
__device__ __host__ lt gm55_GetNextN(lt x0,lt x1,unsigned int n){ //returns x_{2^n}
lt myk=gm55_k,myq=gm55_q,i,x=x1;
for(i=0;i<n;i++){
x=gm55_CNext2(x,x0,myk,myq);     // advance x by the current step size
myk=gm55_CNext2(myk,2,myk,myq);  // k <- k*k - 2*q
myq=gm55_CNext2(myq,0,myq,0);    // q <- q*q
}
return x;
}
// Jump an arbitrary distance: starting from consecutive values x0,x1,
// return x_N with N = 2^64*N64 + N0 + 1.  The 128-bit offset is scanned
// bit by bit; for every set bit the consecutive pair (xp,xn) is advanced
// by 2^shift steps via gm55_GetNextN.
__device__ __host__ lt gm55_GetNextAny(lt x0,lt x1,lt N64,lt N0){ //N=2^64*N64+N0+1
lt i,xp=x0,xn=x1,xpnew,xnnew,shift=0;
// low 64 offset bits
i=N0; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gm55_GetNextN(xp,xn,shift);
// xn must stay exactly one step ahead of xp, hence the extra
// gm55_CNext step before jumping it by the same amount
xnnew=gm55_GetNextN(xn,gm55_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
// high 64 offset bits (each worth 2^(64+position) steps)
i=N64; shift=64; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gm55_GetNextN(xp,xn,shift);
xnnew=gm55_GetNextN(xn,gm55_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp; // returns x_N, where N=2^64*N64+N0+1
}
// Advance each of the 8 component recurrences by
// offset64*2^64 + offset0 + 1 steps (the "+1" comes from gm55_GetNextAny's
// offset convention); the consecutive pair (xP,xN) is moved together.
__device__ __host__ void gm55_skipahead_(gm55_state* state, lt offset64, lt offset0){
lt j;
for(j=0;j<8;j++){
lt newP = gm55_GetNextAny(state->xP[j],state->xN[j],offset64,offset0);
lt newN = gm55_GetNextAny(state->xP[j],state->xN[j],offset64,offset0+1);
state->xP[j] = newP;
state->xN[j] = newN;
}
}
// Seed the 8 component recurrences from two fixed starting values, placing
// consecutive components a large fixed distance apart along the sequence
// (each component's seed pair becomes the starting point for the next).
__device__ __host__ void gm55_init_(gm55_state* state){
lt xprev = gm55_mod_g(100152853817629549ULL);
lt xcurr = gm55_mod_g(132388305121829306ULL);
lt j;
for(j=0;j<8;j++){
lt xp = gm55_GetNextAny(xprev,xcurr,7730941120ULL,2741045636569588180ULL);
lt xn = gm55_GetNextAny(xprev,xcurr,7730941120ULL,2741045636569588181ULL);
state->xP[j] = xp;
state->xN[j] = xn;
xprev = xp;
xcurr = xn;
}
}
// Position the generator at the start of short sequence #SequenceNumber
// (0 <= SequenceNumber < 10^18).  Consecutive sequences are 20669825409
// numbers apart, so each is safe for runs shorter than ~10^10 numbers.
__device__ __host__ void gm55_init_short_sequence_(gm55_state* state,lt SequenceNumber){ // 0 <= SequenceNumber < 10^18
lt n1,n2; // length of each sequence < 10^10
gm55_init_(state);
// Decompose the total skip SequenceNumber*20669825409 without overflow:
// 892447987*20669825409 == 2^64 + 4193950067 exactly, so n1 full chunks
// contribute n1*2^64 + n1*4193950067 steps, and the remainder n2 satisfies
// n2*20669825409 < 2^64 (since n2 < 892447987).
n1=SequenceNumber/892447987; n2=SequenceNumber%892447987;
gm55_skipahead_(state,n1,n1*4193950067);
gm55_skipahead_(state,0,n2*20669825409); // thus we are skipping ahead (SequenceNumber*20669825409) numbers
}
// Position the generator at the start of long sequence #SequenceNumber
// (0 <= SequenceNumber < 4*10^9); the stride between sequences is
// SequenceNumber*(8*2^64 + 2699204111) steps, supporting sequence
// lengths below ~10^20.
__device__ __host__ void gm55_init_long_sequence_(gm55_state* state,lt SequenceNumber){
gm55_init_(state);
gm55_skipahead_(state, SequenceNumber*8, SequenceNumber*2699204111);
}
// Produce the next 32 random bits.  Each of the 8 recurrences is advanced
// one step (x_new = 256*x_N - gm55_q*x_P mod gm55_g, written subtraction-
// free), and its top 4 bits — temp/gm55_gdiv16 is in [0,15] since
// gm55_g == 16*gm55_gdiv16 — are placed at shift 8*i (lanes 0-3) or
// 8*i-28 (lanes 4-7), so the eight nibbles tile all 32 bit positions.
__device__ __host__ unsigned int gm55_generate_(gm55_state* state){
unsigned int sum=0; int i; lt temp;
for(i=0;i<8;i++){
// one recurrence step, phrased as x_N*2^8 + q*(g - x_P) to avoid underflow
temp=gm55_mod_g(((state->xN[i])<<8)+gm55_q*(gm55_g-state->xP[i]));
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+= ((temp/gm55_gdiv16)<<((i<4)?(8*i):(8*i-28)));
}
return sum;
}
/* Same stepping as gm55_generate_, but maps the 32-bit word to [0,1)
 * by multiplying with 2^-32. */
__device__ __host__ float gm55_generate_uniform_float_(gm55_state* state){
unsigned int out = 0;
lt next;
for(int lag = 0; lag < 8; lag++){
next = gm55_mod_g(((state->xN[lag])<<8) + gm55_q*(gm55_g - state->xP[lag]));
state->xP[lag] = state->xN[lag];
state->xN[lag] = next;
out += ((next/gm55_gdiv16)<<((lag<4)?(8*lag):(8*lag-28)));
}
return ((float) out) * 2.3283064365386963e-10;
}
/* Prints the 8 current (xN) and 8 previous (xP) lag values, reduced mod g. */
__host__ void gm55_print_state_(gm55_state* state){
int i;
printf("Generator State:\nxN={");
for(i=0;i<8;i++){
printf("%llu",state->xN[i]%gm55_g);
if(i<7) printf(","); else printf("}\nxP={");
}
for(i=0;i<8;i++){
printf("%llu",state->xP[i]%gm55_g);
if(i<7) printf(","); else printf("}\n\n");
}
}
/* SSE-state variant of gm55_print_state_: identical output format. */
__host__ void gm55_print_sse_state_(gm55_sse_state* state){
int i;
printf("Generator State:\nxN={");
for(i=0;i<8;i++){
printf("%llu",state->xN[i]%gm55_g);
if(i<7) printf(","); else printf("}\nxP={");
}
for(i=0;i<8;i++){
printf("%llu",state->xP[i]%gm55_g);
if(i<7) printf(","); else printf("}\n\n");
}
}
/* Each group of 8 consecutive threads forms one generator (one thread per
 * orbit/lag) and produces *length consecutive 32-bit outputs into its own
 * section of 'out'. Requires gm55_THREADS threads per block (shared arrays
 * are sized for that). */
__global__ void gm55_kernel_generate_array(gm55_state* state, unsigned int* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gm55_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gm55_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm55_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gm55_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gm55_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=sum; }
// FIX: second barrier -- without it the next iteration's writes to a[] can
// race with the read above (the orbit-0 thread may still be summing while
// its 7 partner threads overwrite their slots).
__syncthreads();
}
}
/* Launches gm55_kernel_generate_array to fill a device buffer with 'length'
 * 32-bit outputs, split between gm55_ARRAY_SECTIONS independent subsequences.
 * NOTE(review): when length is not divisible by gm55_ARRAY_SECTIONS the kernel
 * writes perSection*gm55_ARRAY_SECTIONS values, slightly past 'length' --
 * dev_out must be sized accordingly; confirm with callers. */
__host__ void gm55_generate_gpu_array_(gm55_state* state, unsigned int* dev_out, long length){
gm55_state* devState;
long* devLen;
long perSection = length / gm55_ARRAY_SECTIONS;
if (perSection * gm55_ARRAY_SECTIONS < length) perSection++; // round up
gm55_CUDA_CALL(cudaMalloc((void**)&devState, sizeof(gm55_state)));
gm55_CUDA_CALL(cudaMalloc((void**)&devLen, sizeof(long)));
gm55_CUDA_CALL(cudaMemcpy(devState, state, sizeof(gm55_state), cudaMemcpyHostToDevice));
gm55_CUDA_CALL(cudaMemcpy(devLen, &perSection, sizeof(long), cudaMemcpyHostToDevice));
gm55_kernel_generate_array<<<gm55_BLOCKS, gm55_THREADS>>>(devState, dev_out, devLen);
gm55_CUDA_CALL(cudaGetLastError());
gm55_CUDA_CALL(cudaFree(devState));
gm55_CUDA_CALL(cudaFree(devLen));
}
/* Float variant of gm55_kernel_generate_array: each group of 8 threads
 * produces *length uniform floats in [0,1) into its section of 'out'. */
__global__ void gm55_kernel_generate_array_float(gm55_state* state, float* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gm55_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gm55_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm55_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gm55_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gm55_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=((float)sum)* 2.3283064365386963e-10; }
// FIX: second barrier -- without it the next iteration's writes to a[] can
// race with the orbit-0 thread's read above.
__syncthreads();
}
}
/* Launches the float kernel to fill a device buffer with 'length' uniform
 * floats. See gm55_generate_gpu_array_ for the tail-rounding caveat. */
__host__ void gm55_generate_gpu_array_float_(gm55_state* state, float* dev_out, long length){
gm55_state* devState;
long* devLen;
long perSection = length / gm55_ARRAY_SECTIONS;
if (perSection * gm55_ARRAY_SECTIONS < length) perSection++; // round up
gm55_CUDA_CALL(cudaMalloc((void**)&devState, sizeof(gm55_state)));
gm55_CUDA_CALL(cudaMalloc((void**)&devLen, sizeof(long)));
gm55_CUDA_CALL(cudaMemcpy(devState, state, sizeof(gm55_state), cudaMemcpyHostToDevice));
gm55_CUDA_CALL(cudaMemcpy(devLen, &perSection, sizeof(long), cudaMemcpyHostToDevice));
gm55_kernel_generate_array_float<<<gm55_BLOCKS, gm55_THREADS>>>(devState, dev_out, devLen);
gm55_CUDA_CALL(cudaGetLastError());
gm55_CUDA_CALL(cudaFree(devState));
gm55_CUDA_CALL(cudaFree(devLen));
}
/* Double variant of gm55_kernel_generate_array: each group of 8 threads
 * produces *length uniform doubles in [0,1) into its section of 'out'. */
__global__ void gm55_kernel_generate_array_double(gm55_state* state, double* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift; long offset; lt temp;
__shared__ lt xP[gm55_THREADS]; // one generator per s=8 threads, i.e. one orbit
__shared__ lt xN[gm55_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm55_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 8;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>3; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gm55_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
shift=((orbit<4)?(8*orbit):(8*orbit-28));
for(i=0;i<(*length);i++){
temp = gm55_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((temp/gm55_gdiv16)<<shift);
__syncthreads(); // each s=8 threads result in "length" values in the output array
if(orbit==0){ sum=0; for(j=0;j<8;j++) sum+=a[threadIdx.x+j]; out[offset+i]=((double)sum)* 2.3283064365386963e-10; }
// FIX: second barrier -- without it the next iteration's writes to a[] can
// race with the orbit-0 thread's read above.
__syncthreads();
}
}
/* Launches the double kernel to fill a device buffer with 'length' uniform
 * doubles. See gm55_generate_gpu_array_ for the tail-rounding caveat. */
__host__ void gm55_generate_gpu_array_double_(gm55_state* state, double* dev_out, long length){
gm55_state* devState;
long* devLen;
long perSection = length / gm55_ARRAY_SECTIONS;
if (perSection * gm55_ARRAY_SECTIONS < length) perSection++; // round up
gm55_CUDA_CALL(cudaMalloc((void**)&devState, sizeof(gm55_state)));
gm55_CUDA_CALL(cudaMalloc((void**)&devLen, sizeof(long)));
gm55_CUDA_CALL(cudaMemcpy(devState, state, sizeof(gm55_state), cudaMemcpyHostToDevice));
gm55_CUDA_CALL(cudaMemcpy(devLen, &perSection, sizeof(long), cudaMemcpyHostToDevice));
gm55_kernel_generate_array_double<<<gm55_BLOCKS, gm55_THREADS>>>(devState, dev_out, devLen);
gm55_CUDA_CALL(cudaGetLastError());
gm55_CUDA_CALL(cudaFree(devState));
gm55_CUDA_CALL(cudaFree(devLen));
}
/* Host-buffer variant: allocates a device output buffer itself (rounded up to
 * a whole number of sections), runs the kernel, and copies exactly 'length'
 * values back into 'out'. */
__host__ void gm55_generate_array_(gm55_state* state, unsigned int* out, long length){
gm55_state* devState;
unsigned int* devOut;
long* devLen;
long perSection = length / gm55_ARRAY_SECTIONS;
if (perSection * gm55_ARRAY_SECTIONS < length) perSection++; // round up
gm55_CUDA_CALL(cudaMalloc((void**)&devState, sizeof(gm55_state)));
gm55_CUDA_CALL(cudaMalloc((void**)&devOut, perSection * gm55_ARRAY_SECTIONS * sizeof(unsigned int)));
gm55_CUDA_CALL(cudaMalloc((void**)&devLen, sizeof(long)));
gm55_CUDA_CALL(cudaMemcpy(devState, state, sizeof(gm55_state), cudaMemcpyHostToDevice));
gm55_CUDA_CALL(cudaMemcpy(devLen, &perSection, sizeof(long), cudaMemcpyHostToDevice));
gm55_kernel_generate_array<<<gm55_BLOCKS, gm55_THREADS>>>(devState, devOut, devLen);
gm55_CUDA_CALL(cudaGetLastError());
gm55_CUDA_CALL(cudaMemcpy(out, devOut, length * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // only 'length' values go back
gm55_CUDA_CALL(cudaFree(devState));
gm55_CUDA_CALL(cudaFree(devOut));
gm55_CUDA_CALL(cudaFree(devLen));
}
|
1,667
|
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void slice_sparse_dense_row(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu, int retClen) {
	// One thread per output row: thread i copies input CSR row (rl + i) into
	// dense output row i, keeping only columns inside [cl, cu].
	// (A dynamic-parallelism variant over the row's non-zeroes was considered
	// upstream but skipped to avoid two-step separate compilation/linking.)
	int outRow = blockIdx.x * blockDim.x + threadIdx.x;
	int inRow = outRow + rl;
	if (inRow > ru)
		return;
	int start = inRowPtr[inRow];
	int end = inRowPtr[inRow + 1];
	for (int p = start; p < end; p++) {
		int c = colInd[p];
		if (c >= cl && c <= cu) {
			ret[outRow * retClen + (c - cl)] = inVal[p];
		}
	}
}
|
1,668
|
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"
__global__ void foo(int* p) {
  // Each thread zeroes one element of the array that starts at p + 1.
  int* base = p + 1;
  base[threadIdx.x] = 0;
}
|
1,669
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float* var_3,float var_4) {
  // Auto-generated numeric stress kernel: the branch only runs when
  // comp compares equal to floorf(-0.0f), i.e. comp is (+/-)0.
  if (comp == floorf(-0.0f)) {
    float bias = +1.3963E-6f;
    comp += bias - (+1.6619E35f * var_2 - -1.5939E-42f);
    int i;
    for (i = 0; i < var_1; ++i) {
      comp += log10f(floorf(sinf((+1.8762E35f - (+1.5776E-44f / var_4)))));
      var_3[i] = asinf(-1.0657E-26f);
      float adj = -1.9984E-7f;
      comp = adj - var_3[i] * (+0.0f + (-1.9173E13f / -1.6537E25f));
    }
  }
  printf("%.17g\n", comp);
}
/* Allocates and returns an array of 'count' floats, each initialized to v.
 * 'count' defaults to 10 (the previously hard-coded size), so existing
 * callers are unaffected. Exits with a message if allocation fails instead
 * of returning NULL and crashing later. */
float* initPointer(float v, int count = 10) {
  float *ret = (float*) malloc(sizeof(float)*count);
  if (ret == NULL) {
    fprintf(stderr, "initPointer: out of memory\n");
    exit(EXIT_FAILURE);
  }
  for(int i=0; i < count; ++i)
    ret[i] = v;
  return ret;
}
int main(int argc, char** argv) {
  /* Expect 5 numeric arguments; previously missing arguments dereferenced
   * argv out of bounds (undefined behavior). */
  if (argc < 6) {
    fprintf(stderr, "usage: %s comp var_1 var_2 var_3 var_4\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float* tmp_4 = initPointer( atof(argv[4]) );
  float tmp_5 = atof(argv[5]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5);
  cudaDeviceSynchronize();
  free(tmp_4);  /* was leaked before */
  return 0;
}
|
1,670
|
/* Atomically updates *address so that it ends up holding the index (into
 * 'image') of the larger pixel value between the current entry and
 * 'newCandidate'. A negative *address is treated as "empty" and is claimed
 * by the first caller.
 * NOTE(review): the return type is float but 'old' is an int index that gets
 * implicitly converted; callers in this file ignore the return value --
 * confirm before relying on it. */
extern "C" __device__
float atomicMaxIndex(int* address, int newCandidate, const float* image)
{
int *address_as_int =(int*)address;
int old = *address_as_int, assumed;
/* Phase 1: claim an empty (negative) slot via CAS; loop until either we
 * installed newCandidate or another thread installed a valid index. */
while (old < 0)
{
assumed = old;
old = atomicCAS(address_as_int, assumed, newCandidate);
}
// old is definitely initialized and therefore a valid index.
/* Phase 2: standard CAS loop -- replace only while our candidate's pixel
 * value is strictly greater than the currently stored index's value. */
while (image[newCandidate] > image[old]) {
assumed = old;
old = atomicCAS(address_as_int, assumed, newCandidate);
}
return old;
}
extern "C" __global__
/* Gather step: outVals[slot] = image[maxIdx[slot]] for every
 * (batch, channel, label) slot produced by the pooling kernels. */
void fill_values(
const float* image,
const int* maxIdx,
const int K,
const int nClasses,
const int batchSize,
float* outVals)
{
int b = blockIdx.z*blockDim.z + threadIdx.z;
int ch = blockIdx.x*blockDim.x + threadIdx.x;
int lab = blockIdx.y*blockDim.y + threadIdx.y;
int slot = b*nClasses*K + K*ch + lab;
if (slot < K*nClasses*batchSize)
outVals[slot] = image[maxIdx[slot]];
}
extern "C" __global__
/* Per-block argmax: each block scans a chunk of voxels, keeps the index of
 * the largest image value whose label equals blockIdx.z, and merges it into
 * outIdx via atomicMaxIndex. Assumes blockDim.x is a power of two (tree
 * reduction) and that outIdx slots start negative (sentinel = "empty"). */
void max_pooling(
const float* image,
const int* labels,
int* outIdx,
const int volumeSize,
const int nClasses,
const int K)
{
extern __shared__ int sharedIx[]; // per-thread candidate index, -1 = none
int tid = threadIdx.x;
int channel = blockIdx.y;
int relevantLabel = blockIdx.z;
int volIndex = blockIdx.x*blockDim.x + tid;
int outputIndex = K*channel+relevantLabel;
int imgIndex = volumeSize*channel+volIndex;
sharedIx[tid] = -1;
if (volIndex<volumeSize && labels[volIndex]==relevantLabel)
{
sharedIx[tid] = imgIndex;
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s>0; s>>=1)
{
// FIX: the test was previously 'sharedIx[tid+s] > 0', which silently
// dropped the valid image index 0, and image[sharedIx[tid]] was read
// even when sharedIx[tid] was the -1 sentinel (out-of-bounds read).
if (tid<s && sharedIx[tid+s] >= 0)
{
if (sharedIx[tid] < 0 || image[sharedIx[tid+s]] > image[sharedIx[tid]])
sharedIx[tid] = sharedIx[tid+s];
}
__syncthreads();
}
if (tid == 0)
{
if (sharedIx[0] >= 0){ // FIX: was '> 0', which dropped index 0
atomicMaxIndex(&outIdx[outputIndex], sharedIx[0], image);
}
}
}
extern "C" __global__
/* Each thread scans a contiguous chunk of nbPixPerThread voxels of one
 * (batch, channel) slice and races each voxel's index into outIdx via
 * atomicMaxIndex, bucketed by the voxel's label. */
void max_pooling_v2(
const float* image,
const int* labels,
int* outIdx,
const int volumeSize,
const int nClasses,
const int batchSize,
const int nbPixPerThread,
const int K)
{
int batch = blockIdx.y;
int channel = blockIdx.x;
int chunkStart = threadIdx.x * nbPixPerThread;
if (chunkStart >= volumeSize || channel >= nClasses || batch >= batchSize)
return;
int imgBase = batch*volumeSize*nClasses + channel*volumeSize + chunkStart;
int labBase = volumeSize*batch + chunkStart;
for (int i = 0; i < nbPixPerThread; i++)
{
if (chunkStart + i >= volumeSize)
return; // past the end of this slice
int lab = labels[labBase + i];
int dst = batch*nClasses*K + K*channel + lab;
atomicMaxIndex(&outIdx[dst], imgBase + i, image);
}
}
extern "C" __global__
/* Backward pass of max pooling: scatter each slot's output gradient back to
 * the argmax voxel recorded in 'indices'; negative indices mean the slot was
 * never filled and are skipped. */
void bw_max_pooling(
float* grad_in,
const float* img,
const int* indices,
const int nClasses,
const float* grad_outputs,
const int K,
const int batchSize)
{
int b = blockIdx.z*blockDim.z + threadIdx.z;
int kIdx = blockIdx.x*blockDim.x + threadIdx.x;
int ch = blockIdx.y*blockDim.y + threadIdx.y;
int flat = b*nClasses*K + ch*K + kIdx;
if (flat >= batchSize*nClasses*K)
return;
int src = indices[flat];
if (src >= 0)
grad_in[src] = grad_outputs[flat];
}
extern "C" __global__
/* Accumulates per-label sums of 'image' into outVals (one slot per
 * batch/channel/label) and, via channel 0 only, per-label voxel counts into
 * outCounts. Each thread handles a chunk of nbPixPerThread voxels. */
void avg_pooling(
const float* image,
const int* labels,
float* outVals,
int* outCounts,
const int volumeSize,
const int nClasses,
const int batchSize,
const int nbPixPerThread,
const int K)
{
int batch = blockIdx.y;
int channel = blockIdx.x;
int chunkStart = threadIdx.x * nbPixPerThread;
if (chunkStart >= volumeSize || channel >= nClasses || batch >= batchSize)
return;
int imgBase = batch*volumeSize*nClasses + channel*volumeSize + chunkStart;
int labBase = batch*volumeSize + chunkStart;
for (int i = 0; i < nbPixPerThread; i++)
{
if (chunkStart + i >= volumeSize)
return; // past the end of this slice
int lab = labels[labBase + i];
atomicAdd(&outVals[batch*nClasses*K + K*channel + lab], image[imgBase + i]);
if (channel == 0)
{
// count each voxel once (channel 0 acts for all channels)
atomicAdd(&outCounts[batch*K + lab], 1);
}
}
}
extern "C" __global__
/* Backward pass of average pooling: each voxel receives its label's output
 * gradient; during training it is divided by that label's voxel count
 * (zero-count labels get zero gradient). */
void bw_avg_pooling(
float* grad_in,
const int* counts,
const int* labels,
const float* grad_outputs,
const int K,
const int volumeSize,
const int nClasses,
const int batchSize,
const bool training)
{
int b = blockIdx.z*blockDim.z + threadIdx.z;
int ch = blockIdx.y*blockDim.y + threadIdx.y;
int vox = blockIdx.x*blockDim.x + threadIdx.x;
if (vox >= volumeSize || ch >= nClasses || b >= batchSize)
return;
int dst = b*volumeSize*nClasses + ch*volumeSize + vox;
int lab = labels[b*volumeSize + vox];
int src = b*K*nClasses + K*ch + lab;
if (!training)
{
grad_in[dst] = grad_outputs[src];
return;
}
int cnt = counts[b*K + lab];
grad_in[dst] = (cnt == 0) ? 0 : grad_outputs[src]/cnt;
}
|
1,671
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
cudaError_t calcDotProductWithCuda(int *c, const int *a, const int *b, unsigned int size);
int main()
{
	// Dot product of two 4-element vectors of 2s, computed on the device.
	const int arraySize = 4;
	const int a[arraySize] = { 2, 2, 2, 2 };
	const int b[arraySize] = { 2, 2, 2, 2 };
	int c[arraySize] = { 0 };
	cudaError_t status = calcDotProductWithCuda(c, a, b, arraySize);
	if (status != cudaSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		return 1;
	}
	// The kernel leaves one partial sum per block in c[blockIdx];
	// arraySize=4 with 2 threads/block means two partials to add.
	int result = c[0] + c[1];
	std::cout << "Result of calculation:" << result << std::endl;
	// Reset the device so profilers (Nsight, Visual Profiler) see full traces.
	status = cudaDeviceReset();
	if (status != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	return 0;
}
#define THREADS_PER_BLOCK 2
__shared__ int blockNumbers[THREADS_PER_BLOCK];
__global__
/* Multiplies a[i]*b[i] elementwise, then sums the products of each block in
 * shared memory and stores the block's partial sum into c[blockIdx.x].
 * NOTE(review): every thread of a block writes the same subtotal to
 * c[blockIdx.x] (redundant, and it overwrites the elementwise products for
 * indices < gridDim.x); 'subtotal' is a float truncated to int on store.
 * Uses the file-scope __shared__ array 'blockNumbers'. */
void calcDotProdKernel(int * c, const int *b, const int * a)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c[i] = a[i] * b[i];
	printf("ThreadId: %d BlockId: %d using index: %d Calculated: %d \n", threadIdx.x, blockIdx.x, i, c[i]);
	blockNumbers[threadIdx.x] = c[i];
	//wait here until all threads reach this point inside the kernel
	__syncthreads();
	/* every thread sums the whole shared array (no reduction tree) */
	float subtotal = 0;
	for (int k = 0; k < blockDim.x; k++)
		subtotal += blockNumbers[k];
	c[blockIdx.x] = subtotal;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t calcDotProductWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
calcDotProdKernel<<<size/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_c, dev_b , dev_a);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
1,672
|
//ECGR 6090 Heterogeneous Computing Homework 1
// Problem 1 - Naive Matrix Multiplication on GPU
//Written by Aneri Sheth - 801085402
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define N 100
#define M 100
#define k 100
/* Naive matrix multiply c = a * b for square k x k matrices.
 * One thread per output element. */
__global__ void matrix_mul(float *a, float *b, float *c){
	int row = (blockIdx.y * blockDim.y) + threadIdx.y; //where am I in matrix
	int col = (blockIdx.x * blockDim.x) + threadIdx.x;
	// FIX: the host rounds the grid up to a multiple of 16 (ceil(k/16) blocks),
	// so threads with row/col >= k used to read and write out of bounds.
	if (row >= k || col >= k)
		return;
	float temp = 0.0f; //calculate sum
	for(int i = 0;i < k;i++)
	{
		temp += a[row * k + i] * b[i * k + col]; //add and multiply
	}
	c[row * k + col] = temp; //final c matrix
}
//Function to initialize matrices with random values
void randomInit(float* data, int size)
{
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++)
*(data + i*size + j) = rand() % 10;
}
//Function to display matrices
void display_matrix(int size, float *matrix)
{
for(int i = 0; i < size; i++){
for (int j = 0; j < size; j++){
printf("Matrix = %f ",*(matrix + i*size + j));
}
}
}
/* Benchmarks the naive GPU matrix multiply: allocates N x M matrices, fills
 * them with random values, times the kernel with CUDA events, and copies the
 * result back. NOTE(review): randomInit(a, N) fills N*N floats, which equals
 * the N*M allocation only because N == M here; the kernel result is never
 * validated against a CPU reference. */
int main(void)
{
	float *a, *b, *c; //CPU copies
	float *g_a, *g_b, *g_c; //GPU copies
	int matrix_size = N * M * sizeof(float);
	cudaEvent_t start, stop; //time start and stop
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	//Allocate device memory
	cudaMalloc((void **)&g_a, matrix_size);
	cudaMalloc((void **)&g_b, matrix_size);
	cudaMalloc((void **)&g_c, matrix_size);
	//Allocate CPU memory
	a = (float *)malloc(matrix_size); randomInit(a, N);
	b = (float *)malloc(matrix_size); randomInit(b, M);
	c = (float *)malloc(matrix_size);
	//Copy CPU memory to GPU memory
	cudaMemcpy(g_a, a, matrix_size, cudaMemcpyHostToDevice);
	cudaMemcpy(g_b, b, matrix_size, cudaMemcpyHostToDevice);
	//display_matrix(N,k,a);
	//display_matrix(k,M,b);
	//Set thread and grid dimensions
	/* ceil(k/16) x ceil(k/16) blocks of 16x16 threads */
	dim3 threadBlocks = dim3((int) std::ceil( (double) k/16 ),(int) std::ceil ( (double) k/16),1);
	//dim3 threadBlocks = dim3()
	dim3 threadsPerBlock = dim3(16,16,1);
	cudaEventRecord( start, 0 );
	//Call the kernel
	matrix_mul<<<threadBlocks,threadsPerBlock>>>(g_a,g_b,g_c);
	//display_matrix(N,M,g_c);
	cudaEventRecord( stop, 0 );
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time, start, stop);
	cudaEventDestroy( start );
	cudaEventDestroy( stop );
	//display_matrix(N,M,g_c);
	printf("GPU Execution Time = %f\n",time);
	//Copy from device to host
	/* blocking copy -- also guarantees the kernel has finished */
	cudaMemcpy(c, g_c, matrix_size, cudaMemcpyDeviceToHost);
	//display_matrix(N,M,c);
	//free cpu and gpu memory
	free(a); free(b); free(c);
	cudaFree(g_a); cudaFree(g_b); cudaFree(g_c);
	return 0;
}
|
1,673
|
#include "includes.h"
__global__ void add_1024(long* a, long* b, long* c, long N) { //more simple and probably faster core but works only with 1024 or less elements in vector in this example
	// Elementwise product followed by an in-place halving tree reduction into
	// c[0] -- i.e. the dot product of a and b. The halving loop drops odd
	// leftovers, so N is expected to be a power of two and fit in one block.
	c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
	__syncthreads();
	for (long stride = N / 2; stride != 0; stride /= 2) {
		if (threadIdx.x < stride)
		{
			c[threadIdx.x] += c[threadIdx.x + stride];
		}
		__syncthreads();
	}
}
|
1,674
|
#include "includes.h"
/* Tiled-style matrix multiply C = A * B with per-element bounds checks:
 * 16x16 threads per block, each thread accumulating one C element across
 * passes of 16 products along the shared dimension. */
__global__ void matmul(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) {
	int row = blockIdx.y*16 + threadIdx.y;
	int col = blockIdx.x*16 + threadIdx.x;
	float acc = 0;
	int passes = (16 + ACols - 1)/16;
	for (int t = 0; t < passes; t++) {
		for (int n = 0; n < 16; ++n) {
			int kk = t*16 + n;
			if ((kk < ACols && row < ARows) && (kk < BRows && col < BCols))
				acc += A[row*ACols + kk] * B[kk*BCols + col];
		}
	}
	if (row < CRows && col < CCols)
		C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x] = acc;
}
|
1,675
|
#include <stdio.h>
/* Device-side hello world: each thread prints its own thread index. */
__global__ void hellogpu (void)
{
printf ("Hello world from GPU!! from %d \n", threadIdx.x);
}
/* Host entry point: greets from the CPU, then launches 2 blocks of 10
 * threads that each greet from the GPU. */
int main(void)
{
	printf("Hello world from CPU!! \n");
	hellogpu <<<2, 10>>>();
	// Tear down the device context before exiting (lets device printf flush).
	cudaDeviceReset ();
	return 0;
}
|
1,676
|
#include <iostream>
#include <string>
#include <cuda_runtime.h>
using namespace std;
#define RADIX 257
#define SIZE 9216
#define BLOCK_SIZE 512
#define GRID_SIZE 64
#define LOOP_NUM 100
//int HashCalc(char *text, int length);
__host__ void hHashCalc(char *text, int length, unsigned int *rehash);
__global__ void gHashCalc(char *text, int *length, unsigned int *rehash);
__device__ void dHashCalc(char *text, int *length, unsigned int *rehash);
// void textHash(char *text, int textlen, int texthas [], int patlen);
__global__ void textHash(char *text, int *textlen, unsigned int *texthas, int *patlen);
void HashSearch(char *text, int textlen, unsigned int texthas [], char *pattern, int patlen, unsigned int pathas, bool flag []);
__global__ void gHashSearch(char *text, int *textlen, unsigned int *texthas, char *pattern, int *patlen, unsigned int *pathas, bool *flag);
void Emphasis(char *text, int textlen, int patlen, bool flag [], int Count);
void InsertChar(char *text, char *shift, bool flag [], bool mem [], int *counter, char *insert);
void ShiftChar(char *text, char *shift1, char *shift2, bool flag [], bool mem1 [], bool mem2 [], int *counter, int inslen, int looptimes);
/* Benchmarks Rabin-Karp-style hash search on the GPU: reads a text and a
 * pattern from stdin, then for a sweep of block sizes times the pattern hash,
 * text hash, copies, and the host-side search over LOOP_NUM iterations,
 * printing CSV-ish timing lines.
 * NOTE(review): several device allocations are oversized for scalars
 * (dPathas, dTextlen, dPatlen use SIZE elements); dFoundFlag is malloc'd as
 * sizeof(int)*SIZE but memcpy'd as sizeof(bool)*SIZE; dTextlen, dPatlen and
 * dFoundFlag are never cudaFree'd; and the inner while loop that bumps 'cnt'
 * declares *shadowed* dim3 grid/block variables, so the outer launch
 * configuration is not actually updated there -- confirm intent. */
int main(){
char text[SIZE * 2], pattern[SIZE];
string inputtext;
int textlen[1], patlen[1];
unsigned int texthas[SIZE * 2] = { 0 }, pathas[1] = { 0 };
bool FoundFlag[SIZE] = { 0 };
int FoundCount = 0;  /* only used by the commented-out verification below */
int i;
// cout << "*Please input text." << endl;
getline(cin, inputtext);
const char *convert = inputtext.c_str();
strcpy(text, convert);
textlen[0] = strlen(text);
// cout << textlen[0] << endl;
/* re-prompt until the pattern fits inside the text */
do{
// cout << endl << "*Please input pattern." << endl;
getline(cin, inputtext);
convert = inputtext.c_str();
strcpy(pattern, convert);
patlen[0] = strlen(pattern);
if (textlen[0] < patlen[0])
{
cout << "**Search pattern is larger than the text size.**" << endl;
}
} while (textlen[0] < patlen[0]);
/* device-side buffers */
char *dText, *dPattern;
unsigned int *dTexthas, *dPathas;
int *dTextlen, *dPatlen;
bool *dFoundFlag;
cudaMalloc((void**) &dText, sizeof(char)*SIZE);
cudaMemcpy(dText, text, sizeof(char)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dPattern, sizeof(char)*SIZE);
cudaMemcpy(dPattern, pattern, sizeof(char)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dTexthas, sizeof(unsigned int)*SIZE);
cudaMemcpy(dTexthas, texthas, sizeof(unsigned int)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dPathas, sizeof(unsigned int)*SIZE);
cudaMemcpy(dPathas, pathas, sizeof(unsigned int)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dTextlen, sizeof(int)*SIZE);
cudaMemcpy(dTextlen, textlen, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dPatlen, sizeof(int)*SIZE);
cudaMemcpy(dPatlen, patlen, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
cudaMalloc((void**) &dFoundFlag, sizeof(int)*SIZE);
cudaMemcpy(dFoundFlag, FoundFlag, sizeof(bool)*SIZE, cudaMemcpyHostToDevice);
/* sweep the number of threads per block over divisors of GRID*BLOCK */
for (int cnt = 1; cnt <= GRID_SIZE * BLOCK_SIZE; cnt++){
float Time_pathas = 0;
float Time_memcpy1 = 0;
float Time_texthas = 0;
float Time_memcpy2 = 0;
float Time_sum = 0;
float Time_HashSearch = 0;
dim3 grid(GRID_SIZE * BLOCK_SIZE / cnt);
dim3 block(cnt);
/* skip cnt values that do not divide the total thread count evenly
 * (NOTE(review): the dim3 declarations in here are shadowed locals) */
while (GRID_SIZE * BLOCK_SIZE % cnt != 0)
{
cnt++;
dim3 grid(GRID_SIZE * BLOCK_SIZE / cnt);
dim3 block(cnt);
}
/* timer setup */
// cout << "Calculation start in the GPU." << endl;
// cout << "BlockSize\t:\t" << GRID_SIZE * BLOCK_SIZE / cnt << "\nGridSize\t:\t" << cnt << endl;
cout << "BlockSize," << GRID_SIZE * BLOCK_SIZE / cnt << "\nGridSize," << cnt << endl;
// float sum = 0.0f;
for (int loopcnt = 0; loopcnt < LOOP_NUM; loopcnt++){
/* if (loopcnt == 50){
dim3 grid(cnt);
}
*/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float millseconds = 0.0f, sum = 0.0f;
/* pattern hash on the device */
cudaEventRecord(start, 0);
gHashCalc <<<grid, block>>> (dPattern, dPatlen, dPathas);
cudaThreadSynchronize();
// /*
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millseconds, start, stop);
sum += millseconds;
Time_pathas += millseconds;
// cout << "Time required(pattern hash)\t:\t" << millseconds << " millseconds" << endl;
cudaEventRecord(start, 0);
// */
cudaMemcpy(pathas, dPathas, sizeof(unsigned int), cudaMemcpyDeviceToHost);
// /*
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millseconds, start, stop);
sum += millseconds;
Time_memcpy1 += millseconds;
// cout << "Time required(memcpy)\t:\t" << millseconds << " millseconds" << endl;
// */
// cout << endl << "*Pattern Hash(" << pattern << ") = " << pathas[0] << endl << endl;
/* rolling text hashes on the device */
cudaEventRecord(start, 0);
textHash <<<grid, block>>> (dText, dTextlen, dTexthas, dPatlen);
cudaThreadSynchronize();
// /*
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millseconds, start, stop);
sum += millseconds;
Time_texthas += millseconds;
// cout << "Time required(text hash)\t:\t" << millseconds << " millseconds" << endl;
cudaEventRecord(start, 0);
// */
cudaMemcpy(texthas, dTexthas, sizeof(unsigned int)*SIZE, cudaMemcpyDeviceToHost);
// /*
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millseconds, start, stop);
sum += millseconds;
Time_memcpy2 += millseconds;
// cout << "Time required(memcpy)\t:\t" << millseconds << " millseconds" << endl;
// */
Time_sum += sum;
// cout << "Time required(sum)\t:\t" << sum << " millseconds" << endl;
/* hash comparison (host-side search) */
// cout << "*Finding..." << endl;
cudaEventRecord(start, 0);
HashSearch(text, textlen[0], texthas, pattern, patlen[0], pathas[0], FoundFlag);
//gHashSearch << <grid, block >> > (dText, dTextlen, dTexthas, dPattern, dPatlen, dPathas, dFoundFlag);
//cudaMemcpy(FoundFlag, dFoundFlag, sizeof(bool)*SIZE, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&millseconds, start, stop);
sum += millseconds;
Time_HashSearch += millseconds;
// cout << "Time required(HashSearch)\t:\t" << millseconds << " millseconds" << endl;
}
cout << "Time required(pattern hash)," << Time_pathas / LOOP_NUM << endl;
cout << "Time required(memcpy1)," << Time_memcpy1 / LOOP_NUM << endl;
cout << "Time required(text hash)," << Time_texthas / LOOP_NUM << endl;
cout << "Time required(memcpy2)," << Time_memcpy2 / LOOP_NUM << endl;
cout << "Time required(sum)," << Time_sum / LOOP_NUM << endl;
cout << "Time required(HashSearch)," << Time_HashSearch / LOOP_NUM << endl;
}
/*
for (i = 0; i < textlen[0]; i++){
//cout << "*Text Hash(";
//for (int j = 0; j < patlen[0]; j++) cout << text[i + j];
//cout << ") = " << texthas[i] << endl;
if (FoundFlag[i] == true) FoundCount++;
}
cout << "*FoundCount = " << FoundCount << endl;
if (FoundCount != 0)
{
Emphasis(text, textlen[0], patlen[0], FoundFlag, FoundCount);
cout << endl << "**Found!!**" << endl << text << endl;
}
else
{
cout << endl << "**Not found**" << endl;
}
*/
cudaFree(dText);
cudaFree(dPattern);
cudaFree(dTexthas);
return 0;
}
// Host-side reference hash: H(text) = sum_{i=0}^{length-1} (i+1) * RADIX * text[i].
// Mirrors the weighting used by the GPU kernels so host/device results compare.
__host__ void hHashCalc(char *text, int length, unsigned int *rehash)
{
    unsigned int acc = 0;
    for (int pos = 0; pos < length; ++pos)
    {
        acc += ((pos + 1) * RADIX) * text[pos];
    }
    *rehash = acc;
}
// Kernel: for each thread's start position col_idx, computes the weighted hash
// sum_{j=0}^{*length-1} (j+1) * RADIX * text[col_idx + j] into rehash[col_idx].
// NOTE(review): there is no upper bound on col_idx here, so threads whose
// window extends past the end of `text` read out of bounds — the caller must
// size the launch and/or pad the input accordingly (confirm).
__global__ void gHashCalc(char *text, int *length, unsigned int *rehash)
{
unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int scan_idx;
rehash[col_idx] = 0;
for (scan_idx = 0; scan_idx < *length; scan_idx++)
{
rehash[col_idx] += ((scan_idx + 1) * RADIX) * text[col_idx + scan_idx];
// *rehash += ((scan_idx + 1) * RADIX) * text[scan_idx];
//__syncthreads();
}
//__syncthreads();
}
// Device helper: computes the same weighted hash as hHashCalc for a single
// text window, writing the result through *rehash.
// NOTE(review): the __syncthreads() inside the loop requires that every thread
// of the block calls this function with the same *length — calling it from
// divergent control flow would be undefined behavior; confirm all call sites.
__device__ void dHashCalc(char *text, int *length, unsigned int *rehash)
{
// unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int scan_idx;
*rehash = 0;
for (scan_idx = 0; scan_idx < *length; scan_idx++)
{
// *rehash += ((scan_idx + 1) * RADIX) * text[col_idx + scan_idx];
*rehash += ((scan_idx + 1) * RADIX) * text[scan_idx];
__syncthreads();
}
/*
cout << "Hash(";
for(i = 0; i < length; i++) cout << text[i];
cout << ") = " << rehash << endl;
*/
}
// Kernel: computes the rolling-comparison hash of every *patlen-character
// window of `text`; one thread per window start position.
//   texthas[i] = sum_{j=0}^{*patlen-1} (j+1) * RADIX * text[i + j]
// Fix: threads whose window would run past *textlen now return early instead
// of reading past the end of the text buffer (out-of-bounds global read).
// Entries of texthas beyond the last valid window are left untouched; the
// host-side HashSearch only reads indices 0 .. *textlen - *patlen.
__global__ void textHash(char *text, int *textlen, unsigned int *texthas, int *patlen)
{
    unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int scan_idx;
    // Bounds guard: window [col_idx, col_idx + *patlen) must fit in the text.
    if ((int)col_idx + *patlen > *textlen) return;
    texthas[col_idx] = 0;
    for (scan_idx = 0; scan_idx < *patlen; scan_idx++){
        texthas[col_idx] += ((scan_idx + 1) * RADIX) * text[col_idx + scan_idx];
    }
}
// Kernel: Rabin-Karp style verification — for each window start col_idx whose
// precomputed hash matches the pattern hash, compares characters one by one
// and sets flag[col_idx] on an exact match.
// Fix: threads whose window would run past *textlen now return early instead
// of comparing against out-of-bounds text (the *textlen parameter was
// previously unused).
__global__ void gHashSearch(char *text, int *textlen, unsigned int *texthas, char *pattern, int *patlen, unsigned int *pathas, bool *flag)
{
    unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: window [col_idx, col_idx + *patlen) must fit in the text.
    if ((int)col_idx + *patlen > *textlen) return;
    if (*pathas == texthas[col_idx])
    {
        int scan_idx = 0;
        do{
            if (text[col_idx + scan_idx] != pattern[scan_idx]) break;
        } while (++scan_idx < *patlen);
        if (scan_idx == *patlen)
        {
            flag[col_idx] = true;
        }
    }
}
// Host-side Rabin-Karp verification pass.  For every window start whose
// precomputed hash equals the pattern hash, verifies the match character by
// character and records confirmed matches in flag[].
void HashSearch(char *text, int textlen, unsigned int texthas [], char *pattern, int patlen, unsigned int pathas, bool flag [])
{
    const int lastStart = textlen - patlen;
    for (int start = 0; start <= lastStart; ++start)
    {
        // Cheap hash filter first; most positions are rejected here.
        if (texthas[start] != pathas)
            continue;
        // Hashes collide occasionally, so confirm with a direct comparison.
        int matched = 0;
        do{
            if (text[start + matched] != pattern[matched]) break;
        } while (++matched < patlen);
        if (matched == patlen)
            flag[start] = true;
    }
}
// Decorates every match in `text` with " << match >> " markers, shifting the
// rest of the string (and its flag array) forward in place via InsertChar /
// ShiftChar.  Each match adds 8 characters (two 4-char markers).
// NOTE(review): assumes the text buffer has room for textlen + 8*Count
// characters and that SIZE bounds the shift scratch arrays — confirm callers.
void Emphasis(char *text, int textlen, int patlen, bool flag [], int Count)
{
int i, looptimes;
// Scratch buffers used by InsertChar/ShiftChar to save displaced chars/flags.
char shift1[SIZE], shift2[SIZE];
bool mem1[SIZE * 2], mem2[SIZE * 2];
char insert1 [] = " << ", insert2 [] = " >> ";
int inslen = strlen(insert1);
// Total span to walk: original text plus 8 inserted chars per match.
looptimes = textlen - patlen + (8 * Count);
for (i = 0; i < textlen - patlen + (8 * Count); i++)
{
if (flag[i] == true)
{
// Open marker before the match, then shift the tail forward...
InsertChar(text, shift1, flag, mem1, &i, insert1);
ShiftChar(text, shift1, shift2, flag, mem1, mem2, &i, inslen, looptimes);
// ...skip over the match itself, then close marker after it.
i += patlen;
InsertChar(text, shift1, flag, mem1, &i, insert2);
ShiftChar(text, shift1, shift2, flag, mem1, mem2, &i, inslen, looptimes);
}
}
}
// Saves the strlen(insert) characters (and their flags) that are about to be
// overwritten into shift/mem, writes the marker string `insert` over them,
// and advances *counter past the inserted text.  ShiftChar later re-inserts
// the saved characters further along.
void InsertChar(char *text, char *shift, bool flag [], bool mem [], int *counter, char *insert)
{
    const int inslen = strlen(insert);
    const int base = *counter;
    // Save the span we are about to clobber.
    for (int k = 0; k < inslen; ++k)
    {
        shift[k] = text[base + k];
        mem[k] = flag[base + k];
    }
    // Overwrite it with the marker text.
    for (int k = 0; k < inslen; ++k)
        text[base + k] = insert[k];
    *counter = base + inslen;
}
// Shifts `looptimes` characters starting at *counter forward by `inslen`
// positions, in place.  shift2/mem2 buffer each character/flag before its slot
// is overwritten, so nothing is lost; the first `inslen` slots are refilled
// from shift1/mem1 (the span previously saved by InsertChar).
void ShiftChar(char *text, char *shift1, char *shift2, bool flag [], bool mem1 [], bool mem2 [], int *counter, int inslen, int looptimes)
{
    for (int k = 0; k < looptimes; ++k)
    {
        const int dst = *counter + k;
        // Save the outgoing character before overwriting its slot.
        shift2[k] = text[dst];
        mem2[k] = flag[dst];
        if (k < inslen)
        {
            // Head of the gap: restore what InsertChar displaced.
            text[dst] = shift1[k];
            flag[dst] = mem1[k];
        }
        else
        {
            // Body: each slot receives the character saved inslen steps ago.
            text[dst] = shift2[k - inslen];
            flag[dst] = mem2[k - inslen];
        }
    }
}
|
1,677
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include <stdio.h>
#define threadPerBlock 64
using namespace cooperative_groups;
// Tree reduction of `val` across the cooperative group `g`, using x as a
// per-group workspace (must hold at least g.size() ints).
// Returns the group total to rank 0; every other rank returns -1.
// Fix: the g.sync() between publishing x[lane] and reading x[lane+i] was
// commented out, leaving a data race — partners could read a stale value.
__device__ int sumReduction(thread_group g, int* x, int val){
    int lane = g.thread_rank();
    for(int i=g.size()/2;i>0;i/=2){
        x[lane] = val;
        g.sync();              // publish before partners read (race fix)
        if(lane<i){
            val += x[lane+i];
        }
        g.sync();              // all reads done before next iteration writes
    }
    if(g.thread_rank() == 0){
        return val;
    }
    else{
        return -1;
    }
}
// Demo kernel: reduces thread ranks over the whole block, then over 16-thread
// tiled partitions, printing both results from rank 0 of each group.
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
// NOTE(review): every 16-thread tile indexes workspace[0..15] (sumReduction
// uses the tile-local rank), so tiles race on the same slots.  This appears
// benign only because all tiles write identical values — confirm, or give
// each tile a distinct workspace offset.
__global__ void cgkernel(){
extern __shared__ int workspace[];
thread_block wholeBlock = this_thread_block();
int input = wholeBlock.thread_rank();
int output = sumReduction(wholeBlock, workspace, input);
// Expected: sum of 0..size-1 == size*(size-1)/2.
int expectedout = wholeBlock.size()*(wholeBlock.size()-1)/2;
if(wholeBlock.thread_rank()==0){
printf(" Sum of all ranks 0..%d in wholeBlock is %d (expected %d)\n\n",
wholeBlock.size()-1,output,
expectedout);
printf(" Now creating %d groups, each of size 16 threads:\n\n",
wholeBlock.size()/16);
}
thread_block_tile<16> tile16 = tiled_partition<16>(this_thread_block());
input = tile16.thread_rank();
output = sumReduction(tile16, workspace, input);
// Expected: sum of 0..15 == 120.
expectedout = 120;
if(tile16.thread_rank()==0)
printf(" Sum of all ranks 0..15 in this tiledPartition16 group is %d (expected %d)\n",output,expectedout);
}
// Launches the cooperative-groups demo in a single block, reserving one int
// of dynamic shared memory per thread for the reduction workspace, then
// blocks until the device printf output has been flushed.
int main(){
    const size_t workspaceBytes = threadPerBlock * sizeof(int);
    cgkernel<<<1, threadPerBlock, workspaceBytes>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
1,678
|
#define BLOCK_SIZE 16
// Row-major matrix view.  `stride` is the row pitch of the underlying
// allocation: it equals `width` for a top-level matrix (see buildMatrix) but
// keeps the parent's pitch for sub-matrix views (see getSubMatrix).
typedef struct {
int width;
int height;
int stride;
float *elements;
} Matrix;
// Reads element (row, col) of a row-major matrix view.
__device__ float getElement(const Matrix C, int row, int col) {
    return *(C.elements + row * C.stride + col);
}
// Writes `value` into element (row, col) of a row-major matrix view.
__device__ void setElement(Matrix C, int row, int col, float value) {
    *(C.elements + row * C.stride + col) = value;
}
// Returns a BLOCK_SIZE x BLOCK_SIZE sub-matrix view of C located at tile
// coordinates (row, col).  The view shares C's element storage and keeps C's
// stride, so indexing through it still steps over whole parent rows.
__device__ Matrix getSubMatrix(Matrix C, int row, int col) {
Matrix Csub;
Csub.width = BLOCK_SIZE;
Csub.height = BLOCK_SIZE;
Csub.stride = C.stride;
Csub.elements = &C.elements[C.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Csub;
}
// Wraps a raw buffer as a width x height row-major Matrix (stride == width).
__device__ Matrix buildMatrix(int width, int height, float *elements) {
    Matrix m;
    m.width = width;
    m.height = height;
    m.stride = width;
    m.elements = elements;
    return m;
}
extern "C"
__global__ void JCudaMatrixSharedMemKernel(
    int widthA,
    int heightA,
    float *elementsA,
    int widthB,
    int heightB,
    float * elementsB,
    float * elementsC
) {
    // Tiled matrix multiply C = A * B using BLOCK_SIZE x BLOCK_SIZE shared
    // memory tiles.  Expects a 2D launch with blockDim == (BLOCK_SIZE, BLOCK_SIZE)
    // and a grid covering ceil(heightA/BLOCK_SIZE) x ceil(widthB/BLOCK_SIZE).
    //
    // Fix vs. the previous version: tiles that straddle a matrix edge are now
    // zero-padded on load and the final store is bounds-checked, so dimensions
    // that are not multiples of BLOCK_SIZE no longer read or write out of
    // bounds (the old per-element guard also mixed up widths and heights, and
    // the unguarded shared-memory loads/stores still touched invalid memory).
    Matrix A = buildMatrix(widthA, heightA, elementsA);
    Matrix B = buildMatrix(widthB, heightB, elementsB);
    Matrix C = buildMatrix(widthB, heightA, elementsC);

    // Thread's position within its tile, and the C element it owns globally.
    int row = threadIdx.y;
    int col = threadIdx.x;
    int globalRow = blockIdx.y * BLOCK_SIZE + row;
    int globalCol = blockIdx.x * BLOCK_SIZE + col;

    // Shared tiles for the current slice of A and B.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Accumulator for this thread's output element.
    float Cvalue = 0.0f;

    // Walk the tiles along A's row / B's column (inclusive bound covers the
    // partial trailing tile when A.width is not a multiple of BLOCK_SIZE).
    for (int m = 0; m <= (A.width / BLOCK_SIZE); m++) {
        int aCol = m * BLOCK_SIZE + col;
        int bRow = m * BLOCK_SIZE + row;
        // Zero-pad out-of-range loads so the inner product needs no
        // per-element bounds test.
        As[row][col] = (globalRow < A.height && aCol < A.width)
            ? A.elements[globalRow * A.stride + aCol] : 0.0f;
        Bs[row][col] = (bRow < B.height && globalCol < B.width)
            ? B.elements[bRow * B.stride + globalCol] : 0.0f;
        // Make sure both tiles are fully loaded before using them.
        __syncthreads();
        for (int e = 0; e < BLOCK_SIZE; ++e) {
            Cvalue += As[row][e] * Bs[e][col];
        }
        // Make sure the computation is done before the tiles are reloaded.
        __syncthreads();
    }
    // Guarded store: edge blocks own elements outside C.
    if (globalRow < C.height && globalCol < C.width) {
        C.elements[globalRow * C.stride + globalCol] = Cvalue;
    }
}
|
1,679
|
extern "C"
{
    // Elementwise gradient update over n entries:
    //   c[i] += a[i]                 when b[i] >  -0.5
    //   c[i] -= 0.5 * a[i] / b[i]    otherwise
    __global__ void gradalex(const int n, const double *a, const double *b, double *c)
    {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= n)
            return;
        c[idx] += (b[idx] > -0.5) ? a[idx] : (-0.5 * a[idx] / b[idx]);
    }
}
|
1,680
|
// Let's implement UDWT in CUDA now, then we will HAVE to write all the other
// pieces to go with it, for example the mid-point sorting.
// OK, this code does use convolution, on a ROTATED array, not an extended or
// padded array. In fact, it uses FOUR convolutions, all largely the same - if
// I can remember what "RotateX s 0" in the Haskell code means compared to
// "RotateX s (0 - len)" and the other rotation values.
// There are two halves to this code, so let's do them one at a time. The two
// halves are "MRDWT" and "MIRDWT", I'll call them "The Man" and "The Station"
// to better differentiate them ("Mr" and "Mir"). "Man" and "Machine" would
// have been better in retrospect, but I've already written several comments now
// - there's no way I can change it at this point... Screw it, done!
// These filters were statically generated by Matlab, from which they originate.
/*float const
gc_fForward1[] = {-0.0106, -0.0329, 0.0308 , 0.1870 , -0.0280 , -0.6309 , 0.7148 , -0.2304 },
gc_fReverse1[] = { 0.2304, 0.7148 , 0.6309 , -0.0280 , -0.1870 , 0.0308 , 0.0329 , -0.0106 },
gc_fForward2[] = {-0.0106 / 2, 0.0329 / 2, 0.0308 / 2, -0.1870 / 2, -0.0280 / 2, 0.6309 / 2, 0.7148 / 2, 0.2304 / 2},
gc_fReverse2[] = {-0.2304 / 2, 0.7148 / 2, -0.6309 / 2, -0.0280 / 2, 0.1870 / 2, 0.0308 / 2, -0.0329 / 2, -0.0106 / 2};*/
#define CEILDIV(m,n) \
(((m) + (n) - 1) / (n))
__device__ __constant__ float
gc_fForward1[8],
gc_fReverse1[8],
gc_fForward2[8],
gc_fReverse2[8];
// Uploads the four UDWT filter banks (analysis forward/reverse, synthesis
// forward/reverse) into device constant memory.  `len` is the number of taps
// per filter; the constant arrays hold at most 8 floats each, so len must be
// <= 8 — NOTE(review): not validated here, confirm callers.
// NOTE(review): the cudaMemcpyToSymbol return codes are unchecked; a failure
// would silently leave stale filter coefficients on the device.
extern "C" void
UDWTCUDAFilter(
float * f1,
float * r1,
float * f2,
float * r2,
int len)
{
cudaMemcpyToSymbol(gc_fForward1, f1, len * sizeof (float));
cudaMemcpyToSymbol(gc_fReverse1, r1, len * sizeof (float));
cudaMemcpyToSymbol(gc_fForward2, f2, len * sizeof (float));
cudaMemcpyToSymbol(gc_fReverse2, r2, len * sizeof (float));
}
#define FILTER_SIZE (8)
#define WARP_SIZE (32) //(warpSize)
#define HALF_WARP (WARP_SIZE / 2)
#define EXTRA_DATA (((1 << 6) - 1) * (FILTER_SIZE - 1))
// ===========
// | The Man |
// ===========
// Forward-transform convolution step for one output position i: accumulates
// filterLength taps of xIn (stepping by `shift`) against the reversed filter
// (low-pass result) and the forward filter (high-pass result), writing
// xOutLow[i] and xOutHigh[i].  Caller guarantees xIn[i .. i + (filterLength-1)*shift]
// is valid (the shared-memory apron in MRDWT_CUDA provides this).
void __device__
fpconv2CUDA(
int i,
float * xIn,
int const shift,
float * filterReverse,
float * filterForwards,
int filterLength,
float * xOutLow,
float * xOutHigh)
{
// The length of the input data needs to be a multiple of 2^level for the
// shifts to work correctly. I'm still sure that the original C is VERY
// confusing and have no idea why it was ever done in that way... OK, as a
// result of the above statement about how confusing the original C was, I
// went away and wrote an optimised C version which is (IMHO) much cleaner.
// Preliminary tests don't seem to have it that much faster, however, but
// I'm pretty sure it really should be! Anyway, back to the CUDA matter at
// hand... This will be the basic implementation with no shared memory
// code, I'll then try and write a better shared-memory version. Also, now
// another, much bigger, test has not come out much faster for the
// apparently "optimised" C code, oh well. I'll now test the effects of the
// common sort and filter code. Once again, back to the matter at hand -
// less rambling procrastination!
float
x0 = 0.0,
x1 = 0.0;
int
read = i;
// Taps are applied highest-index first; `read` walks forward by `shift`.
for (size_t j = filterLength; j-- > 0; )
{
x0 += xIn[read] * filterReverse[j];
x1 += xIn[read] * filterForwards[j];
// This is just the bog-standard modulo version.
// Update: Don't need "mod" anymore because all the data exists in
// shared memory.
read += shift;
}
xOutLow[i] = x0;
xOutHigh[i] = x1;
// OK, I'll have to write the shared memory version from the start just to
// resolve the data dependencies.
}
// Low-pass-only variant of fpconv2CUDA: convolves xIn at stride `shift`
// against the reversed filter and writes only the low-pass output for
// position i.  Used to extend apron data that no high-pass output needs.
void __device__
fpconv2NoHigh(
int i,
float * xIn,
int const shift,
float * filterReverse,
int filterLength,
float * xOutLow)
{
    float acc = 0.0f;
    int idx = i;
    // Same tap order as fpconv2CUDA: highest filter index first.
    for (int tap = filterLength - 1; tap >= 0; --tap)
    {
        acc += xIn[idx] * filterReverse[tap];
        idx += shift;
    }
    xOutLow[i] = acc;
}
// Multi-level redundant DWT ("The Man"): each 512-thread block loads its
// 512-sample slice of lowpassInput plus an apron (wrapping modulo
// inputLength), then runs `levels` in-place filter passes in shared memory,
// doubling the tap stride each level.  High-pass outputs go straight to
// global memory (one inputLength-sized plane per level); the final low-pass
// result is copied out at the end.
// NOTE(review): requires blockDim.x == 512 and levels small enough that the
// apron fits in EXTRA_DATA — confirm launch sites.
__global__ void
MRDWT_CUDA(
int const filterLength,
int const inputLength,
int const levels,
float * lowpassInput,
float * lowpassOutput,
float * highpassOutput)
{
// Each level requires data from the last level beyond the edge of the data
// this block is supposed to be calculating, and because later levels need
// to load processed data beyond the bounds, we need even more data to be
// able to calculate those parts too. Fortunately this is bounded. If we
// have "n" levels and "n+1" stages, where stage "0" is initial data load
// and stage "n" is final output calculation each stage ("x") GIVES this
// much data (and requires all the data from the previous stage, with the
// exception being stage 0):
//
// n = levels
// f = filterLength - 1
// x = currentStage
// m = n - x
//
// extra = f . ((2n+1) . m + m^2)
// ------------------------
// 2
//
// For now I'll call "maxLevels" 15 and enforce it in "main.cpp". This
// gives a stage 0 upper limit (with constant filters of size 8) of 120
// extra elements to load initially. For "x = 0", "n = m" and this becomes
// the regular triangular number calculation. For "m = 0", this is all 0.
//
// OK, that calculation above seems to be WRONG! It is:
//
// extra = (m . m + m) . f - 1
// -----------------
// 2
//
// STILL NO! I forgot to take in to account the fact that each level
// DOUBLES in shift distance, it doesn't increase by one...
//
// extra = (2 ^ m - 1) . f
//
// Fourth try (reversed shifting):
//
// extra = f . (((1 << n) - 1) - ((1 << m) - 1)
//
__shared__ float
fRowData1[512 + EXTRA_DATA],
fRowData2[512 + EXTRA_DATA];
// This is a 1D operation, so we never need to worry about pitch or higher
// dimension index calculations. So far this code shares a surprising
// amount with the old convolution code, and I wasn't even trying to copy!
// One major difference is the aligned start is not required here as long
// as we load
int const
// This can be changed if you want blocks to never loop.
blockCalc = blockDim.x,
f = filterLength - 1,
nm1 = (1 << levels) - 1,
apronSize = nm1 * f,
//lIdx = threadIdx.x,
dataStart = blockIdx.x * blockCalc,
//gIdx = blockIdx.x * blockDim.x + lIdx,
//npnp1 = 2 * levels + 1,
apronEnd = blockCalc + apronSize;
float * const
highOutBase = highpassOutput + dataStart;
//apronEndClamp = min(apronEnd, inputLength);
// TODO: Add a top-level "if" statement to only run the edge-case code for
// the one block that needs it.
int
pos = threadIdx.x,
load = threadIdx.x + dataStart,
shift = 1;
// Cooperative load of the block's slice plus apron, wrapping at the end.
while (pos < apronEnd)
{
fRowData1[pos] = lowpassInput[load % inputLength];
pos += blockCalc;
load += blockCalc;
}
// Data is preloaded pretty well.
__syncthreads();
float
* localInput = fRowData1,
* localOutput = fRowData2,
* localSwap;
// "- 1" because stage 0 is handled explicitly (global data loading).
for (int level = 0, m = 1; level != levels; ++level, ++m)
{
const int
procEnd = blockCalc + (nm1 - ((1 << m) - 1)) * f; //((npnp1 + m) * m * f / 2);
pos = threadIdx.x;
fpconv2CUDA(pos, localInput, shift, gc_fReverse1, gc_fForward1, filterLength, localOutput, highOutBase + (level * inputLength));
pos += blockCalc;
// This is to complete the extra local data for future loops, so it
// doesn't write any "high" outputs.
while (pos < procEnd)
{
fpconv2NoHigh(pos, localInput, shift, gc_fReverse1, filterLength, localOutput);
pos += blockCalc;
}
shift <<= 1;
localSwap = localInput;
localInput = localOutput;
localOutput = localSwap;
// Now we have completed that level, we can do the next, generating
// slightly less output data.
__syncthreads();
}
// Copy from whichever output is the current one (they keep swapping).
pos = threadIdx.x;
load = threadIdx.x + dataStart;
if (levels & 1)
{
lowpassOutput[load] = fRowData2[pos];
}
else
{
lowpassOutput[load] = fRowData1[pos];
}
}
// Host launcher for the forward redundant DWT: one 512-thread block per 512
// input samples (inputLength is expected to be handled modulo-wrap by the
// kernel's apron logic).
extern "C" void
DoMRDWT(
int filterLength,
int inputLength,
int levels,
float * lowpassInput,
float * lowpassOutput,
float * highpassOutput)
{
    const int threadsPerBlock = 512;
    dim3 blocks(CEILDIV(inputLength, threadsPerBlock));
    dim3 threads(threadsPerBlock);
    MRDWT_CUDA<<<blocks, threads>>>(filterLength, inputLength, levels, lowpassInput, lowpassOutput, highpassOutput);
}
// ===============
// | The Machine |
// ===============
// Inverse-transform convolution step for one output position i: combines the
// low-pass and high-pass inputs through the synthesis filters, walking
// BACKWARDS through the data at stride `shift` starting from i + offset, and
// writes the reconstructed sample to xOut[i].
// NOTE(review): xInHigh was historically read straight from global memory
// (each value used once); here both inputs come from whatever buffers the
// caller stages — MIRDWT_CUDA passes shared-memory copies.
void __device__
bpconvCUDA(
int i,
int offset,
float * xOut,
int const shift,
float * filterForwards,
float * filterReverse,
int filterLength,
float * xInLow,
float * xInHigh)
{
    float acc = 0.0f;
    int idx = i + offset;
    // Taps applied lowest filter index first, stepping idx backwards.
    for (int tap = 0; tap < filterLength; ++tap)
    {
        acc += xInLow[idx] * filterForwards[tap] + xInHigh[idx] * filterReverse[tap];
        idx -= shift;
    }
    xOut[i] = acc;
}
// Multi-level inverse redundant DWT ("The Machine"): mirrors MRDWT_CUDA in
// reverse.  Each block preloads its low-pass slice plus apron into shared
// memory, then walks the levels from coarsest to finest, each pass loading
// the matching high-pass plane into shared memory and synthesizing via
// bpconvCUDA with a halving tap stride.  The reconstructed signal is copied
// out at the end.
// NOTE(review): requires blockDim.x == 512 and the same EXTRA_DATA apron
// bound as the forward kernel — confirm launch sites.
__global__ void
MIRDWT_CUDA(
int const filterLength,
int const inputLength,
int const levels,
float * lowpassInput,
float * lowpassOutput,
float * highpassInput)
{
// TODO: Filter the inputs in parallel, not sequence.
__shared__ float
fHighData[512 + EXTRA_DATA],
fRowData1[512 + EXTRA_DATA],
fRowData2[512 + EXTRA_DATA];
int const
// This can be changed if you want blocks to never loop.
blockCalc = blockDim.x,
f = filterLength - 1,
nm1 = (1 << levels) - 1,
apronSize = nm1 * f,
dataStart = blockIdx.x * blockCalc,
dataStartBig = dataStart + inputLength,
apronEnd = blockCalc + apronSize;
// TODO: Add a top-level "if" statement to only run the edge-case code for
// the one block that needs it.
int
pos = threadIdx.x,
// The apron precedes the block's slice here (inverse walks backwards);
// dataStartBig keeps the modulo argument non-negative.
load = threadIdx.x + dataStartBig - apronSize,
shift = 1 << (levels - 1);
while (pos < apronEnd)
{
fRowData1[pos] = lowpassInput[load % inputLength];
pos += blockCalc;
load += blockCalc;
}
// Data is preloaded pretty well.
__syncthreads();
float
* localInput = fRowData1,
* localOutput = fRowData2,
* localSwap;
// "- 1" because stage 0 is handled explicitly (global data loading).
for (int level = levels, m = nm1; level-- != 0; )
{
// Now we get to load the high-pass data in too! Yay!
float * const
highIn = highpassInput + (level * inputLength);
int const
newApronSize = m * f,
newApronEnd = blockCalc + newApronSize;
m >>= 1;
int const
writeSize = blockCalc + m * f;
pos = threadIdx.x,
load = threadIdx.x + dataStartBig - newApronSize;
// Load in the data with a negative shift, not a positive one.
while (pos < newApronEnd)
{
fHighData[pos] = highIn[load % inputLength];
pos += blockCalc;
load += blockCalc;
}
// Now we have completed that level, we can do the next, generating
// slightly less output data.
__syncthreads();
// Now we have loaded the high data, do the main calculation.
const int
offset = newApronEnd - writeSize;
pos = threadIdx.x;
bpconvCUDA(pos, offset, localOutput, shift, gc_fForward2, gc_fReverse2, filterLength, localInput, fHighData);
// This is to complete the extra local data for future loops, so it
// doesn't write any "high" outputs.
pos += blockCalc;
while (pos < writeSize)
{
bpconvCUDA(pos, offset, localOutput, shift, gc_fForward2, gc_fReverse2, filterLength, localInput, fHighData);
pos += blockCalc;
}
shift >>= 1;
localSwap = localInput;
localInput = localOutput;
localOutput = localSwap;
// Now we have completed that level, we can do the next, generating
// slightly less output data.
__syncthreads();
}
// Copy from whichever output is the current one (they keep swapping).
pos = threadIdx.x;
load = threadIdx.x + dataStart;
if (levels & 1)
{
lowpassOutput[load] = fRowData2[pos];
}
else
{
lowpassOutput[load] = fRowData1[pos];
}
}
// Host launcher for the inverse redundant DWT: one 512-thread block per 512
// output samples, matching DoMRDWT's configuration.
extern "C" void
DoMIRDWT(
int filterLength,
int inputLength,
int levels,
float * lowpassInput,
float * lowpassOutput,
float * highpassInput)
{
    const int threadsPerBlock = 512;
    dim3 blocks(CEILDIV(inputLength, threadsPerBlock));
    dim3 threads(threadsPerBlock);
    MIRDWT_CUDA<<<blocks, threads>>>(filterLength, inputLength, levels, lowpassInput, lowpassOutput, highpassInput);
}
|
1,681
|
#include "includes.h"
// For a sorted array of aggregate ids, records the first index at which each
// id appears: aggregateRemapIndex[id] = first position of id in
// fineAggregateSort.  One thread per element.
__global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    // A segment starts at index 0 or wherever the id differs from its
    // predecessor.
    bool segmentStart = (i == 0) || (fineAggregateSort[i] != fineAggregateSort[i - 1]);
    if (segmentStart)
        aggregateRemapIndex[fineAggregateSort[i]] = i;
}
|
1,682
|
#include <stdio.h>
#include <assert.h>
#define THREADS_PER_BLOCK 768
// Parenthesized so the macro expands safely inside larger expressions (the
// old unparenthesized `THREADS_PER_BLOCK * 1024` was a latent precedence bug:
// e.g. `x % ARRAY_SIZE` would expand to `x % 768 * 1024`).
#define ARRAY_SIZE (THREADS_PER_BLOCK * 1024)
#define OPTIM 0
// Reports the location of a failing CUDA API call and aborts; no-op on
// success.  Used via the HANDLE_ERROR macro.
static void HandleError(cudaError_t error, const char *file, int line) {
    if (error == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(error), file, line);
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__ ))
// Reverses inArray into outArray: global element k maps to element N-1-k.
// One thread per element; assumes the launch covers the whole array exactly.
__global__ void reverseArray(int *inArray, int *outArray)
{
int inOffset = blockDim.x * blockIdx.x;
// int inIndex = inOffset + blockIdx.x;
// ^
// while printing inIndex in gdb I noticed that, within a block,
// its value did not change from thread to thread, and moreover
// we ran past the end of the allocation (cuda-memcheck),
// e.g. for block 1023.
//
int inIndex = inOffset + threadIdx.x;
// ^ correct offset
// int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
// int outIndex = outOffset + (blockDim.x - 1 - blockIdx.x);
// ^ same mistake as above.
// Mirror the block order AND the thread order within the block.
int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
int outIndex = outOffset + (blockDim.x - 1 - threadIdx.x);
outArray[outIndex] = inArray[inIndex];
}
// Reverses inArray into outArray via shared-memory staging: each block
// reverses its tile locally, then tiles are written out in mirrored block
// order, so both the global read and the global write stay coalesced.
__global__ void reverseArrayOptim(int *inArray, int *outArray) {
    __shared__ int tile[THREADS_PER_BLOCK];
    int readBase = blockDim.x * blockIdx.x;
    // Store in reversed order within the tile.
    tile[blockDim.x - 1 - threadIdx.x] = inArray[readBase + threadIdx.x];
    // All writes to the tile must land before any thread reads it back.
    __syncthreads();
    int writeBase = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    outArray[writeBase + threadIdx.x] = tile[threadIdx.x];
}
// Debug helper: prints the first n elements of array as " array = { a, b, }".
void print_vec(int n, int *array) {
    printf(" array = { ");
    for (int idx = 0; idx < n; ++idx)
        printf("%d, ", array[idx]);
    printf("}\n");
}
// Benchmarks array reversal on the GPU (naive or shared-memory variant,
// selected by the OPTIM compile-time switch) and verifies the result on the
// host.
// NOTE(review): the timed interval includes cudaMalloc and both memcpys, not
// just the kernel — fine for end-to-end timing, but not a kernel benchmark.
int main(void)
{
int *hostArray;
int *devInArray, *devOutArray;
int numBlocks = ARRAY_SIZE / THREADS_PER_BLOCK;
size_t memSize = ARRAY_SIZE * sizeof(int);
// Host input: the identity sequence 0..ARRAY_SIZE-1.
hostArray = (int *)malloc(memSize);
for (int i = 0; i < ARRAY_SIZE; i++) {
hostArray[i] = i;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
HANDLE_ERROR(cudaMalloc((void **)&devInArray, memSize));
HANDLE_ERROR(cudaMalloc((void **)&devOutArray, memSize));
HANDLE_ERROR(cudaMemcpy(devInArray, hostArray, memSize, cudaMemcpyHostToDevice));
dim3 dimGrid(numBlocks);
dim3 dimBlock(THREADS_PER_BLOCK);
#if OPTIM
reverseArrayOptim<<<dimGrid, dimBlock>>> (devInArray, devOutArray);
#else
reverseArray<<<dimGrid, dimBlock>>> (devInArray, devOutArray);
#endif
// The blocking D2H copy also synchronizes with the kernel.
HANDLE_ERROR(cudaMemcpy(hostArray, devOutArray, memSize, cudaMemcpyDeviceToHost));
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float exec_time = 0;
cudaEventElapsedTime(&exec_time, start, stop);
#if OPTIM
printf("[GPU/OPTIM] Execution time (ms): %3.1f\n", exec_time);
#else
printf("[GPU] Execution time (ms): %3.1f\n", exec_time);
#endif
// Host-side check: result must be the reversed identity sequence.
for (int i = 0; i < ARRAY_SIZE; i++) {
assert(hostArray[i] == ARRAY_SIZE - 1 - i);
}
HANDLE_ERROR(cudaFree(devInArray));
HANDLE_ERROR(cudaFree(devOutArray));
free(hostArray);
printf("Correct!\n");
return 0;
}
|
1,683
|
#include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>
#define BLOCK_SIZE (128)
#define WORK_SIZE_BITS 16
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
// Aborts with a diagnostic (error string, code, location) when a CUDA API
// call fails; no-op on success.  Used via the GPU_ASSERT macro.
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
    exit(code);
}
__device__ uint64_t hardcoded = 8682522807148012UL * 181783497276652981UL;
// Binary search for `value` in the ascending-sorted slice values[start, end).
// Returns the index of a matching element, or -1 if not present.
// Fixes vs. the previous version:
//  * the lower-half step was `low = mid - 1` (should be mid + 1), which could
//    loop forever / return wrong results;
//  * the early-out `if (high < value || low > value)` compared INDICES against
//    the VALUE — nonsense for arbitrary data — and has been removed;
//  * `start` was ignored (low always began at 0); it is now honored, which is
//    backward-compatible since existing callers pass start == 0.
__device__ int binarySearch(int64_t* values, int64_t value, int start, int end){
    int low = start;
    int high = end - 1;
    while(low <= high){
        int mid = low + ((high - low) / 2);   // overflow-safe midpoint
        if(values[mid] > value)
            high = mid - 1;
        else if(values[mid] < value)
            low = mid + 1;
        else
            return mid;
    }
    return -1;
}
/*__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){
int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
int64_t scrambledTime = hardcoded ^ Time;
if(binarySearch(values, scrambledTime, 0, size) != -1){
buffer[atomicAdd(counter, 1)] = Time;
return;
}
}*/
// Kernel: each thread tests one candidate Time value (offset + global thread
// id).  If `hardcoded ^ Time` is found in `values` via binary search, the
// Time is appended to `buffer` through an atomically incremented counter.
// NOTE(review): binarySearch assumes `values` is sorted ascending — confirm
// the input file is sorted before trusting results.
__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){
int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
int64_t scrambledTime = hardcoded ^ Time;
if(binarySearch(values, scrambledTime, 0, size) != -1){
buffer[atomicAdd(counter, 1)] = Time;
return;
}
}
// Unified-memory output buffer for matching Time values found by threadWork.
uint64_t* buffer;
// Unified-memory count of matches written to `buffer` in the current batch.
uint32_t* counter;
// Target structure seeds loaded from the input file on the host.
std::vector<int64_t> structureSeeds;
// Unified-memory copy of structureSeeds handed to the kernel.
// NOTE(review): binarySearch requires this to be sorted — confirm input order.
int64_t* structSeedsArr;
// Scans the full 2^48 candidate nanoTime space: for each Time, checks whether
// hardcoded ^ Time appears in the structure-seed list loaded from
// "seananners.txt", and appends hits to "seananners-middlestep.txt".
// Fixes vs. the previous version:
//  * the copy loop used `i <= structureSeeds.size()`, reading one element
//    past the end of the vector (UB) and writing one element past the
//    managed allocation — now `<`;
//  * *counter is zeroed before the first launch (cudaMallocManaged does not
//    guarantee zero-initialized memory);
//  * unused locals (thread bookkeeping, tmpCount, tempCount, ...) removed.
int main(int argc, char **argv ){
    time_t start = time(NULL);
    FILE* fp = fopen("seananners-middlestep.txt", "w+");
    std::fstream infile;
    infile.open("seananners.txt", std::ios::in);
    std::string line;
    // One structure seed per line; stop at the first non-numeric line.
    while(std::getline(infile, line)){
        int64_t structureSeed = 0;
        std::istringstream iss(line);
        if(!(iss >> structureSeed)){break;}
        structureSeeds.push_back(structureSeed);
    }
    infile.close();
    uint64_t startValue = 0;
    uint64_t total = 281474976710656;   // 2^48 candidate Time values
    int tmpSize = structureSeeds.size();
    GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(uint64_t) * SEEDS_PER_CALL));
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaMallocManaged(&counter, sizeof(uint32_t)));
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaMallocManaged(&structSeedsArr, sizeof(int64_t) * tmpSize));
    GPU_ASSERT(cudaPeekAtLastError());
    printf("test1\n");
    // FIX: `<` instead of `<=` (was an out-of-bounds read AND write).
    for(size_t i = 0; i < structureSeeds.size(); i++){
        structSeedsArr[i] = structureSeeds[i];
    }
    printf("test2\n");
    printf("test3\n");
    cudaSetDevice(0);
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());
    *counter = 0;   // managed memory is not guaranteed zero-initialized
    uint64_t countOut = 0;
    printf("test4\n");
    for(uint64_t offset = startValue; offset <= total; offset += SEEDS_PER_CALL){
        threadWork<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(structSeedsArr, tmpSize, offset, counter, buffer);
        GPU_ASSERT(cudaPeekAtLastError());
        GPU_ASSERT(cudaDeviceSynchronize());
        // Drain this batch's matches to the output file.
        for(int i = 0; i < *counter; i++){
            uint64_t seed = buffer[i];
            if(seed != 0)
                fprintf(fp, "%lld\n", seed);
        }
        *counter = 0;
        // Periodic progress report (roughly every 1e11 seeds).
        if(countOut >= 100000000000){
            time_t tempTime = time(NULL);
            uint64_t tempDiff = tempTime - start;
            uint64_t sps = (uint64_t)(offset - startValue)/tempDiff;
            double percent = ((double)offset/(double)total) * 100.0;
            printf("Seeds Per Second: %lld\tProgress: %f\n", sps, percent);
            countOut = 0;
        }
        countOut += SEEDS_PER_CALL;
    }
    time_t end = time(NULL);
    uint64_t diff = end - start;
    double seedsPerSec = (double)total/(double)diff;
    printf("Time taken: %lld\nSeeds per second: %15.9f", diff, seedsPerSec);
    fclose(fp);
    return 0;
}
|
1,684
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
using namespace std;
// One temperature snapshot per time step, appended by calculateDiffusionSerial.
thrust::host_vector< thrust::host_vector<double> > times;
// Serial 1D heat-diffusion simulation: repeatedly averages each cell with its
// neighbors (heat source `k` on the left boundary, `rt` on the right) and
// appends each new state to the global `times` history.  Stops when a step
// produces no change at all.
// NOTE(review): termination uses exact floating-point equality between
// consecutive states — this converges only because averaging reaches a fixed
// point bit-for-bit; a tolerance would be more robust.  Confirm intent.
void calculateDiffusionSerial (thrust::host_vector<double> u, double k, double rt){
times.push_back(u);
bool stop = false;
long t = 0;
thrust::host_vector<double> new_u(u.size());
while (!stop){
u = times[t];
int i;
for(i = 0; i < u.size(); i++){
if(i==0){
// Left boundary: neighbor is the fixed heat source k.
new_u[i] = (k+u[i+1])/2;
}else if(i < u.size()-1){
new_u[i] = (u[i-1] + u[i+1]) /2;
}else{
// Right boundary: neighbor is the fixed room temperature rt.
new_u[i] = (u[i-1]+rt)/2;
}
}
times.push_back(new_u);
t++;
// Stop once the state is identical to the previous step.
bool allTempsEqual=true;
long j;
for (j=0; j < new_u.size(); j++){
if(u[j] != new_u[j]){
allTempsEqual=false;
}
}
if(allTempsEqual){
stop = true;
}
}
}
// CLI entry point: parses bar length, cell size, heat-source temperature and
// room temperature, initializes the bar at room temperature, and runs the
// serial diffusion until it reaches steady state.
// Usage: progName lengthBar deltaLength heatSource roomTemp
int main(int argc, char *argv[])
{
long lengthBar = 0;
long deltaLength = 0;
double k = 0;
double roomTemp = 0;
thrust::host_vector<double> u;
// setup/initialize
// Rejects wrong arity and a cell size larger than the bar.
if ((argc != 5) || (atoi(argv[2]) > atoi(argv[1]))) {
cerr << "usage: progName lengthBar deltaLength heatSource roomTemp\n" << endl;
exit(-1);
} else {
lengthBar = atol(argv[1]);
deltaLength = atol(argv[2]);
k = atof(argv[3]);
roomTemp = atof(argv[4]);
//u = (double*)malloc(ceil(lengthBar/(float)deltaLength)*sizeof(double));
// One cell per deltaLength of bar, all starting at room temperature.
u.resize(ceil(lengthBar/(float)deltaLength));
int i;
for (i = 0; i < ceil(lengthBar/(float)deltaLength); i++){
u[i] = roomTemp;
}
cout << "lenghtBar: " << lengthBar << " deltaLenght: " << deltaLength << " K: " << k << " Room Temp: " << roomTemp << " u Size: " << u.size() << " u[0]: " << u[0]<< endl;
cout << "initiating serial" << endl;
calculateDiffusionSerial(u, k, roomTemp);
cout << "finished!!! Times to diffuse: " << times.size() << endl;
}
return 0;
}
|
1,685
|
#include "includes.h"
// Stub kernel: intentionally empty — the word2vec negative-sampling update is
// not implemented in this translation unit.
__global__ void __word2vecNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float lrate, float vexp) {}
|
1,686
|
#include "includes.h"
// Copies inputs to outputs; where the incoming gradient is positive and the
// input exceeds -bias, the bias is added on top.
__global__ void cudaDTargetBiasPropagate_kernel( unsigned int size, const double bias, const double* inputs, const double* diffInputs, double* outputs)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    // Grid-stride loop: any launch configuration covers all `size` elements.
    for (unsigned int idx = first; idx < size; idx += step) {
        const bool addBias = (diffInputs[idx] > 0.0) && (inputs[idx] > -bias);
        outputs[idx] = addBias ? (inputs[idx] + bias) : inputs[idx];
    }
}
|
1,687
|
#include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
#include<math.h>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
#define tileDim 32
// Dimensions of a 2D array: dimension1 rows x dimension2 columns.
typedef struct {
int dimension1;
int dimension2;
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//dimension of each subarray (also called tiles.) All subarrays are square and equal in size. Should be the square root of threads per block for best occupancy
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
//----------------------------------- CUDA function definitions -----------------------------------------
// Tiled matrix-matrix multiplication on the GPU: C = A * B for square
// dim x dim matrices (the old header comment "vector sum A + B" was stale).
// One block computes one tileDim x tileDim tile of C; each thread produces a
// single output element. The B tile is staged column-wise into shared memory,
// effectively transposing it tile by tile.
// Launch contract: tileDim*tileDim threads per block, one block per C tile
// (tiles_per_dim^2 blocks) -- see the launch in main().
__global__ void matrix_mult(float *A, float *B, float *C, int dim) {
    int i, j;
    // determine the index of the thread among all GPU threads
    int blockId = blockIdx.x;
    //I don't know if the grid info supplied by CUDA is in registers or what, but based on the sample code above
    //I'm explicitly placing them there just in case. I figure I have 64 registers per thread when using 1024 threads/block, might as well use them
    //int blockDim = blockDim.x;
    //int threadId = blockId * blockDim.x + threadIdx.x;
    int threadId = threadIdx.x;
    //int threadCount = gridDim.x * blockDim.x;
    //assumes multiplication of square matrices
    int totalDim = dim;
    int totalCells = totalDim * totalDim;
    //number of tiles in each row/column of the C matrix (and therefore the other two matrices)
    //note: rounds UP, so edge tiles may extend past the matrix boundary
    int tiles_per_dim =(totalDim / tileDim) +1;
    //index of the top left corner of the tile represented by this block in the original C array
    int tileCorner = (blockId / tiles_per_dim * tileDim * totalDim) + (blockId % tiles_per_dim * tileDim);
    //declaring the subarrays for tiles of A and B in shared memory
    __shared__ float s_A[tileDim*tileDim], s_B[tileDim*tileDim];
    //accumulator variable for the value this thread will place into it's designated spot in C
    float c_acc = 0.0f;
    //offset for the cell transferred to shared memory by this thread navigating from the current tile corner (which is calculated in loop)
    //differences between the matrices here (and in the corner calculation) are intended to get A by rows and B by columns such that
    //B is effectively transposed tile by tile
    int a_off = (threadId / tileDim) * totalDim + (threadId % tileDim);
    int b_off = (threadId % tileDim) * totalDim + threadId / tileDim;
    //rows of each tile that the thread in question will be responsible for multiplying together
    //this ensures that each thread has the correct unique combination of one row from a and one row from b
    int a_row = threadId / tileDim;
    int b_row = threadId % tileDim;
    //march across the K dimension one tile at a time
    for(i = 0; i < tiles_per_dim; i++){
        //top left corners for the current operating tile of A and B arrays
        int Acorner = (blockId / tiles_per_dim) * tileDim * totalDim + i * tileDim;
        int Bcorner = i * tileDim * totalDim + blockId % tiles_per_dim * tileDim;
        //detects if the current threads responsibility is on the grid, ie not in the part of a tile that extends past the boundaries
        //of the matrix threadId
        int a_ongrid = (((i != tiles_per_dim-1) || (threadId % tileDim < totalDim % tileDim)) && ((Acorner + a_off) < totalCells)) ? 1 : 0;
        int b_ongrid = (((blockId % tiles_per_dim != tiles_per_dim-1) || (threadId < (totalDim % tileDim) * tileDim)) && ((Bcorner + b_off) < totalCells)) ? 1 : 0;
        // printf("%d: %d + %d a_ongrid: %d\n",blockId, Acorner, a_off, a_ongrid);
        //tiles on the edge will go over the bounds of the matrix a small amount, in cells where that happens
        //I simply set them equal to 0 so they don't affect the computation
        if(a_ongrid)
            s_A[threadId] = A[Acorner + a_off];
        else
            s_A[threadId] = 0.0f;
        if(b_ongrid)
            s_B[threadId] = B[Bcorner + b_off];
        else
            s_B[threadId] = 0.0f;
        //synchronize so that no thread is operating on the tiles before they are properly initialized
        __syncthreads();
        //partial dot product of this thread's A row with its (transposed) B row
        for(j = 0; j < tileDim; j++){
            c_acc += (s_A[a_row*tileDim + j] * s_B[b_row*tileDim + j]);
        }
        //sync so that one warp doesn't start changing the data as other warps are using it to calculate
        __syncthreads();
    }
    //set the correct cell in C to be equal to the accumulated value
    //when using the correct tileCorner according to the blockId, a_off will actually also be the correct C offset.
    //if statement because it is possible this is one of the tiles that extend past the border of the matrix
    if((tileCorner + a_off < totalCells) && ((blockId % tiles_per_dim != tiles_per_dim-1) || (threadId % tileDim < totalDim % tileDim)))
        C[tileCorner + a_off] = c_acc;
}
//-------------------------------------------------------------------------------------------------------
// Parses matrix dimensions from the command line (defaults: 100x100), runs the
// tiled GPU multiplication, reports timing, and copies the result back.
// Fixes: deprecated cudaThreadSynchronize() replaced with cudaDeviceSynchronize(),
// launch errors are now checked, and all buffers are released before exit.
int main(int argc, char **argv) {
    A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
    A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
    B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
    B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
    C_MD.dimension1 = A_MD.dimension1;
    C_MD.dimension2 = B_MD.dimension2;
    printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
    printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
    printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
    allocateAndInitializeAB();
    // matrix matrix multiplication in the CPU
    double elapsed;
    /** clock_t start = clock();
    computeCpuMMM();
    clock_t end = clock();
    elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    printf("Computation time in the CPU: %f seconds\n", elapsed);
    fflush(stdout);
    **/
    copyMatricesToGPU();
    int threads_per_block = tileDim*tileDim;
    // one block per output tile; the kernel assumes square matrices, so only
    // C_MD.dimension1 is used for the grid
    int num_blocks = ((C_MD.dimension1 / tileDim) + 1) * ((C_MD.dimension1 / tileDim) + 1);
    clock_t gstart = clock();
    matrix_mult<<<num_blocks, threads_per_block>>>(A_GPU, B_GPU, C_GPU, C_MD.dimension1);
    check_error(cudaGetLastError());      // catch launch-configuration errors
    check_error(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
    clock_t gend = clock();
    elapsed = (gend - gstart) / (double) CLOCKS_PER_SEC;
    printf("Computation time in the GPU: %f seconds\n", elapsed);
    copyResultFromGPU();
    // compareHostAndGpuOutput();
    // release device and host buffers (previously leaked)
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);
    free(A);
    free(B);
    free(C_CPU);
    return 0;
}
// allocate and initialize A and B using a random number generator;
// elements are uniform in [0, 0.999] with three-decimal granularity
void allocateAndInitializeAB() {
    srand(time(NULL));
    size_t elemsA = (size_t) A_MD.dimension1 * A_MD.dimension2;
    A = (float*) malloc(elemsA * sizeof(float));
    for (size_t idx = 0; idx < elemsA; idx++) {
        A[idx] = (rand() % 1000) * 0.001;
    }
    size_t elemsB = (size_t) B_MD.dimension1 * B_MD.dimension2;
    B = (float*) malloc(elemsB * sizeof(float));
    for (size_t idx = 0; idx < elemsB; idx++) {
        B[idx] = (rand() % 1000) * 0.001;
    }
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
void copyMatricesToGPU() {
    size_t bytesA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
    size_t bytesB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    check_error(cudaMalloc((void **) &A_GPU, bytesA));
    check_error(cudaMalloc((void **) &B_GPU, bytesB));
    check_error(cudaMalloc((void **) &C_GPU, bytesC)); // output buffer: allocated only, written by the kernel
    check_error(cudaMemcpy(A_GPU, A, bytesA, cudaMemcpyHostToDevice));
    check_error(cudaMemcpy(B_GPU, B, bytesB, cudaMemcpyHostToDevice));
}
// copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
void copyResultFromGPU() {
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    C_CPU = (float*) malloc(bytesC);
    check_error(cudaMemcpy(C_CPU, C_GPU, bytesC, cudaMemcpyDeviceToHost));
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
void computeCpuMMM() {
    // allocate the result matrix for the CPU computation
    C = (float*) malloc((size_t) C_MD.dimension1 * C_MD.dimension2 * sizeof(float));
    // C[row][col] = sum over k of A[row][k] * B[k][col]
    for (int row = 0; row < A_MD.dimension1; row++) {
        for (int col = 0; col < B_MD.dimension2; col++) {
            float acc = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                acc += A[row * A_MD.dimension2 + k] * B[k * B_MD.dimension2 + col];
            }
            C[row * C_MD.dimension2 + col] = acc;
        }
    }
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// from the CPU (absolute tolerance 0.01 per element)
void compareHostAndGpuOutput() {
    const int total = C_MD.dimension1 * C_MD.dimension2;
    int mismatches = 0;
    for (int idx = 0; idx < total; idx++) {
        if (fabs(C[idx] - C_CPU[idx]) > 0.01) {
            ++mismatches;
            printf("mismatch at index %i: %f\t%f\n", idx, C[idx], C_CPU[idx]);
        }
    }
    if (mismatches > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", mismatches);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
// Prints the specified error message and then exits with failure status.
// Fix: diagnostics now go to stderr instead of stdout so they are not lost
// when program output is redirected.
void die(const char *error) {
    fprintf(stderr, "%s", error);
    exit(1);
}
// If the specified error code refers to a real error, report it and quit the program.
// Fix: the diagnostic now goes to stderr instead of stdout.
void check_error(cudaError e) {
    if (e != cudaSuccess) {
        fprintf(stderr, "\nCUDA error: %s\n", cudaGetErrorString(e));
        exit(1);
    }
}
|
1,688
|
/*
This code takes the kernel and unpacks it (each kernel separately), storing
each weight's position in the original kernel while grouping identical weights
together. Then, using output-tiled convolution, for each output value the
input positions sharing the same weight are added together first and
multiplied by that weight once.
*/
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define CUDA_CALL(x) do { cudaError_t err=(x); \
if(err!=cudaSuccess) { \
printf("Error %s at %s: %d",cudaGetErrorString(err),__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define W 32 // Input DIM
#define D 4 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 2 // Number of kernels
#define TILE_W 16 //Tile Width
#define n1 3 //range of INQ n1 > n2
#define n2 1
#define BAND 3 //Number of unique powers
#define STRIDE_LENGTH 1 //stride length
#define OWS (W- T + 1) // Output DIM
#define OW (((W - T)/STRIDE_LENGTH) + 1) //output dim
//comparison operator for sorting: returns negative/zero/positive ordering.
//Fix: plain subtraction (*a - *b) can overflow for widely separated ints,
//which is undefined behavior; the branchless comparison below cannot overflow.
int compare(const void * a, const void * b)
{
    int lhs = *(const int*)a;
    int rhs = *(const int*)b;
    return (lhs > rhs) - (lhs < rhs);
}
//filling the W x W x D input matrix with a deterministic pattern (mod 255)
void fillMatrix(unsigned char *matrix){
    for(int i = 0; i < W; i++){
        for(int j = 0; j < W; j++){
            for(int k = 0; k < D; k++){
                // flat row-major index equivalent to m[i][j][k]
                matrix[(i*W + j)*D + k] = (unsigned char)((i*j + j*k + i*k + i*2 + j*3 + k*4) % 255);
            }
        }
    }
}
//fill kernel with a quantized weight exponent and its (row, col, depth)
//position in the kernel; each entry is a 4-int record
void fillKernel(int *positions){
    for(int i = 0; i < N; i++){
        for(int j = 0; j < T; j++){
            for(int k = 0; k < T; k++){
                for(int l = 0; l < D; l++){
                    int *entry = positions + 4*(i*T*T*D + j*T*D + k*D + l);
                    entry[0] = ((i + j + T + D) % n1 + n2); // weight exponent
                    entry[1] = j;                           // row in kernel
                    entry[2] = k;                           // column in kernel
                    entry[3] = l;                           // depth slice
                }
            }
        }
    }
}
// Writes the N output feature maps (each OW x OW) to the file "GPU_TEST".
// Fix: a failed fopen previously caused fprintf on a NULL stream; it is now
// detected and reported.
void printtofile(float *m){
    const char *fname = "GPU_TEST";
    FILE *f = fopen(fname, "w");
    if (f == NULL) {
        fprintf(stderr, "could not open %s for writing\n", fname);
        return;
    }
    float (*mat)[OW][OW]=(float (*)[OW][OW])m;
    for(unsigned i=0; i < N; i++) {
        for(unsigned j=0; j < OW; j++){
            for(unsigned k=0;k<OW;k++){
                fprintf(f,"%.4f ", mat[i][j][k]);
            }
            fprintf(f, "\n" );
        }
        fprintf(f,"\n");
    }
    fclose(f);
}
//kernel: INQ-style convolution. gridDim.z selects one of the N filters; each
//block computes a (TILE_W-T+1)^2 patch of outputs, output-stationary. Dp holds
//4-int records (weight exponent, row, col, depth) sorted so equal weights are
//contiguous; Dmarkers gives the offsets where the weight changes, so inputs
//sharing a weight are summed first and scaled once (fixed 2/4/8 factors below).
__global__ void conv(unsigned char* Dm, float* Do, int* Dp ,int* Dmarkers)
{
    __shared__ int ker[4*T*T*D]; // kernel to shared memory
    __shared__ unsigned char tile[(TILE_W)*(TILE_W)*D];
    int tx=blockDim.x*blockIdx.x+threadIdx.x;
    int ty=blockDim.y*blockIdx.y+threadIdx.y;
    int bz=blockIdx.z;      // which of the N filters this block handles
    int zk=bz*T*T*D;        // record offset of this filter within Dp
    int ym,xm;
    int d=0;
    int i=0;
    int j=0;
    int mark[2];
    //mark contains offsets that separate runs of identical weights
    mark[0] = Dmarkers[bz*BAND + 1];
    mark[1] = Dmarkers[bz*BAND + 2];
    __syncthreads();
    //copying kernel records into shared memory; only the first T x T threads
    //of the block participate, one 4-int record per (x, y, d)
    for(d=0;d<D;d++)
    {
        if(threadIdx.x<T&&threadIdx.y<T){
            ker[threadIdx.y*4*T*D+threadIdx.x*4*D+4*d]  =Dp[4*zk + threadIdx.y*4*T*D + threadIdx.x*4*D+4*d + 0];
            ker[threadIdx.y*4*T*D+threadIdx.x*4*D+4*d+1]=Dp[4*zk + threadIdx.y*4*T*D + threadIdx.x*4*D+4*d + 1];
            ker[threadIdx.y*4*T*D+threadIdx.x*4*D+4*d+2]=Dp[4*zk + threadIdx.y*4*T*D + threadIdx.x*4*D+4*d + 2];
            ker[threadIdx.y*4*T*D+threadIdx.x*4*D+4*d+3]=Dp[4*zk + threadIdx.y*4*T*D + threadIdx.x*4*D+4*d + 3];
        }
    }
    __syncthreads();
    //copying the input tile plus its halo: each thread loads its own cell and,
    //near the tile edges, up to three halo cells shifted by (TILE_W - T + 1)
    for(d=0;d<D;d++)
    {
        ym=ty*W*D;
        xm=tx*D;
        tile[threadIdx.y*(TILE_W)*D+threadIdx.x*D+d]=Dm[ym+xm+d];
        if((tx+(TILE_W - T + 1))<W&&(threadIdx.x+(TILE_W - T + 1))<(TILE_W))
        {
            ym=ty*W*D;
            xm=(tx+(TILE_W - T + 1))*D;
            tile[threadIdx.y*(TILE_W)*D+(threadIdx.x+(TILE_W - T + 1))*D+d]=Dm[ym+xm+d];
        }
        if((ty+(TILE_W - T + 1))<W&&(threadIdx.y+(TILE_W - T + 1))<(TILE_W))
        {
            ym=(ty+(TILE_W - T + 1))*W*D;
            xm=(tx)*D;
            tile[(threadIdx.y+(TILE_W - T + 1))*(TILE_W)*D+(threadIdx.x)*D+d]=Dm[ym+xm+d];
        }
        if(((ty+(TILE_W - T + 1))<W&&(threadIdx.y+(TILE_W - T + 1))<(TILE_W))&&((tx+(TILE_W - T + 1))<W&&(threadIdx.x+(TILE_W - T + 1))<(TILE_W)))
        {
            ym=(ty+(TILE_W - T + 1))*W*D;
            xm=(tx+(TILE_W - T + 1))*D;
            tile[(threadIdx.y+(TILE_W - T + 1))*(TILE_W)*D+(threadIdx.x+(TILE_W - T + 1))*D+d]=Dm[ym+xm+d];
        }
    }
    __syncthreads();
    //output-stationary accumulation; only threads on the stride grid compute
    if( ty%STRIDE_LENGTH == 0 && tx%STRIDE_LENGTH == 0 )
    {
        //one partial sum per unique weight band
        int sum[BAND];
        for(i=0;i<BAND;i++){
            sum[i]=0;
        }
        for(i=0;i<T;i++)
        {
            int yk1=i*4*T*D;
            for(j=0;j<T;j++)
            {
                int xk1=j*4*D;
                for(d=0;d<D;d++){
                    if (yk1+xk1+d*4 < mark[0]) {//markers contain the points where weights change
                        sum[0] += tile[(ker[yk1+xk1+d*4+1] + threadIdx.y)*(TILE_W)*D + (ker[yk1+xk1+d*4+2] + threadIdx.x)*D + ker[yk1+xk1+d*4+3]];//<<ker[yk1+xk1+d*4+0];
                    } else if (yk1+xk1+d*4 < mark[1]) {
                        sum[1] += tile[(ker[yk1+xk1+d*4+1] + threadIdx.y)*(TILE_W)*D + (ker[yk1+xk1+d*4+2] + threadIdx.x)*D + ker[yk1+xk1+d*4+3]];//<<ker[yk1+xk1+d*4+0];
                    } else {
                        sum[2] += tile[(ker[yk1+xk1+d*4+1] + threadIdx.y)*(TILE_W)*D + (ker[yk1+xk1+d*4+2] + threadIdx.x)*D + ker[yk1+xk1+d*4+3]];//<<ker[yk1+xk1+d*4+0];
                    }
                }
            }
        }
        //scale each band's sum by its weight (fixed powers of two here) and
        //write the output; guard keeps edge tiles inside the output bounds
        if(tx<OWS&&ty<OWS){
            Do[bz*OW*OW+(ty/STRIDE_LENGTH)*OW+(tx/STRIDE_LENGTH)] = sum[0]*2+sum[1]*4+sum[2]*8;
        }
    }
}
// Builds the quantized kernel description, runs the INQ convolution on the GPU,
// times it with CUDA events, and writes the result to "GPU_TEST".
// Fixes: device allocations/copies and the launch are now error-checked via
// CUDA_CALL, events and all host/device memory are released, and main returns 0.
int main()
{
    //host buffers: kernel position records, input, output, band markers
    int *positions = (int*)malloc(sizeof(int)*4*T*T*D*N);
    unsigned char *matrix = (unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
    float *output = (float *)malloc(sizeof(float)*N*OW*OW);
    int *markers = (int*)malloc(sizeof(int)*3*N);
    //filling kernel and the matrix
    fillMatrix(matrix);
    fillKernel(positions);
    //sorting each kernel (N kernels separately, so that same weights occur together for each kernel)
    for(int i = 0; i < N ; i++){
        qsort(positions + T*T*D*4*i ,T*T*D , 4*sizeof(int) , compare);
    }
    //finding markers: offsets where the (sorted) weight value changes
    for(int i = 0 ; i < N ; i++){
        int mark = 1;
        markers[i*BAND + 0] = 0;
        for(int j = 1; j < T*T*D ; j++){
            if(positions[4*i*T*T*D + j*4] > positions[4*i*T*T*D + (j-1)*4]){
                markers[i*BAND + mark] = j*4;
                mark++;
            }
        }
    }
    //allocating memory on the GPU (allocation failures are now detected)
    int *Dmarkers; CUDA_CALL(cudaMalloc(&Dmarkers,sizeof(int)*BAND*N));
    int *Dpositions; CUDA_CALL(cudaMalloc(&Dpositions,sizeof(int)*4*N*T*T*D));
    unsigned char *Dmatrix; CUDA_CALL(cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D));
    float *Doutput; CUDA_CALL(cudaMalloc(&Doutput,sizeof(float)*N*OW*OW));
    int blockdimx=(TILE_W - T + 1);
    int blockdimy=(TILE_W - T + 1);
    int griddimz = N;
    int griddimy = (OWS+blockdimx-1)/blockdimx;
    int griddimx = (OWS+blockdimy-1)/blockdimy;
    dim3 blocks(griddimx, griddimy, griddimz);
    dim3 thrds_per_block(blockdimx, blockdimy);
    //copying matrix, kernel and markers to the GPU
    CUDA_CALL(cudaMemcpy(Dmarkers, markers, sizeof(int)*BAND*N,cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(Dpositions, positions, sizeof(int)*4*T*T*D*N,cudaMemcpyHostToDevice));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);
    conv<<<blocks,thrds_per_block>>>(Dmatrix, Doutput, Dpositions,Dmarkers);
    CUDA_CALL(cudaGetLastError());
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("%f\n",milliseconds);
    CUDA_CALL(cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost));
    //Use print_matrix_to_file function only
    printtofile(output);
    //release events and all device/host memory before exit (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(Dmarkers);
    cudaFree(Dpositions);
    cudaFree(Dmatrix);
    cudaFree(Doutput);
    free(positions);
    free(matrix);
    free(output);
    free(markers);
    return 0;
}
|
1,689
|
#include <iostream>
using namespace std;
// Element-wise product: d_c[i] = d_a[i] * d_b[i] for every i < size.
__global__ void Dot(int* d_a, int* d_b, int* d_c, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        d_c[idx] = d_a[idx] * d_b[idx];
    }
}
// Block-wide shared-memory tree reduction: each block sums its (up to 1024)
// elements of d_c and writes the partial sum to d_out[blockIdx.x].
// Assumes blockDim.x == 1024 (the tree starts at s = 512).
__global__ void Add(int* d_c, int* d_out, int size)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int t_id = threadIdx.x;
    int b_id = blockIdx.x;
    __shared__ int a[1024];
    // stage this block's slice; entries with id >= size stay uninitialized but
    // are never read thanks to the id+s guard below
    if(id < size)
        a[t_id] = d_c[id];
    __syncthreads();
    for(int s = 512; s>0; s = s/2)
    {
        // barrier placed at the top of the loop so every thread reaches it,
        // including threads that skip the addition via `continue` below
        __syncthreads();
        // skip lanes whose partner element lies past the end of the data
        if(id>=size || id+s>=size)
            continue;
        if(t_id<s)
        {
            a[t_id]+=a[t_id + s];
        }
    }
    __syncthreads();
    // thread 0 publishes the block's partial sum
    if(t_id==0)
        d_out[b_id] = a[t_id];
}
// Reads a vector length, computes the dot product of two random int vectors on
// the GPU (element-wise multiply + two reduction passes), and checks it against
// a CPU result. Fix: non-standard variable-length stack arrays (int h_a[size])
// are replaced with heap allocation so large sizes cannot overflow the stack,
// and the host buffers are released.
// NOTE(review): the two-pass reduction assumes size <= 1024*1024 so the second
// Add<<<1,1024>>> can cover all partial sums -- confirm for larger inputs.
int main()
{
    int size;
    cout<<"Enter size : ";
    cin>>size;
    int *h_a = new int[size];
    int *h_b = new int[size];
    int h_ans;
    int bytes=size*sizeof(int);
    int length=(int)ceil(1.0*size/1024);
    for(int i=0;i<size;i++)
    {
        h_a[i]=rand()%10;
        h_b[i]=rand()%10;
    }
    int *d_a, *d_b, *d_c, *d_out, *d_ans;
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    cudaMalloc((void**)&d_out, bytes); // oversized: only `length` ints are used
    cudaMalloc((void**)&d_ans, sizeof(int));
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    Dot<<<((int)ceil(1.0*size/1024)), 1024>>>(d_a, d_b, d_c, size);
    Add<<<((int)ceil(1.0*size/1024)), 1024>>>(d_c, d_out, size);
    Add<<<1, 1024>>>(d_out, d_ans, length);
    cudaMemcpy(&h_ans, d_ans, sizeof(int), cudaMemcpyDeviceToHost);
    // CPU reference for verification
    int res=0;
    for(int i=0;i<size;i++)
    {
        res+=(h_a[i]*h_b[i]);
    }
    if(h_ans==res)
        cout<<"Correct result";
    else
        cout<<"Invalid";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_out);
    cudaFree(d_ans);
    delete[] h_a;
    delete[] h_b;
}
|
1,690
|
/////////////////////////
// arrayXor.cu //
// Andrew Krepps //
// Module 4 Assignment //
// 2/26/2018 //
/////////////////////////
#include <chrono>
#include <stdio.h>
///////////////////////////////////////////////////////////////////////////////
/// \brief calculate the bitwise exclusive OR of two arrays
///
/// \param [in] in1 the first input array
/// \param [in] in2 the second input array
/// \param [out] out the output array (out[i] = in1[i] ^ in2[i])
/// \param [in] n the number of elements in each array
///////////////////////////////////////////////////////////////////////////////
__global__
void arrayXor(const unsigned int* in1, const unsigned int* in2, unsigned int* out, const unsigned int n)
{
	const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= n) {
		return; // guard the grid tail
	}
	out[idx] = in1[idx] ^ in2[idx];
}
///////////////////////////////////////////////////////////////////////////////
/// \brief initialize input arrays in host memory
///
/// \param [out] hostIn1 the first input array (hostIn1[i] = i)
/// \param [out] hostIn2 the second input array, filled in reverse
///                      (hostIn2[n-1-i] = i)
/// \param [in] n the number of elements in each array
///////////////////////////////////////////////////////////////////////////////
void initializeHostMemory(unsigned int* hostIn1, unsigned int* hostIn2, const unsigned int n)
{
	for (unsigned int i = 0; i < n; ++i) {
		hostIn1[i] = i;
		// bug fix: was hostIn2[n-i], which wrote one element past the end of
		// the array at i == 0 and never initialized hostIn2[0]
		hostIn2[n-1-i] = i;
	}
}
///////////////////////////////////////////////////////////////////////////////
/// \brief run kernel execution with pageable host memory
///
/// \param [in] n the number of array elements
/// \param [in] numBlocks the number of blocks
/// \param [in] blockSize the number of threads per block
/// \param [in] iterations the number of executions to run
///             (allocated memory is reused between executions)
///////////////////////////////////////////////////////////////////////////////
void runWithPageableMemory(const unsigned int n, const unsigned int numBlocks, const unsigned int blockSize, const unsigned int iterations)
{
	const unsigned int bytes = n*sizeof(unsigned int);
	// pageable host buffers
	unsigned int* hostA = (unsigned int*)malloc(bytes);
	unsigned int* hostB = (unsigned int*)malloc(bytes);
	unsigned int* hostOut = (unsigned int*)malloc(bytes);
	initializeHostMemory(hostA, hostB, n);
	// device buffers
	unsigned int *devA, *devB, *devOut;
	cudaMalloc((void**)&devA, bytes);
	cudaMalloc((void**)&devB, bytes);
	cudaMalloc((void**)&devOut, bytes);
	// copy in, run the kernel, copy out -- once per iteration
	for (unsigned int iter = 0; iter < iterations; ++iter) {
		cudaMemcpy(devA, hostA, bytes, cudaMemcpyHostToDevice);
		cudaMemcpy(devB, hostB, bytes, cudaMemcpyHostToDevice);
		arrayXor<<<numBlocks, blockSize>>>(devA, devB, devOut, n);
		cudaMemcpy(hostOut, devOut, bytes, cudaMemcpyDeviceToHost);
	}
	cudaFree(devA);
	cudaFree(devB);
	cudaFree(devOut);
	free(hostA);
	free(hostB);
	free(hostOut);
}
///////////////////////////////////////////////////////////////////////////////
/// \brief run kernel execution with pinned (page-locked) host memory
///
/// \param [in] n the number of array elements
/// \param [in] numBlocks the number of blocks
/// \param [in] blockSize the number of threads per block
/// \param [in] iterations the number of executions to run
///             (allocated memory is reused between executions)
///////////////////////////////////////////////////////////////////////////////
void runWithPinnedMemory(const unsigned int n, const unsigned int numBlocks, const unsigned int blockSize, const unsigned int iterations)
{
	const unsigned int bytes = n*sizeof(unsigned int);
	// pinned host buffers
	unsigned int *hostA, *hostB, *hostOut;
	cudaMallocHost((void**)&hostA, bytes);
	cudaMallocHost((void**)&hostB, bytes);
	cudaMallocHost((void**)&hostOut, bytes);
	initializeHostMemory(hostA, hostB, n);
	// device buffers
	unsigned int *devA, *devB, *devOut;
	cudaMalloc((void**)&devA, bytes);
	cudaMalloc((void**)&devB, bytes);
	cudaMalloc((void**)&devOut, bytes);
	// copy in, run the kernel, copy out -- once per iteration
	for (unsigned int iter = 0; iter < iterations; ++iter) {
		cudaMemcpy(devA, hostA, bytes, cudaMemcpyHostToDevice);
		cudaMemcpy(devB, hostB, bytes, cudaMemcpyHostToDevice);
		arrayXor<<<numBlocks, blockSize>>>(devA, devB, devOut, n);
		cudaMemcpy(hostOut, devOut, bytes, cudaMemcpyDeviceToHost);
	}
	cudaFree(devA);
	cudaFree(devB);
	cudaFree(devOut);
	cudaFreeHost(hostA);
	cudaFreeHost(hostB);
	cudaFreeHost(hostOut);
}
// Parses [totalThreads] [blockSize] from the command line, then times the XOR
// pipeline with pageable vs. pinned host memory for 1 and 100 iterations and
// reports which memory type won each comparison.
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	int numBlocks = totalThreads/blockSize;
	// round the thread count up to a whole number of blocks if needed
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	// the first execution appears to take longer (possibly due to caching)
	// so run each memory type once first to avoid affecting the timing results
	runWithPageableMemory(totalThreads, numBlocks, blockSize, 1);
	runWithPinnedMemory(totalThreads, numBlocks, blockSize, 1);
	using hrclock = std::chrono::high_resolution_clock;
	auto elapsedMs = [](hrclock::time_point begin, hrclock::time_point end) {
		std::chrono::duration<float> d = end - begin;
		return d.count()*1000.0f;
	};
	// timed runs: 1 iteration each, then 100 iterations each
	auto t0 = hrclock::now();
	runWithPageableMemory(totalThreads, numBlocks, blockSize, 1);
	auto t1 = hrclock::now();
	runWithPinnedMemory(totalThreads, numBlocks, blockSize, 1);
	auto t2 = hrclock::now();
	runWithPageableMemory(totalThreads, numBlocks, blockSize, 100);
	auto t3 = hrclock::now();
	runWithPinnedMemory(totalThreads, numBlocks, blockSize, 100);
	auto t4 = hrclock::now();
	// display timing results (in ms)
	float singlePageableMs = elapsedMs(t0, t1);
	float singlePinnedMs = elapsedMs(t1, t2);
	float multiPageableMs = elapsedMs(t2, t3);
	float multiPinnedMs = elapsedMs(t3, t4);
	printf("Pageable memory (1 iteration): %.6f ms\n", singlePageableMs);
	printf("Pinned memory (1 iteration): %.6f ms\n", singlePinnedMs);
	printf("Pageable memory (100 iterations): %.6f ms\n", multiPageableMs);
	printf("Pinned memory (100 iterations): %.6f ms\n", multiPinnedMs);
	if (singlePageableMs < singlePinnedMs) {
		printf("1: Pageable wins");
	}
	else if (singlePageableMs > singlePinnedMs) {
		printf("1: Pinned wins");
	}
	else {
		printf("1: It's a tie");
	}
	if (multiPageableMs < multiPinnedMs) {
		printf(" | 100: Pageable wins\n\n");
	}
	else if (multiPageableMs > multiPinnedMs) {
		printf(" | 100: Pinned wins\n\n");
	}
	else {
		printf(" | 100: It's a tie\n\n");
	}
	return EXIT_SUCCESS;
}
|
1,691
|
/* Copyright 2012 by Erik Opavsky
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
// Copies a flat device array of fixed-length sequences to the host and prints
// each sequence character-by-character, one per line.
void printDeviceSequences (char * d_sequences, int numSequences, int sequenceLength) {
  size_t bytes = sizeof (char) * numSequences * sequenceLength;
  char * host = (char *) malloc (bytes);
  cudaMemcpy (host, d_sequences, bytes, cudaMemcpyDeviceToHost);
  for (int seq = 0; seq < numSequences; seq++) {
    printf ("d_sequences[%d] = ", seq);
    for (int pos = 0; pos < sequenceLength; pos++)
      printf ("%c", host[seq * sequenceLength + pos]);
    printf ("\n");
  }
  free (host);
}
// Copies the first sequenceLength bytes of d_bucketSequence to the host and
// prints two matchLength-character bucket labels from it.
void printFirstLastBuckets (char * d_bucketSequence, int numBuckets, int matchLength, int sequenceLength) {
  char * temp = (char *) malloc (sizeof (char) * sequenceLength);
  cudaMemcpy (temp, d_bucketSequence, sizeof (char) * sequenceLength, cudaMemcpyDeviceToHost);
  // cudaMemcpy (temp2, (d_bucketSequence + numBuckets * sizeof (char)), sizeof (char) * matchLength, cudaMemcpyDeviceToHost);
  printf ("first bucket = ");
  for (int i = 0; i < matchLength; i++)
    printf("%c", *(temp + i));
  printf("\nlast bucket = ");
  // NOTE(review): this reads temp[numBuckets-1 .. numBuckets-1+matchLength-1],
  // but only sequenceLength bytes were copied above; if numBuckets-1+matchLength
  // exceeds sequenceLength this reads past the buffer. The commented-out copy
  // above suggests the indexing was in flux -- confirm intended layout.
  for (int i = 0; i < matchLength; i++)
    printf("%c", *(temp + numBuckets - 1 + i));
  printf("\n");
  // printf("numbuckets = %d\n", numBuckets);
  free (temp);
  // free (temp2);
}
// Fetches the first and last device sequences into one host buffer and prints
// both character-by-character.
void printDeviceFirstLast (char * d_sequences, int numSequences, int sequenceLength) {
  size_t seqBytes = sizeof (char) * sequenceLength;
  char * host = (char *) malloc (2 * seqBytes);
  cudaMemcpy (host, d_sequences, seqBytes, cudaMemcpyDeviceToHost);
  cudaMemcpy (host + sequenceLength, d_sequences + sequenceLength * (numSequences - 1), seqBytes, cudaMemcpyDeviceToHost);
  int pos = 0;
  printf ("d_sequences[0] = ");
  for (; pos < sequenceLength; pos++)
    printf ("%c", host[pos]);
  printf ("\n");
  printf ("d_sequences[%d] = ", numSequences - 1);
  for (; pos < sequenceLength * 2; pos++)
    printf ("%c", host[pos]);
  printf ("\n");
  free (host);
}
// Prints the first and last sequences via %s.
// NOTE(review): assumes each sequences[i] is NUL-terminated; sequenceLength is
// unused here (unlike printSequences, which bounds the print by it) -- confirm.
void printFirstLast (char ** sequences, int numSequences, int sequenceLength) {
  printf("sequences[0] = %s\n", sequences[0]);
  printf("sequences[%d] = %s\n", numSequences - 1, sequences[numSequences - 1]);
}
// Prints every sequence character-by-character, bounded by sequenceLength
// (does not rely on NUL termination).
void printSequences (char ** sequences, int numSequences, int sequenceLength) {
  for (int row = 0; row < numSequences; row++) {
    printf ("sequences[%d] = ", row);
    for (int col = 0; col < sequenceLength; col++)
      printf ("%c", sequences[row][col]);
    printf ("\n");
  }
}
/*
void printFlatSequences (char * sequences, int numSequences, int sequenceLength) {
for (int i = 0; i < numSequences; i++)
printf ("flat_sequences[%d] = %s\n", i , sequences + i * sequenceLength);
}
*/
|
1,692
|
#include "includes.h"
//macro to check return value of the cuda runtime call and exits
//if call failed
// Sobel-style edge filter over an 8-bit single-channel image, with the
// horizontal response amplified (x4) and the vertical response damped (/5)
// to produce the intended "tie dye" effect.
// Bug fix: the original guard (x >= 0 && x < width etc.) admitted border
// pixels whose 3x3 neighbourhood reads out of bounds; the border row/column
// is now skipped (those output pixels are left unwritten).
__global__ void anyMethod(unsigned char* buff , unsigned char* buffer_out , int w , int h)
{
    int x = blockIdx.x * blockDim.x +threadIdx.x ;
    int y = blockIdx.y * blockDim.y +threadIdx.y;
    int width = w , height = h;
    if((x>=1 && x < width-1) && (y>=1 && y<height-1))
    {
        int hx = -buff[width*(y-1) + (x-1)] + buff[width*(y-1)+(x+1)]
            -2*buff[width*(y)+(x-1)] + 2* buff[width*(y)+(x+1)]
            -buff[width*(y+1)+(x-1)] + buff[width*(y+1)+(x+1)];
        // NOTE(review): the middle positive term below repeats (y-1, x+1);
        // a standard Sobel vertical kernel would use 2*buff[width*(y-1)+(x)].
        // Preserved as-is since the distortion may be intentional -- confirm.
        int vx = buff[width*(y-1)+(x-1)] +2*buff[width*(y-1)+(x+1)] +buff[width*(y-1)+(x+1)]
            -buff[width*(y+1)+(x-1)] -2* buff[width*(y+1)+(x)] - buff[width*(y+1)+(x+1)];
        //this is the main part changed to get the sort of tie dye effect for at least
        //the first part of the picture
        hx = hx*4;
        vx = vx/5;
        int val = (int)sqrtf((float)(hx) * (float)(hx) + (float)(vx) * (float)(vx));
        buffer_out[y * width + x] = (unsigned char) val;
    }
}
|
1,693
|
float h_A[]= {
0.5602859338938351, 0.9244625971829773, 0.5403795491752821, 0.6848732879891641, 0.531562688776698, 0.8218523016116492, 0.6783106411067508, 0.7026255321983701, 0.9295912736817191, 0.8472516207270454, 0.9685268037452972, 0.6954214751962997, 0.6653124426786916, 0.6914652843931912, 0.7988788108504095, 0.9368593656162654, 0.9430586541206105, 0.585361300467647, 0.7791518490024614, 0.9485752746357684, 0.9221042136394717, 0.7141524765015785, 0.8144929805932117, 0.5179539139937064, 0.7346294998168887, 0.96426000240015, 0.5679862594359419, 0.5666186090354393, 0.8109847270686806, 0.8937879221369244, 0.8955078303684816, 0.665762615168916, 0.7475158712766304, 0.7544482913853279, 0.6809001321441512, 0.596445551158163, 0.5177866850950592, 0.8794812515885455, 0.7706566510489172, 0.6590132928493699, 0.8555275295545666, 0.8005644553148323, 0.9835578990137444, 0.8077401639815301, 0.7853702438031882, 0.7222395351068465, 0.5035252012786733, 0.8689008995912295, 0.8398599528800266, 0.7288338988123368, 0.8015213257766396, 0.5425385973578765, 0.877382192382421, 0.7139950730868097, 0.6625761448951533, 0.8268351223123221, 0.8955821062087717, 0.7091203216056419, 0.5836656618925563, 0.5591350127812281, 0.6539483721613495, 0.5557822651812814, 0.8367608636911397, 0.9628308198084592, 0.563188741388247, 0.9393006767615492, 0.5838946573792916, 0.6079570646883796, 0.9348342676279161, 0.7983693029745332, 0.7272302931680886, 0.7205200449769646, 0.9823846151433383, 0.7439065826780042, 0.52327335538567, 0.6068528466243996, 0.8926119344821397, 0.9110750912112745, 0.6796420923724689, 0.6610545640211996, 0.9959344698033843, 0.9587273124560504, 0.6449431673442578, 0.9982644840300259, 0.8046181934346617, 0.9421115533239537, 0.890046927044618, 0.7888937909604801, 0.8211535928083558, 0.8301006048959668, 0.5536618754710574, 0.5632632630313734, 0.8986673696564687, 0.5878581687610758, 0.9207730333679951, 0.6913457027174847, 0.9009700211272463, 0.9851573265020267, 0.838842097776948, 0.9037131401570611, 
0.6278829190271653, 0.680094999888851, 0.765805597548739, 0.7088602063490754, 0.8042260094059304, 0.9522867654324719, 0.8824229867412194, 0.8951140768438586, 0.9253685710364733, 0.5224446769055575, 0.8533668558463001, 0.9343739080237071, 0.8389104660692017, 0.8431543290881001, 0.9529326064833554, 0.9229872267568175, 0.593683297789504, 0.6050183739398671, 0.8218755145780258, 0.5598440506038042, 0.9312595839602344, 0.5110731247126582, 0.9010226215558677, 0.9517261119681184, 0.9426613242101618, 0.5752776910633123, 0.7995007084607759, 0.805606938545111, 0.9161042016296115, 0.8680157588025157, 0.9450763784591129, 0.6736498481809144, 0.785412322906309, 0.5501196832870574, 0.8840503396689989, 0.610243167476666, 0.977954154951203, 0.6420307089912763, 0.6399135373316086, 0.8588291529706362, 0.8730335456496686, 0.8163568294755537, 0.6369606371712093, 0.664973104847842, 0.9025514402182889, 0.7280851910700883, 0.7310916687874247, 0.9264625481837292, 0.6917627255124166, 0.7536619751629128, 0.5583477667005571, 0.6388798346402755, 0.5189410936090224, 0.7637194375820111, 0.6854292994384774, 0.6448472022748338, 0.5897836618560557, 0.7941238938366051, 0.9428132961674744, 0.6490572232632923, 0.8195873777619782, 0.8218539877873824, 0.9219525272164804, 0.8176230781521114, 0.8815073918319527, 0.666427928654188, 0.7060642864450922, 0.754341065500046, 0.6369280883639141, 0.8347502699512808, 0.5792957729158993, 0.8945687021439164, 0.5245093009729496, 0.6240267602391589, 0.8919238942734155, 0.6939221889449527, 0.9523081558298305, 0.9308326329455661, 0.6622876268712143, 0.5201350619231655, 0.9209230590432028, 0.8438122189180625, 0.8126587984329177, 0.5281482292251147, 0.7668723687079889, 0.6461403789469111, 0.7385119037512233, 0.6723709985723925, 0.8056029548560266, 0.9701113408359805, 0.8735599076616001, 0.672205810289173, 0.8008677640696943, 0.6488784698483958, 0.6307258728717913, 0.5429764237259587, 0.7273159785996615, 0.5786801216107815, 0.8655463601992942, 0.9396214921941636, 
0.6876023077269628, 0.7899647722310006, 0.8782399444388309, 0.6931999927122516, 0.6417116272978893, 0.9126804484715119, 0.6678129370911956, 0.9522936164785241, 0.7970358685013808, 0.8475749222844252, 0.9120487772987804, 0.9762818179588202, 0.6317608077549985, 0.5801289765831013, 0.844110418671109, 0.7595947799215637, 0.9872509686617044, 0.9681232443513779, 0.9784645026852805, 0.9027550677747307, 0.8595723251760446, 0.5524894226215493, 0.723058211482926, 0.8490426090484029, 0.8741199807034904, 0.5749794506729973, 0.5561120305437917, 0.7437519884275421, 0.6835407071172256, 0.572699844674869, 0.6234954146091314, 0.6706403287972286, 0.8792844997883992, 0.6969077157340133, 0.9573069704126942, 0.6743577954607904, 0.9170547559837807, 0.8473261292982538, 0.7355178531161264, 0.5923018649376179, 0.700976500154896, 0.9390321241574033, 0.8463110823957705, 0.9905849170762884, 0.5767547400517647, 0.8607663474915384, 0.8663834104259936, 0.9318832806831361, 0.6356272468964823, 0.729826153098086, 0.7260990043349883, 0.641664232519926, 0.7434574780948991, 0.75173806991675, 0.582353908692963, 0.5672951903220178, 0.7912853547007791, 0.8137333367055319, 0.9184735871746252, 0.9327916645779175, 0.8089454481568417, 0.7222588891012481, 0.6671712196551935, 0.9401178274737672, 0.8233414095075882, 0.9321154618701059, 0.6062084184220988, 0.7001922569167469, 0.8755654627538887, 0.6043920369850115, 0.8906343839058963, 0.6126498508380056, 0.7417488314368674, 0.904823342379788, 0.6724353918712602, 0.9862013952183087, 0.6287253087506347, 0.6212113958729406, 0.9181516101561282, 0.770056065770437, 0.7590864681014707, 0.9307677449168973, 0.8947661963995615, 0.5529579052416882, 0.9239116816310916, 0.5293394523647081, 0.9214859381552992, 0.8232943445126273, 0.9651187480156813, 0.5560990017745799, 0.7422468805253764, 0.6788472207892571, 0.7342835133914374, 0.7786071231195331, 0.551536891374985, 0.9992961832842966, 0.6994441914550045, 0.5790252513894649, 0.9440209833658946, 0.8254518643465245, 
0.8934937437063989, 0.8052052759332211, 0.6631549709081774, 0.9977468502917544, 0.8634836174663434, 0.6850518115376867, 0.642508499732467, 0.9138224036936359, 0.9571627495213437, 0.7920896913524025, 0.5593635691986469, 0.6454848294573212, 0.8611554997870605, 0.8882053220384964, 0.9210406466906558, 0.9878454289729018, 0.7963909199821863, 0.8603838910261321, 0.7929653458699045, 0.5671573356484796, 0.965970333973623, 0.9356590485303994, 0.8685979764856695, 0.8563840433069949, 0.8070828738489619, 0.8177247845123088, 0.824359630967327, 0.8559806872771614, 0.9290015209725759, 0.9961246381345602, 0.552602459203315, 0.5570137582859083, 0.6173407720497609, 0.7758655555417576, 0.5825322659090421, 0.7855200493955009, 0.8549346749259454, 0.9367084666893177, 0.8778079823758391, 0.5804350794599539, 0.5320841180349826, 0.6223608694594873, 0.6977338544815501, 0.6762618110266407, 0.9228031350247128, 0.920408845064296, 0.6242028447342008, 0.9322446418950366, 0.910321599290384, 0.846123390708168, 0.8821115490040621, 0.8292889207696867, 0.8758025057307643, 0.6903278505713804, 0.6501393112931442, 0.8213923259277451, 0.5643617085311765, 0.6312204337500742, 0.5318461688566664, 0.6319575732714651, 0.5971592117784268, 0.6237586210239349, 0.9922318060370828, 0.5007129115839175, 0.5436453815402305, 0.8244882859294074, 0.8152539681562871, 0.6879008979575636, 0.9581360832905261, 0.9629862492963723, 0.8514992849039242, 0.7344168304629999, 0.8117728180196124, 0.5739948568678422, 0.7404374718337967, 0.6775090425565727, 0.5833558287638969, 0.7292784035225515, 0.728051040900989, 0.6625624810660609, 0.614731705141977, 0.5008834250511531, 0.945084026114551, 0.9535684740206795, 0.6673531271683395, 0.6801366454501125, 0.7357086635823358, 0.6435460869726357, 0.8294902960867615, 0.6794836695331952, 0.973848025598022, 0.538832735470153, 0.5505222410865909, 0.5752452820833234, 0.921382856198635, 0.8783783779907398, 0.719196573900408, 0.6909826896250522, 0.7843746644350719, 0.6304289819294866, 
0.9889675561378536, 0.987828423882288, 0.7966803739578752, 0.7658653170952934, 0.9130153103790473, 0.7899302533381527, 0.7864427182077847, 0.5232437792603739, 0.9190585568147072, 0.8827207468422382, 0.6290349865304039, 0.896584107074508, 0.6161198443251749, 0.8182402102236628, 0.9980570062694133, 0.5583795773865758, 0.9138739175222992, 0.8437897068578866, 0.8795126224496737, 0.9828949084810616, 0.5374624850706625, 0.7500765964584795, 0.6017840179693535, 0.5492356162870646, 0.8852007776418009, 0.7028252341055696, 0.9870165872944827, 0.7203009361319784, 0.874732420790068, 0.9661223764671591, 0.7600815332848956, 0.5570146946283647, 0.8244005707658999, 0.9829580871783832, 0.8524705065307114, 0.8807269744537467, 0.5053481421953869, 0.5279094494453027, 0.9563805201343283, 0.537331100307756, 0.8931100995639318, 0.6085051017271987, 0.5291669618245348, 0.8452317361508979, 0.7171990991455033, 0.6755592660786607, 0.6041748959474608, 0.6130606378288265, 0.5700654444372486, 0.8184252937968141, 0.7166767468682373, 0.996175429948525, 0.947327762359262, 0.5716579436256629, 0.6230988294141733, 0.849903248012396, 0.8405753943402918, 0.8229541900178472, 0.9558890304983042, 0.6277882783435471, 0.7691082590078364, 0.8193634448852095, 0.9607520261582401, 0.7719557590714959, 0.8646061339718045, 0.6618747024531336, 0.8816653480341926, 0.5133688557631311, 0.7755046190741484, 0.5464756229005239, 0.7136428958551425, 0.8585640104749752, 0.5312420254824575, 0.9295386795083276, 0.9268998379304992, 0.8539576698146357, 0.7033563746626301, 0.6029207446274991, 0.9861806551067134, 0.6304691011583279, 0.8954800341303994, 0.7093434099803819, 0.5264668227561724, 0.5523211415399258, 0.9333465170742236, 0.5007327448768819, 0.6290967783598611, 0.5631692510221028, 0.605293353426254, 0.8500225522370974, 0.9673039193230852, 0.8226636206774158, 0.812796943002918, 0.7163049980202681, 0.7257532011755873, 0.6988588622930725, 0.519758527145781, 0.566071638343431, 0.9982527718519554, 0.6942598545903742, 
0.667659924437813, 0.964317033857581, 0.9253435101994187, 0.8856757709043404, 0.7966401640345383, 0.7249211677002154, 0.8002428596502767, 0.9935584798377177, 0.57841327806145, 0.6237351962391869, 0.9000089149940206, 0.6388841547290025, 0.6649897648631884, 0.5022223103327085, 0.5440914017679055, 0.7270557030663549, 0.9532296501549793, 0.8305601282714649, 0.5506890500769703, 0.7703837450936979, 0.9270231147603266, 0.9379160996169148, 0.7695822993025516, 0.6389225414656938, 0.955415010170881, 0.7644910601488174, 0.7894724428901743, 0.9396872031870471, 0.668985451623471, 0.9918875575640191, 0.8970326370869157, 0.5654820944546248, 0.669642196036476, 0.6963883479449213, 0.6500350767385142, 0.8687742120095918, 0.7496637413915466, 0.7819261419246328, 0.6072987277151338, 0.8868519313281265, 0.6774585947937866, 0.6006851653163486, 0.7926917884784321, 0.8166986097722952, 0.8611030934730426, 0.5993309075470005, 0.9531194390309117, 0.989310930002265, 0.5078469892988557, 0.6545001833500068, 0.7871101151122437, 0.5873268467279809, 0.7120112529684733, 0.8860875051751622, 0.5689439476171387, 0.8635352464455672, 0.9624840395468637, 0.9339545061803534, 0.5949179054446493, 0.8582966330700976, 0.9938458119375257, 0.5734003710117876, 0.7642839222336588, 0.9773098881361585, 0.6352580899242743, 0.8185470463764145, 0.6523817850456065, 0.7383321228440439, 0.8821898600751884, 0.5335124557309889, 0.6131252513950696, 0.5361166241900334, 0.6896011946665078, 0.5936030550813494, 0.6411856815154011, 0.5841811865863193, 0.607851117596015, 0.6238812460106389, 0.8592435518662557, 0.5639633600392269, 0.8946998138224673, 0.7801509277887577, 0.9237862240804351, 0.936248609947369, 0.9659912684954292, 0.5707083487758453, 0.7781503567117385, 0.5693222679050386, 0.6707302203488859, 0.8738795393135559, 0.9962439179679327, 0.5429279716057016, 0.7527669057186459, 0.6380886386789195, 0.7126859571255652, 0.8438238242463392, 0.7705296304506202, 0.9416117527434875, 0.6929574985340414, 0.5792973382367503, 
0.7499847264299684, 0.6686541958096948, 0.9661978773743825, 0.5219034534830342, 0.7288920950094833, 0.784199590066473, 0.9598200476180332, 0.5421954416120043, 0.8883842963121291, 0.5594254330153052, 0.5923554560588589, 0.9265418505928906, 0.5926293918493066, 0.73944535169245, 0.6078466454002233, 0.5705684839605403, 0.6175335295242054, 0.9555616268922409, 0.6060312598293789, 0.6238177625829736, 0.8905967364102092, 0.965995848178241, 0.6672194736252975, 0.679295688214544, 0.668264362036068, 0.6702964351037402, 0.7861920800459061, 0.9672724050264365, 0.6205382908035727, 0.6537371330407671, 0.9958842716507084, 0.9921986987332176, 0.8076025224734611, 0.8948593029587961, 0.9770824073136776, 0.5669181927065856, 0.6447157923625066, 0.9511440563337042, 0.730647970174165, 0.7149238582357347, 0.5975193434053837, 0.6472787312054911, 0.5057030796619809, 0.9116441343248667, 0.9951511730511973, 0.8640802478246319, 0.6805185112723089, 0.7786858398280536, 0.522040985166287, 0.7042702417810474, 0.5189098782241398, 0.8758545469436506, 0.9002523738233545, 0.5513997787033145, 0.9372278440853059, 0.6473375892829814, 0.9186499922352047, 0.7806996595369211, 0.6991427067110926, 0.9942290992429857, 0.8009532471790977, 0.5065501022772412, 0.983951417712493, 0.8675699907375343, 0.8003994965479513, 0.8252966653043887, 0.8568396592256621, 0.5501692772067117, 0.7013790813587252, 0.8556658168122672, 0.6802727101998693, 0.8667950557351437, 0.5848588940313315, 0.6145372729265618, 0.9406378167185738, 0.6332971962338849, 0.9261420879791882, 0.792584963599964, 0.8874539158792039, 0.538355922137934, 0.8002817406782873, 0.6255706294932928, 0.651579781440563, 0.7472935823868303, 0.8249056045812431, 0.5190205560171492, 0.6884108224182401, 0.8442860985501407, 0.728179446005324, 0.8358453637123371, 0.8463288271944187, 0.6755635008422749, 0.799964254832213, 0.5526008482804977, 0.59454797925433, 0.7197576196108977, 0.9103968604509991, 0.7478416755577582, 0.8683772202889608, 0.8901413931483446, 
0.671949694047252, 0.9999890725095651, 0.8566119952785733, 0.9779449172381178, 0.6379759921849619, 0.7664684176213934, 0.6027792961275733, 0.6033504838333343, 0.7837599504492747, 0.6736187224843657, 0.8475406857785279, 0.5389440074556124, 0.6966605354390607, 0.9691752461472307, 0.6697495507466413, 0.8019743093793732, 0.6530682404118844, 0.7616198604895271, 0.807311619997193, 0.7280385794672308, 0.8113842401285126, 0.8487501932574756, 0.8515185279830164, 0.5432351513818195, 0.5956439886850191, 0.7111066634157273, 0.8722176203147746, 0.5023590333402096, 0.5763950591637534, 0.7330744712591668, 0.79605542833992, 0.5637216708086312, 0.8867895387226787, 0.8301092129018082, 0.9235375023892421, 0.7767604934286696, 0.6323095812047783, 0.8204237123238949, 0.7262049142078963, 0.5528584755196817, 0.6068608679430909, 0.9860264592663883, 0.6843248965382112, 0.7041019876385921, 0.7743331658458629, 0.9271635541429235, 0.9871946106765723, 0.9412532438724812, 0.8957251776662918, 0.7003329207175781, 0.6847662179488512, 0.6198976811329218, 0.8706599205524564, 0.5452621079190303, 0.5006786794389907, 0.8437410175859938, 0.6473886075485872, 0.6460697532769233, 0.9498581948223833, 0.6434735636258287, 0.5484988420479435, 0.854546814465954, 0.5022911334877558, 0.9988674286374938, 0.7075179327891223, 0.8920368577012373, 0.7496193684701479, 0.9284265292450455, 0.6312286078510687, 0.6606956880732597, 0.7410956610330506, 0.5447258477629295, 0.7039347073170724, 0.8854783761821274, 0.8561134025842907, 0.6373169507990575, 0.8968589457296148, 0.8023908978652984, 0.915381881947771, 0.8462817560395072, 0.5972021755525376, 0.8289159492829166, 0.5889714225581953, 0.740859271445308, 0.6203512520668538, 0.9841058736026633, 0.6380295370434458, 0.8184003911911892, 0.7783665401104432, 0.891076290067148, 0.8308029549095313, 0.6545054222885321, 0.9427379774367926, 0.5035011542187171, 0.9530541082445968, 0.9429655547154183, 0.6373407754655531, 0.5763650400305583, 0.6432463637272665, 0.8283186902797717, 
0.9466595688319337, 0.6949894379673708, 0.5527236600687104, 0.771066141486985, 0.5849324165484786, 0.7061640371758151, 0.8517695069016611, 0.9697876137199553, 0.5857917871564251, 0.9878902448171833, 0.7658477263717298, 0.7479474773448842, 0.500283595217448, 0.9235626221071092, 0.5399261110809552, 0.7712025244837963, 0.7778492569595246, 0.5937212873995683, 0.8076708210471049, 0.549915166833498, 0.6747702125788868, 0.8233175908009797, 0.8027760487468032, 0.947140209646828, 0.837027746200257, 0.5870412575384322, 0.5646031018039147, 0.9548310765691308, 0.849048995086068, 0.8145008731715419, 0.9807038777929136, 0.5374801530355516, 0.8360272908879307, 0.6974574497984676, 0.6804140717931904, 0.5525694432011743, 0.8028622988954496, 0.8843068845616251, 0.5881707234726389, 0.685733329814093, 0.5535219208272222, 0.8391024219348393, 0.6336381759344838, 0.5931908376909854, 0.5110024275267792, 0.7759218952929934, 0.7975492190785226, 0.6781521214716197, 0.7905253970321084, 0.9089059109778097, 0.970026608513902, 0.7802111589390517, 0.9357399235747326, 0.6487394592808684, 0.5475101083434972, 0.7915916024689456, 0.9767040118150065, 0.7885863776134125, 0.9688808846833412, 0.8073010577145494, 0.7498796570619833, 0.5821469549307154, 0.8548751715269813, 0.7656672190967198, 0.7792166106304281, 0.689312592104896, 0.8398150873642811, 0.6667220530412856, 0.783562664643751, 0.8590343403105349, 0.7358216782434213, 0.6008299077672877, 0.9204336506145847, 0.6638863251643867, 0.6351664125542968, 0.5004936785832387, 0.5482748788583474, 0.5750129180771658, 0.8826310698456141, 0.5291183037298564, 0.978484138125588, 0.8614758133277884, 0.917927730375349, 0.7648870109611274, 0.7141994472331228, 0.9951932855920949, 0.7336534015035974, 0.7560884704989268, 0.7922296867724157, 0.5848038660133018, 0.7247786732186741, 0.6397764456499901, 0.554783397928825, 0.9823965932273457, 0.5643025796725144, 0.9047713408830762, 0.5734130735964242, 0.6354083130454176, 0.8031274640492178, 0.5969392007819354, 
0.6673201074803636, 0.8597179982410517, 0.9169356728791972, 0.5108690176230082, 0.8608292495881491, 0.8840670385004357, 0.8736828164384403, 0.7549148410026898, 0.5054415563095264, 0.6065033629792735, 0.8615875612298397, 0.9697712862762455, 0.7856482565453871, 0.6961574752417466, 0.5025070193431934, 0.7103955591620855, 0.6424844459488206, 0.5143598558821225, 0.9448767230920951, 0.6189492555539172, 0.6274612720644688, 0.5480972482336613, 0.7412337731264186, 0.6771118281385511, 0.9823973670865702, 0.8535496631022996, 0.9244244620623395, 0.8567816703772919, 0.5490187486463598, 0.7031882203129964, 0.7555909098860041, 0.6497289390523511, 0.6904094049111682, 0.823439963214178, 0.6193445002151996, 0.8012923777572998, 0.7870353347285257, 0.8692181902146593, 0.6215651761942373, 0.6729283067431397, 0.7014505398955424, 0.7898154384742022, 0.5183645937125889, 0.8557084899711053, 0.5781060252413519, 0.7015983499428071, 0.7614719121256155, 0.7276638424078115, 0.989367211304632, 0.9583752192848276, 0.6696030463913423, 0.9411493750819574, 0.7718379555154619, 0.751769703531329, 0.87715715014962, 0.8489714499103495, 0.6812579583525643, 0.9852789639720603, 0.5430050698467095, 0.54583579545345, 0.6393404653042908, 0.6951623646582444, 0.5780853840675841, 0.7927046191936935, 0.651414003389065, 0.6500293902280228, 0.5647582004947411, 0.7715560927961693, 0.7987831533463672, 0.7147042585382939, 0.7943493355961224, 0.735589350741807, 0.7862663146312997, 0.9557210611505772, 0.62636886494895, 0.7900674816985984, 0.8932499333033821, 0.5785692125240814, 0.731960933713486, 0.5142237692920122, 0.9981873727752387, 0.6002761757258606, 0.70045859675568, 0.9266041326985952, 0.7307940159857969, 0.8538065375784061, 0.5175001235833231, 0.9848464189838884, 0.8039832887567534, 0.9115924854265914, 0.7695287650244158, 0.8786052570554139, 0.8233827881622506, 0.9417329514809119, 0.7369063210950866, 0.883775659658299, 0.8564254056683114, 0.8994913563914113, 0.6164346882209076, 0.6350574881880251, 
0.681787193784655, 0.9736627786615188, 0.929321427097497, 0.6671349521708143, 0.7959882193745534, 0.6215826651088379, 0.6148616291417023, 0.8538767877956737, 0.7280231808011166, 0.5895123490003606, 0.5484985452725487, 0.9544350222439223, 0.8628348279501519, 0.6046183956839015, 0.6102672112123625, 0.889382260555265, 0.9076863418159642, 0.8907682737016387, 0.5031881690192604, 0.9274974466232678, 0.8800685799067312, 0.8136736716276366, 0.8294858408524158, 0.8625394498359785, 0.7035655758491417, 0.9882139683818388, 0.7917928942637094, 0.6733479840008174, 0.6778400229329319, 0.5516943672318371, 0.6024932356262798, 0.9342061944764047, 0.5205604268654549, 0.7545606902956739, 0.656365789041446, 0.8917696865253479, 0.767790619358785, 0.7310871792888101, 0.6210428056051562, 0.9458185787886332, 0.8955782681343257, 0.927550237028726, 0.669362114655452, 0.8055218099708195, 0.5349420876371607, 0.782568221055878, 0.7931314795568623, 0.9046653290685214, 0.7050835403222471, 0.6306923115293621, 0.9629364703176793, 0.972715797756221, 0.712000119967324, 0.8456537347376867, 0.9409887192073099, 0.9519706521271346, 0.9105994365340969, 0.8000324117001969, 0.7046178929498002, 0.9348888074247768, 0.9040650728892008, 0.9183379132676706, 0.7690724495252617, 0.6562657182184731, 0.5106528714510243, 0.9818324639170918, 0.8905377926700979, 0.8544973285819251, 0.7433513201425614, 0.5153419974552932, 0.5809071681611317, 0.8970876903958231, 0.934323155253425, 0.5826009397414862, 0.5591499753609552, 0.6589493895805866, 0.76328482394548, 0.9863618683060935, 0.8319063635879048, 0.6642041349300754, 0.8112715053993713, 0.8659956640148471, 0.8075636834934709, 0.5382386588359318, 0.6270836462505656, 0.636769585707837, 0.5960224084520389, 0.5611007198705824, 0.98067521986524, 0.9642162683906197, 0.9716475065182791, 0.6372772132454435, 0.9489907175757302, 0.8837511752872566, 0.884342201486541, 0.6275440179834901, 0.5949263647663874, 0.5580267556845165, 0.8983630131881736, 0.7230962940311143, 
0.7889506137689962, 0.7200047791485691, 0.9507003796118703, 0.9084858849983984, 0.6150284377595598, 0.9146110474092761, 0.7247675925663379, 0.6829359305930309, 0.6735678443982624, 0.5906353257003272, 0.8550185064926245, 0.8145103208332477, 0.9421844008851867, 0.6887046596625287, 0.5231545968897255, 0.5838787696919314, 0.7609973945850562, 0.5096362403158128, 0.6960803350663023, 0.5026206497787857, 0.8782420966663695, 0.9853916554414848, 0.6211937869123032, 0.6438833881684957, 0.9005999106534697, 0.8821481701315337, 0.9068427212833999, 0.5332293315053799, 0.6397625790516144, 0.7323377791922316, 0.580283558139024, 0.6074166234692547, 0.7376696236865778, 0.8312982685515615, 0.5964371940639182, 0.761508779206485, 0.6515109743759074, 0.6673804612522298, 0.7720640026296239, 0.7053736269022838, 0.8881415300347824, 0.6088922760451594, 0.6469122564955287, 0.8996043502735183, 0.7817782932605313, 0.5332582562266138, 0.5358392487857118, 0.6937910749529346, 0.8795894594082868, 0.5810896980743301, 0.7268260104050757, 0.5500997063325453, 0.5404730434731347, 0.6516467089054464, 0.9292085942441499, 0.5452877775638711, 0.6865086469185814, 0.7454118785730244, 0.5420736049220414, 0.5478283349922266, 0.7557547473147543, 0.6318100786221919, 0.7546042967383388, 0.6325079829915439, 0.9030760394521966, 0.918527716498478, 0.8210958941005734, 0.7287135402937359, 0.5616086215811065, 0.9845941762149554, 0.5528925560763482, 0.84781742016541, 0.6186776455256677, 0.6960320522812924, 0.531527437446679, 0.9909244686885387, 0.7135524852073378, 0.5635128353691278, 0.9660307379457569, 0.9257275406144361, 0.5744877232759895, 0.6265777621105335, 0.950416623280475, 0.7164865776356553, 0.8212838158073955, 0.8488920789926935, 0.8004557478500527, 0.6165283133898181, 0.8830286847057784, 0.6303091228742634, 0.8697551418946627, 0.8067303321331234, 0.5991790012548925, 0.8139262989196592, 0.8980122496626233, 0.8180144284951114, 0.804841606029084, 0.6755456552413834, 0.7194469168483112, 0.7113862567263316, 
0.9391729707486145, 0.7060185309136409, 0.63369514710858, 0.7217171978554319, 0.8150385307823964, 0.9717833403327056, 0.6091762439939585, 0.6568927797804597, 0.6801194810742897, 0.9589293406160424, 0.8227865945641452, 0.6772311728351154, 0.6088168933317386, 0.6543712205903567, 0.5540943931370004, 0.9978376128400754, 0.8763609143961735, 0.9983455565821994, 0.6491378664476957, 0.8439624460416344, 0.9425369489836652, 0.8115040991524232, 0.5041993422851851, 0.5066676272228738, 0.9428489716906012, 0.6753546029331077, 0.9359975064876349, 0.6431099189746223, 0.5925137667563014, 0.8825560373951133, 0.8608413100712253, 0.746308965412027, 0.750931781817192, 0.536708554147068, 0.6662662681634683, 0.6749531904332361, 0.9581035950015623, 0.9558531205304364, 0.7072459427986768, 0.8351359153258945, 0.6648469330184985, 0.6831864412936255, 0.5306921110582403, 0.6367165138502828, 0.622654980329104, 0.9298931625043481, 0.7403364345417287, 0.9794174295789343, 0.5913521671105377, 0.9621443729899763, 0.6826285565099202, 0.82703469485978, 0.9582141176815815, 0.6927616822535059, 0.541376267577296, 0.6359216323152093, 0.7755220705770718, 0.8862656616129236, 0.7850408022635273, 0.787071801628469, 0.6472127642817114, 0.8427759128523755, 0.6063610379216244, 0.994415508413768, 0.5741768840885085, 0.997254073538443, 0.9698087463473631, 0.5231746133890698, 0.9232492710864724, 0.5467568193712535, 0.5158699203574375, 0.9082579010855756, 0.8129312173179633, 0.9598083048750967, 0.5433273008214976, 0.8362521478702463, 0.9424416553793007, 0.8132043168457153, 0.8784995392808355, 0.6744251673745094, 0.7156970776597313, 0.5266319845489498, 0.5961221705291477, 0.7599773179257598, 0.6205739596504456, 0.737254187135288, 0.762082620448183, 0.5092227650684051, 0.560760739045285, 0.7465670011804313, 0.6478708225704013, 0.9090224822310274, 0.9670051096605482, 0.6824227959828251, 0.95033512131611, 0.7629364788527833, 0.7699503617895109, 0.8760688530278082, 0.9393729449735749, 0.7365298117461847, 
0.7044394495243631, 0.8743673825868392, 0.7978372909668863, 0.6077068525909507, 0.9386293373330356, 0.7444144492263859, 0.5679534869819012, 0.541429796787755, 0.5813151557517919, 0.9041436939895724, 0.9180711519498898, 0.8119815103181478, 0.5284274313624255, 0.6916352590038561, 0.6436424985236393, 0.8508050273704881, 0.7005672049279562, 0.800489121668085, 0.9055523911481701, 0.9114836818620782, 0.6080280561634244, 0.5050471348423324, 0.7852609696580133, 0.5871702473417046, 0.6766288641444045, 0.7270352787106027, 0.6508023911144564, 0.6261471244521857, 0.8552588189862989, 0.6334256261445839, 0.5582023794571718, 0.9386030716194507, 0.606015931903215, 0.7709190991891464, 0.6037508946659159, 0.8035554953839235, 0.7248895180606347, 0.9298971056713594, 0.8181474226620957, 0.7186500043511186, 0.9551227244875687, 0.534001055406421, 0.7082720777996754, 0.5243293377149054, 0.9854201936863474, 0.6606423024804928, 0.8733939320455866, 0.9093797151912875, 0.5302382767736797, 0.8363263366636262, 0.9590583409571752, 0.7857678788443323, 0.671542992970998, 0.9812095004682386, 0.7747593227985345, 0.6151978438518579, 0.8936984992801649, 0.7702170663900283, 0.8294283647472993, 0.5721973740194601, 0.984653926845859, 0.7810959351674052, 0.8559601750247987, 0.8284467220060626, 0.9829164350671231, 0.5298915631320722, 0.6997718733825651, 0.7368495323891187, 0.6214629998536256, 0.8322896287685153, 0.5663603543817908, 0.925982793345699, 0.6083733876088906, 0.6563848496589828, 0.5470132563654144, 0.9096854412617157, 0.9970991887182736, 0.9434111942932225, 0.607200977815701, 0.7481367793354567, 0.6302916817175865, 0.9259344614652958, 0.848742179729441, 0.5154279734111142, 0.5069554046762695, 0.5243052824770997, 0.9826632421746914, 0.9938714373313472, 0.6320670711020737, 0.941756516185656, 0.8342844969845647, 0.9996615987486898, 0.8150473079453858, 0.9603199208905124, 0.5930593932972912, 0.5071484993460009, 0.6546802833136062, 0.588926111600269, 0.6005602175341578, 0.9191736076229955, 
0.8367163003244831, 0.9904233650784635, 0.705380571704605, 0.6911191795511944, 0.7270391224554129, 0.660791524672184, 0.971891506073771, 0.5675939209366704, 0.9546702309868225, 0.6920990029216099, 0.96490096837978, 0.6160467814990132, 0.841470527688554, 0.9196403822953543, 0.5348211827769215, 0.8760316855813775, 0.7488314212442408, 0.6322085096507671, 0.5881116779206604, 0.7374742252870179, 0.9158799384269274, 0.5503471723479167, 0.5630831144548548, 0.5156396589227219, 0.7519823135004517, 0.9041558744311919, 0.6154088305348631, 0.8649622876989276, 0.5860123612912809, 0.6125314753685736, 0.9663357095548737, 0.5138240079055596, 0.8195754517497023, 0.5134656505731949, 0.6049986062030027, 0.8162033696505947, 0.8937956908641193, 0.8804112917422997, 0.861770123210416, 0.9307376035945603, 0.89613968418629, 0.9987105462944167, 0.6940280434343875, 0.7112012464276589, 0.7450770109779163, 0.5701771235544055, 0.8392480185718459, 0.7190189027254659, 0.687648433111543, 0.881474630155427, 0.9838802059079825, 0.6086715011514571, 0.9087518914174577, 0.9068675217414215, 0.9420710620206947, 0.7683944502188702, 0.9892306057192994, 0.913560531111065, 0.7898373243366226, 0.9299721690642049, 0.7706673483234223, 0.7938431597784696, 0.5947775133985825, 0.5840827249963221, 0.9908578975030518, 0.9107419824242746, 0.7349737149271032, 0.853351041610739, 0.5372842611481718, 0.6522952052258499, 0.5680813709829905, 0.7497711408816807, 0.8087804299872451, 0.897600637753431, 0.8401703075717237, 0.8650612446708397, 0.9972021802611987, 0.8891258420451276, 0.5804191944921849, 0.9784213218368495, 0.5721748240049557, 0.9181378126526532, 0.714604401723242, 0.5188565281990711, 0.6094809772714151, 0.5440493179180077, 0.5353855165219457, 0.7990290207530433, 0.560784254989328, 0.8372802933990131, 0.8766547693114857, 0.6155398221726094, 0.8043195251623018, 0.5782209208468876, 0.956904149112966, 0.5686798758524221, 0.8401625218764154, 0.8740372864237229, 0.7980520793955175, 0.528821849888536, 
0.7697084360726469, 0.7113852186940923, 0.8773771854082639, 0.6445327916089127, 0.530017379912019, 0.6522497760437225, 0.8175460879561388, 0.9212503363856326, 0.6519783461878499, 0.9049778092120777, 0.8641184012714058, 0.9094615847700045, 0.6645118452815091, 0.5490884357244956, 0.9897885815048785, 0.5003448595106217, 0.9531575912829717, 0.858808895627132, 0.742886054307146, 0.7431844534272574, 0.7446806975554774, 0.9956200850324852, 0.6110717087645635, 0.6904829479463528, 0.5850682025715054, 0.9418494254947991, 0.5372279145436134, 0.6067046822383193, 0.9370391759312678, 0.8561666386299032, 0.5774098405152043, 0.5317014662215525, 0.7702479598342147, 0.6096554070782605, 0.5715350574971574, 0.7974537582210657, 0.7662381128749218, 0.5388448311545037, 0.8144448315028103, 0.7474299985335936, 0.7993176283886455, 0.5250312834053407, 0.7689484165117437, 0.5700366108419495, 0.7942743486818047, 0.9947883069207002, 0.5952708912896169, 0.8736259253874602, 0.7421703643460359, 0.84366149410221, 0.953536329009735, 0.9040795464772841, 0.878728895101022, 0.5810131732629418, 0.8418384909921581, 0.7063792764341392, 0.6849606602149108, 0.9519631837168987, 0.9970923798714753, 0.6735055481017067, 0.7023584307616989, 0.6053256091703483, 0.648758457522599, 0.6380637676275844, 0.5391651875448693, 0.8351435698612248, 0.931969472220828, 0.5664675779938622, 0.6935858623658184, 0.7390771245840144, 0.8853915633661159, 0.724933747359952, 0.9131456540748952, 0.7902545814425691, 0.7535320903130843, 0.6777151116310733, 0.59289711409026, 0.8522233518351878, 0.5312069672954106, 0.7584857483015316, 0.7501549675097468, 0.8488152048718076, 0.7368113116396979, 0.9967324913157873, 0.6305016655623388, 0.564308676489043, 0.8645208255026593, 0.8608578965642588, 0.8338463985999334, 0.8709126055298575, 0.6194997413165775, 0.6159033470244649, 0.8700651570911664, 0.7087220705833732, 0.9310982596204631, 0.8867385495423031, 0.7298201169294134, 0.9816003679018678, 0.7881345430450717, 0.7781168291279379, 
0.5372104425489753, 0.9978888974301486, 0.5034461465480035, 0.626558206177362, 0.5813475512576597, 0.6516506249191055, 0.9745225611488962, 0.6439759407432168, 0.9561256899955315, 0.7660877347399947, 0.7695823171898448, 0.7389165626695947, 0.6397128132958534, 0.535873239091619, 0.5504755473899352, 0.6050290119511981, 0.9496141168633735, 0.8167054509120133, 0.9674390663586476, 0.6245543545734322, 0.7404833809601739, 0.8885215799120478, 0.7310707121901723, 0.6048526977111925, 0.6689461823310632, 0.8665664101184883, 0.7102939571517389, 0.743658851257321, 0.5400247837439679, 0.809042460334112, 0.8462063781287481, 0.8533867610134628, 0.8181191826987597, 0.838488803794036, 0.6711579400785384, 0.5252831421642774, 0.9034524340362475, 0.8627529902123785, 0.5946193717060984, 0.5311642769720101, 0.8687281891452563, 0.8711073458171652, 0.6415958066576757, 0.6288128154316082, 0.7813615197144257, 0.5290812250240087, 0.6881487137225089, 0.54678895684934, 0.5774125126593777, 0.5533677921713167, 0.9644517236504084, 0.9869724015724718, 0.5422274719336662, 0.8249269196646261, 0.8159771094804776, 0.6681359316123019, 0.7598150738017191, 0.9804461888855154, 0.9006748575622734, 0.9631909160065486, 0.8202922680429464, 0.515572935626581, 0.8962478022821909, 0.7458013696669781, 0.802912948445575, 0.6040170104087356, 0.6472797934426807, 0.6961123548977808, 0.6280185301876348, 0.5043920357263018, 0.89505126710195, 0.6560078974464004, 0.6173654389999162, 0.8069374072088216, 0.9149929182160308, 0.6916673456539808, 0.5371527179206648, 0.6567956361093151, 0.8825041844540138, 0.9318767578656553, 0.8095955931734302, 0.8045694746090303, 0.9155256683833584, 0.7649943443997079, 0.5813855145349668, 0.9097897763431642, 0.7638285615054284, 0.5650143733508171, 0.9519764633446854, 0.5947936491166156, 0.8038724491654601, 0.675099465693111, 0.6144815643219295, 0.6024098141724679, 0.990224585092311, 0.8349832092572738, 0.5288150506810114, 0.5766600206646073, 0.5181234213346435, 0.7317282749089623, 
0.8967616477155906, 0.8981276021923069, 0.9137716859118147, 0.920026191724426, 0.8975977383398379, 0.6681654800389228, 0.7072972596171989, 0.9169326115198713, 0.953198380516066, 0.9364923352836916, 0.8374791721119484, 0.762238435548673, 0.6088245720621179, 0.5234213170832598, 0.8347113069379362, 0.6738992785491397, 0.7099532504877508, 0.7248072947666544, 0.9194100361713875, 0.8735015273367861, 0.5328496721264376, 0.5532950256181228, 0.5939269214323347, 0.8148807676472447, 0.9500581898604192, 0.6450438948805235, 0.7567447311798142, 0.9730907063743932, 0.8956488741425397, 0.8470055300209502, 0.6815116658983802, 0.6425672298063992, 0.5341729351898739, 0.7773348284685909, 0.968948765096239, 0.9492035398065063, 0.6431371992973163, 0.7712711189151913, 0.8563588902572159, 0.8856722519154518, 0.6726900863433973, 0.8348592406493935, 0.9045075724434056, 0.5904276689043131, 0.8360812281360263, 0.5468865938776291, 0.8345147016915361, 0.6198984799215088, 0.9548729375764802, 0.9096786001190222, 0.9163671751557005, 0.8757305525697547, 0.921672531106857, 0.6856325439258137, 0.87290653242636, 0.9155536490557503, 0.6025668587184725, 0.9568763940848759, 0.9038601894220482, 0.7571532898799269, 0.6870574208866298, 0.7446081863528915, 0.8809432606553066, 0.5640102531136231, 0.5336438157132783, 0.7110336797683774, 0.7855409951676033, 0.7453906458479551, 0.6446775782426217, 0.5701756697555357, 0.9056481007582692, 0.5553030135656825, 0.6957718041064875, 0.7078754185383407, 0.6057089229390525, 0.5226741526746681, 0.7561249600670796, 0.5484932461019292, 0.9974358515892464, 0.6200293247591295, 0.5501211735959364, 0.7543837005413392, 0.5121325079238646, 0.737758439610549, 0.623952619159305, 0.9649446693559773, 0.627267167162605, 0.5406714824191547, 0.7049898743480123, 0.662606357453968, 0.9717828084605314, 0.5001007728565439, 0.8076937490958784, 0.9930145655778083, 0.9934190341194622, 0.5372194020651967, 0.5470798557182446, 0.7715016451001769, 0.7124559522812943, 0.9516461848018731, 
0.8511815789447599, 0.8281330201502235, 0.6358452175627107, 0.5918692402497776, 0.8123718542806344, 0.9371311876157943, 0.6316798068693747, 0.5953098777566117, 0.967750809110304, 0.8608389884141813, 0.9830995078038236, 0.5682722291487444, 0.644423486864973, 0.6112873164431536, 0.7387969252282676, 0.899555217090134, 0.8841030220608433, 0.8990394703738889, 0.9621846870191408, 0.7416178075039164, 0.9741587292441526, 0.9228986875478251, 0.8614454695917654, 0.5963640438134946, 0.971994973983044, 0.9384797316592097, 0.8206228820782208, 0.7904907639928132, 0.7642585401028887, 0.7791497779113443, 0.5035090320631148, 0.960080336758873, 0.9259638799622865, 0.7285953695264596, 0.5396422498123314, 0.7724466868558174, 0.8666000743718267, 0.8037147270309474, 0.7727288962096248, 0.5352153753711426, 0.7787729798290242, 0.5110627797057707, 0.8818005765972493, 0.9317990734474595, 0.6098254812407702, 0.7782245992294918, 0.5228187657894288, 0.6284419816948477, 0.7052852129227595, 0.6701965102577425, 0.5723683967923596, 0.7245269306819653, 0.5229986390112877, 0.7861962409667986, 0.6704887842740082, 0.7808019364311835, 0.574340649647451, 0.9967106549275113, 0.9447004653748382, 0.9213629883725583, 0.8605635153708685, 0.6140331534425785, 0.5689915508302021, 0.7374637716468855, 0.8102851003659373, 0.7248332439304707, 0.5177986707596574, 0.9863148980605809, 0.5062701619477419, 0.5235358825885144, 0.553844871470832, 0.7902106074226691, 0.7802087741384136, 0.7976716782209652, 0.556519906372129, 0.5060329465550386, 0.9403233443520682, 0.6005300649193837, 0.6449518899863759, 0.9108463534666501, 0.5678368361752374, 0.5969719513298078, 0.9088601852109539, 0.5484646879458346, 0.6174236229004311, 0.5122722630504786, 0.5078670347982507, 0.5647049475128223, 0.9777915458711177, 0.9170364086702582, 0.8449756759502206, 0.9121430418390177, 0.6670768312750699, 0.7865936405974332, 0.8555481956709106, 0.7845949569053177, 0.5172700841106017, 0.653571758616795, 0.902087314076732, 0.5982983064546501, 
0.7263260778505618, 0.6933492185724504, 0.837073315231168, 0.8129399626803537, 0.8988597472245965, 0.7795193441195182, 0.8400655373294329, 0.6477034272468285, 0.6984647061004766, 0.543888953366447, 0.6205314501979986, 0.5345958335157641, 0.9198778960765928, 0.7773684261676573, 0.8370868060123868, 0.5985928797344038, 0.508409196586933, 0.7577295481864958, 0.5685467976725367, 0.5272539317301131, 0.5558380528793307, 0.8766319889889987, 0.5560344611738606, 0.7436144694602573, 0.8579866515507674, 0.7798220200916248, 0.8890909857176488, 0.9274066437662275, 0.6566877764904833, 0.8569398920366005, 0.9695257988721264, 0.8992461895014272, 0.5428957362177551, 0.8795870373936011, 0.568739452104112, 0.7020294471219787, 0.6418556169038248, 0.8096788352396824, 0.6463166948309084, 0.7970867537283132, 0.5100425241213292, 0.6845502505066721, 0.5916379199989413, 0.9813463123280485, 0.8098868834686317, 0.6603466883306526, 0.6150197257922709, 0.9631776569147819, 0.9519629373198029, 0.5438808370920838, 0.9755767356958703, 0.510161581152299, 0.5913385435970226, 0.8785473422487903, 0.5740849349540527, 0.9518707748994536, 0.63894981780765, 0.7237331764201143, 0.929666929785657, 0.5904804840185893, 0.5678823170994716, 0.8991593898925567, 0.6769608600713344, 0.5343009622425677, 0.9001649985985514, 0.8412766509145735, 0.9941499573745709, 0.8535216114040356, 0.5231105819988642, 0.5355962323962178, 0.7791742505917729, 0.9845989132516673, 0.8011502388058326, 0.5418479286321995, 0.5891115232369881, 0.6300537889296579, 0.7068577756977491, 0.7358165601061118, 0.8826465663874905, 0.5526484836442878, 0.7085084182136523, 0.9561932998631646, 0.7712421357038155, 0.7154466662461073, 0.5030929439562177, 0.5695911042646833, 0.5063358678923799, 0.5824783790763947, 0.7593729293503193, 0.5169074668247586, 0.72943793995431, 0.7559753521096124, 0.6195620406919814, 0.9824399369201453, 0.857806396592556, 0.9799038552444248, 0.5882942075855355, 0.5523732518731044, 0.9095345382783865, 0.5969912587228431, 
0.6754902137195709, 0.5059067953600819, 0.5314121680303361, 0.7040886388367896, 0.7052120468927736, 0.8286091969470049, 0.5312207477093843, 0.9450141110622412, 0.7766739258837754, 0.5570801279781947, 0.8090856031583484, 0.6633486221264805, 0.6922582463446955, 0.6916121672137217, 0.8509380723900792, 0.6428763845741442, 0.9993176834465353, 0.8620626189818172, 0.8963773138724872, 0.7502201522576246, 0.7246966713347878, 0.8472356105095934, 0.5570916252994773, 0.8527790200949997, 0.6905158371285149, 0.8686335428001235, 0.7564138864649287, 0.5303128847393619, 0.8771252020885424, 0.8447042963996012, 0.8405187795371408, 0.56788420845023, 0.6309504178229601, 0.6920415526183609, 0.6799842956246245, 0.7486225025792234, 0.9318420210734715, 0.8276237639400117, 0.8206938655673592, 0.9861978160926953, 0.8945292517633889, 0.9308706906729607, 0.8909476369518337, 0.9194656835652684, 0.8668002848909806, 0.5164935950555187, 0.781366767532962, 0.9621143875464775, 0.921084274088598, 0.9096991514275966, 0.7148182976792086, 0.9438360971524276, 0.834322041102042, 0.8914017484181315, 0.7033930311155339, 0.8813127151682401, 0.9902344584488856, 0.963194526482896, 0.8847801869845604, 0.6924836084766148, 0.9424428134831723, 0.8645969570416581, 0.5199618578367295, 0.7176958794280535, 0.651553549092627, 0.546468806384139, 0.6460236353283902, 0.8888451894708335, 0.8654817605844765, 0.9830313379467923, 0.9816210355625252, 0.6566812723603068, 0.8770606618162291, 0.5998761928805015, 0.7108023683894178, 0.8819135760051076, 0.7787031869108472, 0.520042135357538, 0.6731879036731189, 0.8598276534009377, 0.7089003035103445, 0.8317226295557936, 0.5397369080230847, 0.5650529307537782, 0.693668595708671, 0.6068465888549963, 0.5635471078165786, 0.9137891405570052, 0.7354714020727414, 0.9756678663603968, 0.83872604004347, 0.7414409055470175, 0.6547944226442757, 0.6785839861938363, 0.8470498587280457, 0.9416399951616493, 0.9239372048633616, 0.9125445553685679, 0.8818908624146109, 0.8923571095970346, 
0.9794563745277682, 0.7988885948975444, 0.6122375375582605, 0.6505829923670188, 0.5903718549493966, 0.5660938810391687, 0.5848036193716426, 0.8055424498155264, 0.594048733354418, 0.8306090275182096, 0.9705115055860707, 0.8412820910471319, 0.6109017954682394, 0.8482486188291236, 0.8313994316040991, 0.684204940543321, 0.8187916436906317, 0.5124478697635944, 0.7328251250383847, 0.6088286245442946, 0.8290790394112408, 0.6803514192033291, 0.9060424772421445, 0.9790793740047321, 0.559942951318064, 0.9967996432576448, 0.7289275317559141, 0.8814158509247254, 0.5770420392261617, 0.7861202483114587, 0.9386554425997157, 0.883318730556971, 0.7894172211969323, 0.8980265329157398, 0.9841505908649173, 0.5903360112086389, 0.5584114562262605, 0.8658334659532045, 0.6509554582449064, 0.835116704480995, 0.7214926893390619, 0.5620184315215035, 0.5137836936501603, 0.9088678797272909, 0.5705729952772097, 0.7855120907270883, 0.5636087206614744, 0.5626493701165609, 0.87632511040835, 0.8819262583708306, 0.6106579227460458, 0.5536501664935226, 0.564246481389151, 0.6695545275469923, 0.6570826374344803, 0.580931229008703, 0.7385701931717683, 0.8793943074983476, 0.8428746013369148, 0.6596059978434173, 0.5461126295708381, 0.7652112412365288, 0.9894734329893162, 0.6013036856772624, 0.5498425246592341, 0.7490626790057467, 0.5653311622687904, 0.9369579354131079, 0.6945659649605745, 0.5369277113017739, 0.747940813657719, 0.7125291916858183, 0.9984843388557005, 0.91755953566714, 0.7033712569528092, 0.8910453266076424, 0.8575422666574519, 0.8159814992462271, 0.59080155327161, 0.9173875183279845, 0.8722917385185754, 0.6918760215410416, 0.8089234002045294, 0.6844063704949395, 0.8127217874949462, 0.5002649617011421, 0.8787086140613563, 0.5839091724575407, 0.648542676178111, 0.8073169804049072, 0.9201610453642499, 0.9185323574172963, 0.5530710548711997, 0.8888624673214521, 0.6593882991388662, 0.9153117875987159, 0.5368574424210573, 0.7034565500508074, 0.8233417927853754, 0.6745962731714268, 
0.6245818800936751, 0.9546087048150247, 0.8265896732305047, 0.9644159559751762, 0.8841569994999591, 0.5400176186539505, 0.5853047751289553, 0.8680032328932101, 0.984116772012442, 0.644463354626021, 0.6159656494032939, 0.5834600280103162, 0.890599160735305, 0.9283510331613464, 0.5715304907326865, 0.7277448537633305, 0.7973126589429733, 0.5540523938158035, 0.7525260424298923, 0.5001662533090504, 0.527150172422524, 0.8605972720060737, 0.8067359034508572, 0.7287997015442077, 0.5911403899102687, 0.6014259951546836, 0.5423168317084396, 0.7927245737154858, 0.555382860922156, 0.9909195995024465, 0.6893197435553565, 0.7577567930980438, 0.7898605163852241, 0.7326764178795953, 0.9527184204462006, 0.7732389860653615, 0.8422455940678221, 0.6500047509024199, 0.6364722354459582, 0.5919001527577203, 0.8203381981036106, 0.8670213971739869, 0.9904221931930361, 0.6704349612516013, 0.5528353686866377, 0.8374178293596476, 0.73915197458017, 0.5085867496936842, 0.7097176517961932, 0.6019682649173121, 0.8569603059836017, 0.9121510286876224, 0.8518213353556447, 0.9737920303653778, 0.8525430148673054, 0.8924972578416481, 0.9919037196455931, 0.6316491948598719, 0.6547623039892528, 0.5853860236982948, 0.6193439461284944, 0.6796120379275716, 0.5259480773187581, 0.7577859984183236, 0.565947049039091, 0.7296505241237028, 0.5391953005051142, 0.6258443069688581, 0.5194950867988806, 0.8454991961663625, 0.9556948078521457, 0.7642333314201, 0.6449002050260202, 0.909155956262619, 0.5050086088407577, 0.5448747469302286, 0.7518742551915595, 0.5859973824632743, 0.7486206080113846, 0.7793180432838247, 0.7126006126634079, 0.8089188179722583, 0.768253179688994, 0.7704817553829602, 0.9263595089902352, 0.6528311750640754, 0.9753284450219972, 0.5268380727691386, 0.7440939937062383, 0.5616776672775488, 0.5995019054949113, 0.6626113408479575, 0.6427337889102445, 0.7297064801297014, 0.7850545927809988, 0.5327386242733052, 0.9687712911140629, 0.8325376591600397, 0.7231563046079619, 0.6399843407412127, 
0.6513332970275629, 0.7180248579960943, 0.620280915701998, 0.8725581313016999, 0.7759697596808106, 0.8056933791356604, 0.6941371883717793, 0.9239185043885596, 0.5514344843964407, 0.5070212693176765, 0.8435730562664385, 0.7338355275506485, 0.5228797209351854, 0.5189415578254921, 0.8446256198512119, 0.7172027199848373, 0.9565712410979842, 0.9635950739852127, 0.538233621201424, 0.7793838798640239, 0.9192699872333001, 0.6590668938987425, 0.5627996357659281, 0.9491848407485636, 0.8343036175345776, 0.9832582150041983, 0.854429215031012, 0.8394372044546627, 0.9021957298474758, 0.5755738954961174, 0.6554769823525217, 0.9894573962290527, 0.9665758364964091, 0.9692867108266732, 0.7702296771132796, 0.8435682118105312, 0.7383674686183781, 0.6269308344389877, 0.6225044636517163, 0.9799849901533197, 0.6288793321219979, 0.751617656079498, 0.5770944733910659, 0.8996647125680028, 0.5038169724121342, 0.5676674251650085, 0.9625882873929447, 0.6691525718303257, 0.9540475613362162, 0.7072763623618462, 0.9799099965341211, 0.5689339902369603, 0.9417857369302745, 0.74279524627088, 0.8009118314199256, 0.5771036090198268, 0.638554144778881, 0.6675213782553175, 0.7773770425653137, 0.9463156359993363, 0.6833900574398748, 0.6726819385847711, 0.9292208121888712, 0.5847434619788827, 0.8784844104706042, 0.5171960507654556, 0.8413400057220766, 0.9166577178637846, 0.694813353671947, 0.9093813438567087, 0.8589931891633137, 0.8508170327314285, 0.7325006343734721, 0.8118601363402562, 0.6242825178449103, 0.6015949405810973, 0.562234805054417, 0.7825693582035529, 0.840791156447295, 0.5295603027748441, 0.870653943189046, 0.6182886334735023, 0.6508883992256931, 0.6974676492904566, 0.5506977476163137, 0.6034381842547369, 0.5812816714461922, 0.5210643953830354, 0.85636438243618, 0.6415709811083159, 0.5706061347437097, 0.9331382036502998, 0.9773375410780544, 0.8032931160262728, 0.6995446297179948, 0.8520782043778738, 0.509653178185848, 0.8667371621967157, 0.9279358364054415, 0.8021769407447755, 
0.816462068058031, 0.6268095671864142, 0.6348929517287155, 0.6095743409678722, 0.7234750265386816, 0.6988358234574319, 0.9619053731190208, 0.5935065544356528, 0.5725384472376442, 0.663463925630742, 0.9378668947909682, 0.839256664170515, 0.6064526859010125, 0.8171320031271541, 0.9889833081911021, 0.882259188965764, 0.8256771149375859, 0.743872874944437, 0.6207709610999335, 0.7134027241193153, 0.5072366608330436, 0.7702254870582219, 0.7574917889210004, 0.7159706010489835, 0.7753014973417109, 0.9672181025274061, 0.6031257473219955, 0.8261052897804972, 0.9944824616836883, 0.607144208998278, 0.6549861346757644, 0.8019288813117782, 0.7338928197858575, 0.8373397144632453, 0.8952620316577133, 0.6646968219144034, 0.8123382262593287, 0.7391158917892493, 0.5412651208687783, 0.6243151703394381, 0.70474871604543, 0.5357085264540151, 0.9660022364773913, 0.6416024556638822, 0.7744958897791452, 0.5586653303737477, 0.688558010541268, 0.7300674136734923, 0.9798138339400942, 0.7019668838446951, 0.6959011636591187, 0.8662526809461147, 0.558748111476866, 0.5713289015646563, 0.6129982364351906, 0.917512782948938, 0.5213274735646494, 0.6735817936888164, 0.6967012111540675, 0.8965002154719097, 0.808581883595874, 0.7135696850756735, 0.9719144731838014, 0.9787832892060626, 0.6314879142788279, 0.8256361790953284, 0.901409255516126, 0.7161627168512383, 0.7291475312654133, 0.8613125635031611, 0.6182926100993599, 0.9785463052017294, 0.8131400905081712, 0.7645379195274166, 0.7131377467636373, 0.9236981130869588, 0.9544323941802104, 0.6413490388725769, 0.9913668757623895, 0.7148526263403492, 0.66335749201744, 0.9844851945410179, 0.8095292296795028, 0.8342905837574743, 0.7465857588524845, 0.6583474670853108, 0.7194165659685183, 0.5885476300684427, 0.9082024336290027, 0.5768543639841577, 0.6835184798589965, 0.5585458742043965, 0.9927258005381119, 0.5068321661601569, 0.5691138676516568, 0.5807569048153083, 0.814539823744036, 0.6653018179101122, 0.6302211786970598, 0.7535657887376581, 
0.991581698620896, 0.7466733422823362, 0.7495886770181517, 0.8536112863239382, 0.7007853977817083, 0.852522175677079, 0.7540402941944514, 0.9113761107980674, 0.9757510210327589, 0.9389815537570791, 0.6269145081895582, 0.7391715752509269, 0.9543055223035226, 0.5258442944430417, 0.6013654542443784, 0.8481732858634423, 0.8932632188370428, 0.9366921030532247, 0.9337269457624056, 0.6833028063808828, 0.5854788611392305, 0.5571906834636418, 0.7621742048092582, 0.7700960085770256, 0.7519571004412547, 0.9105862381892023, 0.5852561332199406, 0.786445336351169, 0.8207764629034151, 0.8441722413248758, 0.7182498444760718, 0.9611239894905735, 0.7938399691472495, 0.6651717036321401, 0.7546076660259373, 0.643232627768352, 0.8129378704685513, 0.7337043629823872, 0.6437334747319935, 0.7796977489554828, 0.731261518776954, 0.8550084888664489, 0.7562086176885727, 0.5494154072352808, 0.5267436432329791, 0.7164070261921801, 0.6817913126755123, 0.9437642147224254, 0.7398414718686568, 0.7787139433523715, 0.5232731821472691, 0.6092736477583722, 0.5089629235668621, 0.6139389966279298, 0.8062383028378834, 0.7025502629005209, 0.8764163677303157, 0.6086335473479656, 0.9260344765724577, 0.68535544828311, 0.9061456112114588, 0.8538674039561065, 0.7698182928102029, 0.926324535818473, 0.5889188581651182, 0.7698387317432749, 0.9363825324198112, 0.9698493381195638, 0.8337644729329705, 0.521454730803548, 0.5480008165937966, 0.9994552398639017, 0.9739727710359978, 0.546788188620588, 0.5196100428889086, 0.8834631949100249, 0.8849840729623779, 0.7881361104237875, 0.8493142625727974, 0.7904181681821432, 0.8863359645875536, 0.8683569246573879, 0.5962110321676597, 0.535158199813636, 0.6257797345273397, 0.743421995837164, 0.8949383797725465, 0.8485353456864074, 0.8511225000347515, 0.5645423359441164, 0.6461381497288711, 0.6547189494264057, 0.7638865027367097, 0.7668609283003178, 0.8892354975108336, 0.613808451046673, 0.952123169942011, 0.790425508438874, 0.7870736979416929, 0.948901908292004, 
0.9042097648559068, 0.7386103825036576, 0.8361742870827835, 0.673584421342029, 0.6002582441532924, 0.5612223221285595, 0.5366576495468178, 0.9108010203100008, 0.6593974860734311, 0.9927623421679166, 0.5105116220928785, 0.7780425251057614, 0.7026219409638296, 0.5139119442260245, 0.6143243944004968, 0.8410450321454959, 0.6426612223749397, 0.850525476429886, 0.6924598091203527, 0.7538619578385547, 0.8938147977221977, 0.8843567025000633, 0.5195793099471082, 0.5041026109973142, 0.9228545022052482, 0.9807563558530448, 0.9648666168864206, 0.6696646513556708, 0.8113289076529642, 0.6884454401078474, 0.947988215337471, 0.655916919056805, 0.5296737478411855, 0.8826657455949389, 0.9222086523376278, 0.6608268556260233, 0.9485208763405429, 0.8316983505816453, 0.8000053579828854, 0.6967547929450442, 0.859513451978043, 0.9632827870919669, 0.9907540214187127, 0.7541729447717042, 0.7501670588716141, 0.5039631353155521, 0.8157822245171421, 0.6382949356142905, 0.922949459392799, 0.9638775581775494, 0.8807248289125638, 0.8988619597049421, 0.6900910003157723, 0.9818407978069261, 0.5211361110009046, 0.6305182027682501, 0.5556249383906524, 0.8400830674235433, 0.9418728027658823, 0.6592562192461939, 0.8733372311685825, 0.7656817189377905, 0.8726914678854827, 0.5963730645300873, 0.812632215776893, 0.5635258998888759, 0.6307440633236723, 0.9493860245353914, 0.9374560166421946, 0.6043139489828708, 0.9610629268780988, 0.9578245179366544, 0.964164818730859, 0.6201729433556652, 0.6829353258451866, 0.9836095232489201, 0.6316878205142717, 0.5571646314976673, 0.5506085761847135, 0.7533686333102152, 0.5847950463697158, 0.7500972342124719, 0.8951026905452822, 0.8983401241358125, 0.5451007160543968, 0.8098884276434137, 0.9587283634838148, 0.6745518523953146, 0.5056564497635908, 0.889338738346314, 0.6362960169079205, 0.7057035345580528, 0.9979299232597636, 0.6869175843531214, 0.56832013783304, 0.9491664649861921, 0.5518354434270978, 0.7836735650166619, 0.9635488115403612, 0.7641123061761994, 
0.5395362753788087, 0.6428678509494048, 0.7980307112373461, 0.5605110058718853, 0.7955603169022096, 0.9654805777273495, 0.6897449542557046, 0.7833467981741504, 0.8279407895480446, 0.7313610138500071, 0.590360960651175, 0.5945588468746932, 0.9770937339168773, 0.7581548766191726, 0.5181542049448905, 0.6986568475402273, 0.9680767176382552, 0.8738605990330413, 0.5840402480652426, 0.9154746036653831, 0.7423062930733464, 0.7980577171510808, 0.6144822616078984, 0.6804328891366664, 0.7057763194223841, 0.5913872018679349, 0.5789231055138948, 0.8858700103668669, 0.5721375999224337, 0.9885080977736025, 0.5475876512358417, 0.8566124765872917, 0.7979924744306729, 0.8918321273250065, 0.721494128592557, 0.8476732240957663, 0.7702835892885345, 0.7563648981443489, 0.8769591752249442, 0.5239445776445677, 0.5008477403232298, 0.6625655379285358, 0.8626462255028715, 0.5992694535866793, 0.9877891576227154, 0.9765683130808958, 0.7693635537702874, 0.6663961210824236, 0.9441664034594673, 0.7317788665312617, 0.647077719124754, 0.9238721438438864, 0.8978162906750945, 0.7481802112236366, 0.6874792210731546, 0.5905965554099106, 0.7867971832013541, 0.8275349823975049, 0.6361672280830217, 0.6504440460901554, 0.728351955565407, 0.8736165636290046, 0.6647588299833818, 0.6874357658084169, 0.7084592369039164, 0.6907867331600799, 0.8026760922303642, 0.7432839422743616, 0.7876439624566105, 0.7633943544453134, 0.8049292580123601, 0.9727676072236062, 0.7525840678738712, 0.933707491214683, 0.5928876809669974, 0.8843944958325907, 0.7315430569226913, 0.5664061543192631, 0.7283788663677261, 0.5169999021840996, 0.8958753375002144, 0.9993898863396742, 0.7430006238535362, 0.6159861040706082, 0.967321529102544, 0.5393126455085338, 0.6205898752408112, 0.5292895886197466, 0.9575577826936842, 0.893992011398981, 0.8184986779057726, 0.7165336727531534, 0.5627117012205602, 0.8579907626607819, 0.9162609806233133, 0.9923776701770555, 0.9502524840990454, 0.7666182167519182, 0.9185638748403209, 0.6672346951489417, 
0.9357350809830316, 0.6568313355313686, 0.6424416603209471, 0.9502141446366943, 0.8001685950583572, 0.5481642692063894, 0.5867456783119092, 0.9622485786265428, 0.5002526683691382, 0.700710414782902, 0.8266402692911354, 0.6757473887320102, 0.5748305134276448, 0.9659526750545183, 0.6254687614914627, 0.5250447024865756, 0.5902354949238579, 0.6672423945885337, 0.8661560945461635, 0.8298404157644026, 0.5078683797812014, 0.7090499728546764, 0.5472061226508156, 0.8285271054381409, 0.5036992017526347, 0.9529893935252652, 0.6135119139172626, 0.6887447663243544, 0.8032997685369117, 0.5280773083006409, 0.8978622939876814, 0.987498389445173, 0.7207364122950526, 0.572717577716559, 0.7412030793354476, 0.8086420496574894, 0.623796654837331, 0.5441754183138758, 0.5225716817757327, 0.7749746907337786, 0.7365990262456217, 0.7724617012237467, 0.7837434934655512, 0.6150858556372704, 0.5730526948701227, 0.8107102811556532, 0.8312161044889113, 0.6338109440398463, 0.6982255137912712, 0.9786786133981478, 0.7088167484145067, 0.5016087968996046, 0.6242990574841538, 0.824032721721272, 0.5778312518967395, 0.8635654280990559, 0.9797956649898472, 0.9881825496705646, 0.6236584713978275, 0.7430099063209079, 0.8609982341036853, 0.6068816948820306, 0.9967317820094743, 0.8905545529669712, 0.9915088730926451, 0.7456311419596628, 0.8239018530557034, 0.7480018951870305, 0.9811471429646554, 0.6043600115263112, 0.5671566004670323, 0.5192208299615894, 0.538238189277146, 0.5481598652120716, 0.8004219779353012, 0.6850000349744372, 0.6954083005263768, 0.7036500932050884, 0.9550113552521414, 0.5546413768054629, 0.6564376081710259, 0.8581672268321539, 0.9472942722046231, 0.7091025352098899, 0.7878744873951344, 0.5916654596884973, 0.571367190679299, 0.5664871340778912, 0.7096662695240228, 0.963027122393553, 0.7570712571635183, 0.969190881449445, 0.8738488442976109, 0.8259534943077562, 0.6711231716440349, 0.8600156895467553, 0.9933029285738206, 0.8201286770416341, 0.6478093183579804, 0.7534176736543889, 
0.7428848359523138, 0.5120713361578556, 0.8898816414633439, 0.8399378262922942, 0.68267655146396, 0.5738565708339134, 0.7321763013054221, 0.82735492215677, 0.9739172535265511, 0.7657980614304185, 0.8031186024423544, 0.8201272515970321, 0.6189353505192907, 0.7358967244453072, 0.579480535420881, 0.9506874882494741, 0.8254603790322363, 0.9242708388043743, 0.8908103885222176, 0.7031554996553617, 0.7286904066550552, 0.7492328588792131, 0.5784457546461892, 0.9580227586829886, 0.6701746039370707, 0.8271333793036151, 0.7753258639382647, 0.9191494566557485, 0.6999461502788338, 0.5628964314226548, 0.9049612560871, 0.8325547282076003, 0.7645046644484326, 0.7886971874403161, 0.8085028066521852, 0.6651385102677897, 0.9324560820197656, 0.6033777854534925, 0.9843326421101322, 0.6480650370881309, 0.9149374321687236, 0.9427713475165009, 0.6538334904402192, 0.5677937523644336, 0.7796056475242102, 0.8617091697494097, 0.6515448580788816, 0.7898274024971824, 0.5443363347808488, 0.9595026533806984, 0.5059402684094932, 0.8571939444369754, 0.7327743708663935, 0.6834349928423884, 0.89298080403258, 0.9784075141551718, 0.6860833258575778, 0.6297443168468493, 0.8053712952891234, 0.9376289405920808, 0.8808934902875608, 0.7562400570203902, 0.6649455089031915, 0.911568549655629, 0.7605081569571792, 0.9471470949935044, 0.8446649418360813, 0.9992538278360235, 0.612995532284236, 0.5047435544898173, 0.6715214274753231, 0.5567213381482068, 0.6679375871558009, 0.8942431472820054, 0.522379467492929, 0.7835882819464843, 0.6621001535452016, 0.5610647094137815, 0.9892316076978216, 0.5273463330094562, 0.5925247547152408, 0.5851004986209076, 0.6599517422468426, 0.6383142855738839, 0.7058960648016239, 0.6283169045063532, 0.5425836472246517, 0.7369629431115405, 0.8362281045064957, 0.8127687579059378, 0.5100900549917888, 0.7885141574381354, 0.7997467631051102, 0.8955477798982638, 0.8400727070671223, 0.9362855603003549, 0.7095844900737285, 0.7398430434891352, 0.6932786935715467, 0.8248873040351663, 
0.677771587864386, 0.7207296206799445, 0.9911932301016229, 0.6832091925109313, 0.8952561691727403, 0.5787811822456229, 0.727985330282187, 0.9562542833777701, 0.5899260712101712, 0.8974197751037425, 0.9919614733032348, 0.9695723408794052, 0.6143814772393809, 0.7958206441008056, 0.7187982091622422, 0.6796292636626527, 0.7807607051002845, 0.9458328661218574, 0.8337995937145589, 0.6126440846027614, 0.5784499740252296, 0.8543337676659957, 0.537717460441985, 0.7209456591368391, 0.606573895162362, 0.7499625212287246, 0.9724460329110394, 0.6147557206700307, 0.5661228510636157, 0.806784171213345, 0.5476478717017137, 0.6038754100575754, 0.5363319144025325, 0.8205609342022193, 0.7741130151162214, 0.9961206989798037, 0.6499544306924435, 0.5962837540756198, 0.6456344838772627, 0.7954696533570494, 0.8500857696417092, 0.6880726020324252, 0.8640997187014758, 0.812786165011182, 0.9827671163160132, 0.9682405922725043, 0.873973544386235, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 
892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 
120, 121, 122, 123, 124, 125, 126, 127, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3573, 3574, 3576, 3578, 3580, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3592, 3593, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3611, 3612, 3613, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 
3668, 3670, 3672, 3674, 3676, 3678, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3733, 3734, 3735, 3736, 3738, 3740, 3742, 3743, 3744, 3745, 3746, 3748, 3750, 3752, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3765, 3767, 3768, 3770, 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3850, 717, 712, 4056, 4058, 704, 699, 4060, 4062, 910, 905, 717, 712, 704, 699, 638, 633, 3864, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4081, 3989, 4083, 3994, 656, 3997, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3989, 4085, 3992, 3994, 656, 3997, 669, 910, 905, 3875, 910, 905, 3877, 933, 938, 933, 938, 910, 905, 3884, 910, 905, 3886, 933, 938, 933, 938, 755, 789, 3891, 3893, 784, 789, 4103, 760, 760, 3896, 590, 585, 610, 605, 3901, 784, 590, 585, 3904, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3916, 3917, 3919, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3928, 3930, 3932, 755, 755, 755, 784, 590, 585, 717, 712, 4109, 4012, 4111, 4012, 760, 755, 638, 633, 3942, 638, 633, 3943, 784, 789, 4113, 4115, 854, 859, 3949, 905, 910, 3953, 928, 905, 910, 3953, 928, 4118, 943, 859, 854, 3959, 877, 
872, 877, 872, 859, 854, 3965, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3989, 638, 633, 3992, 3994, 656, 3997, 669, 717, 712, 717, 712, 717, 712, 4003, 4004, 704, 699, 717, 712, 717, 712, 4009, 4010, 4012, 760, 755, 760, 755, 760, 755, 765, 789, 784, 789, 784, 4148, 789, 784, 826, 821, 836, 831, 846, 841, 4151, 826, 821, 836, 831, 846, 841, 4153, 859, 854, 4039, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4172, 4175, 4159, 4160, 4161, 4177, 4179, 4181, 4183, 4185, 4190, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4202, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4210, 4160, 4161, 1363, 1547, 4212, 4135, 4161, 4217, 4105, 1545, 1540, 4219, 4107, 4108, 4225, 1276, 1271, 4227, 1276, 1271, 1281, 1281, 1281, 4229, 4231, 1545, 1540, 1545, 1540, 1545, 1540, 4135, 4160, 4161, 4135, 4160, 4161, 1521, 1516, 4138, 1521, 1516, 4140, 4156, 4158, 4159, 4160, 4161, 4242, 1521, 1516, 4164, 1521, 1516, 4167, 1545, 1540, 1545, 1540, 4241, 4240, 4241, 4240, 4241, 4240, 2077, 2077, 4241, 4240, 4241, 4240, 4241, 4240, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4263, 2174, 4265, 2174, 4267, 4270, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4260, 4259, 4274, 4273, 4260, 4259, 4260, 4259, 4260, 4259, 4260, 4259, 4274, 4273, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4371, 4372, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 
4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4408, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4513, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4620, 4621, 4622, 4623, 4624, 4625, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4648, 4649, 4650, 4146, 4145, 4146, 4145, 4146, 4145, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4682, 4683, 4684, 4685, 4687, 4688, 4690, 4691, 4692, 4694, 4695, 4697, 4698, 4700, 4701, 4702, 4703, 4704, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4651, 4741, 4742, 4654, 4743, 4744, 4656, 
4745, 4746, 4747, 4748, 4651, 4749, 4750, 4188, 4187, 4654, 4751, 4752, 4188, 4187, 4656, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4240, 4240, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4798, 4800, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4269, 4273, 4819, 4820, 4272, 4274, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4272, 4269, 4272, 4269, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4864, 4866, 4868, 4870, 4872, 4874, 4876, 4879, 4881, 4883, 4885, 4887, 4889, 4892, 4894, 4896, 4898, 4900, 4902, 4905, 4908, 4910, 4912, 4920, 4922, 4924, 4927, 4930, 4932, 4934, 4943, 4946, 4949, 4951, 4953, 4956, 4959, 4961, 4967, 4972, 4974, 4978, 4981, 4983, 4986, 4990, 4996, 4999, 5001, 5003, 5012, 5014, 5018, 5020, 5023, 5026, 5028, 5031, 5035, 5040, 5043, 5045, 5047, 5050, 5052, 5054, 5056, 5058, 5061, 5064, 5066, 5068, 5071, 5074, 5081, 5083, 5085, 5089, 5091, 5093, 5098, 5100, 5102, 5105, 5107, 5109, 5111, 5113, 5115, 5117, 5119, 5121, 5123, 5126, 5128, 5130, 5133, 5136, 5139, 5080, 5078, 5097, 5145, 5146, 5147, 5148, 5080, 5078, 5097, 5149, 5150, 5080, 5078, 5097, 5097, 5080, 5078, 5097, 5151, 5153, 5155, 5157, 4938, 4915, 4919, 4917, 4938, 4937, 4942, 4940, 5161, 5163, 5165, 5170, 5172, 5176, 4995, 4966, 5104, 4146, 4145, 4147, 5078, 4995, 4966, 4995, 5104, 4146, 4145, 4147, 5078, 4995, 5007, 4150, 5080, 4998, 5007, 4150, 4995, 5007, 5104, 4146, 4145, 4147, 5181, 5080, 5078, 4512, 5016, 4514, 5017, 5080, 5078, 5097, 5039, 5034, 5039, 5038, 5039, 5034, 5185, 5039, 5038, 5187, 5192, 5194, 5196, 5204, 5207, 5080, 5078, 5097, 5215, 5218, 5221, 5223, 5209, 5206, 5225, 
5228, 5231, 4730, 4241, 4240, 5211, 5236, 5239, 5240, 5241, 5244, 5245, 5246, 5250, 5252, 5254, 5257, 5209, 5206, 5209, 5180, 5261, 5263, 5265, 4730, 4241, 4240, 5267, 4730, 4241, 5269, 4730, 4241, 5270, 5211, 5211, 4730, 4241, 4240, 5209, 5206, 2135, 2133, 5271, 5274, 5277, 4730, 4241, 4240, 5209, 5180, 2135, 2133, 5280, 5283, 5286, 4730, 4241, 4240, 5209, 5206, 5291, 4730, 4241, 4240, 4730, 4241, 4240, 5211, 4730, 4241, 4240, 5295, 5297, 5299, 5302, 5304, 5304, 5249, 5311, 5312, 5308, 5308, 5293, 5315, 5316, 5304, 5249, 5308, 5293, 5304, 5304, 5304, 5301, 5304, 5304, 5308, 5293, 5327, 5328, 5329, 5330, 5294, 5308, 4274, 4273, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4065, 4064, 4087, 4065, 4064, 4088, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5475, 5476, 5087, 4144, 4143, 5390, 5095, 4144, 4143, 5477, 5478, 4150, 4610, 4147, 5390, 5480, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5482, 5483, 5087, 4144, 4143, 5384, 5095, 4144, 4143, 5484, 5485, 4610, 4150, 4147, 4065, 4064, 4090, 4089, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5487, 5488, 5087, 4144, 4143, 5387, 5095, 4144, 4143, 5489, 5104, 4610, 4150, 4147, 5076, 5073, 5087, 4144, 4143, 5453, 5095, 4144, 4143, 5490, 5104, 4610, 4150, 4147, 5446, 4989, 5060, 4142, 4141, 4988, 5076, 5073, 5491, 5492, 5095, 4144, 4143, 5493, 5087, 4144, 4143, 5390, 4146, 4145, 4610, 4150, 4147, 4071, 4070, 4904, 4142, 4141, 4907, 5397, 4914, 5498, 5499, 5500, 5501, 4926, 4142, 4141, 4929, 5404, 4936, 5502, 5503, 5504, 5505, 4948, 4945, 4088, 4087, 4958, 4955, 4090, 4089, 5060, 4141, 4142, 4988, 5446, 4989, 4976, 4992, 4998, 5512, 5005, 4144, 4143, 5513, 5514, 5515, 5516, 5517, 5060, 4141, 4142, 4988, 5446, 4989, 5076, 
4965, 5518, 5519, 5005, 4144, 4143, 5520, 5104, 4146, 4145, 4464, 5060, 4141, 4142, 4998, 5521, 5005, 4144, 4143, 5522, 5523, 5524, 5525, 5060, 4141, 4142, 4988, 5446, 4989, 4976, 5073, 5526, 5527, 5005, 4144, 4143, 5528, 5104, 4146, 4145, 5529, 5060, 4142, 4141, 4993, 4992, 5530, 5531, 5005, 4144, 4143, 5532, 5104, 4146, 4145, 5533, 5060, 4142, 4141, 4988, 4989, 4993, 4992, 4998, 5534, 5005, 4144, 4143, 5535, 5536, 5537, 5538, 5539, 4130, 4128, 5060, 4142, 4141, 5063, 5446, 5070, 5025, 5022, 5541, 5542, 4144, 4143, 5543, 5544, 5104, 4146, 4145, 4150, 4147, 4610, 4144, 4143, 5545, 4144, 4143, 5546, 5104, 4146, 4145, 5060, 4142, 4141, 5063, 5446, 5070, 5025, 5022, 5547, 5548, 5087, 5453, 5095, 5549, 5104, 4146, 4145, 4150, 4147, 4610, 4129, 4131, 5550, 5551, 5552, 5553, 5030, 5033, 5554, 5555, 5037, 5557, 5558, 5042, 4129, 4128, 5049, 4131, 4130, 5060, 4142, 4141, 5063, 5446, 5070, 5076, 5073, 5565, 5566, 5087, 4144, 4143, 5453, 5095, 4144, 4143, 5567, 5104, 4146, 4145, 4150, 4610, 4147, 5463, 4619, 5466, 4626, 5125, 5470, 5135, 5132, 5141, 5138, 5572, 5573, 5574, 5575, 5576, 4647, 5577, 5578, 5579, 5580, 5581, 5582, 5584, 5585, 5587, 4193, 4192, 4233, 4233, 5159, 5160, 5507, 5592, 5593, 5594, 5595, 4233, 5599, 5600, 5601, 4233, 5603, 5604, 5209, 5206, 5510, 5606, 5607, 5209, 5180, 5511, 5609, 5610, 5611, 5612, 5613, 5614, 5615, 4686, 5616, 5617, 5621, 5622, 5623, 5624, 5625, 4693, 5626, 5627, 5556, 5556, 5556, 5559, 5631, 5632, 5633, 5634, 5635, 4233, 4233, 4233, 4236, 4236, 5637, 5638, 5639, 5640, 5641, 5642, 5209, 5206, 4238, 4238, 5643, 5644, 5645, 5646, 5220, 5217, 4245, 4245, 5651, 5294, 5652, 5653, 4272, 4274, 4273, 4269, 5304, 5301, 5654, 5304, 5301, 5656, 5657, 5658, 5659, 5661, 5662, 4272, 4274, 4273, 4269, 5663, 5664, 5665, 4272, 5294, 4269, 5276, 5273, 5279, 5666, 4272, 4269, 5667, 5668, 5293, 4272, 4269, 5669, 5294, 5276, 5273, 5279, 5285, 5282, 5288, 5276, 5273, 5279, 5285, 5282, 5288, 5670, 5294, 5671, 5672, 5304, 5301, 5677, 5675, 5304, 5301, 5678, 
5679, 5680, 121, 122, 123, 124, 125, 126, 127, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 5769, 5770, 5771, 5772, 5773, 5774, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5785, 5786, 5787, 5788, 5790, 5791, 5792, 5793, 5794, 5795, 5796, 5797, 5798, 5800, 5801, 5802, 5803, 5804, 5805, 5806, 5809, 5810, 5811, 5812, 5813, 5814, 5815, 5816, 5817, 5818, 5819, 5820, 5821, 5822, 5823, 5824, 5826, 5827, 5828, 5829, 5830, 5831, 5832, 5834, 5835, 5836, 5837, 5838, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 5856, 5857, 5858, 5859, 5860, 5862, 5863, 5864, 5866, 5867, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5885, 5887, 5888, 5889, 5890, 5891, 5892, 5893, 5895, 5897, 5898, 5899, 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5911, 5912, 5913, 5915, 5916, 5917, 5919, 5923, 5924, 5925, 5926, 5927, 5928, 5929, 5930, 5931, 5933, 5934, 5935, 5937, 5938, 5939, 5940, 5941, 5942, 5943, 5944, 5946, 5947, 5948, 5949, 5953, 5954, 5955, 5956, 5957, 5958, 5959, 5960, 5961, 5963, 5964, 5965, 5967, 5968, 5969, 5971, 5972, 5973, 5974, 5975, 5976, 5978, 5979, 5980, 5982, 5983, 5984, 5986, 5987, 5988, 5989, 5990, 5991, 5992, 5993, 5995, 5996, 5997, 5999, 6003, 6004, 6005, 6006, 6007, 6008, 6009, 6010, 6011, 6012, 6013, 6015, 6016, 6019, 6020, 6021, 6022, 6023, 6024, 6025, 6026, 6028, 6029, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6040, 6041, 6042, 6044, 6045, 6046, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6058, 6060, 6061, 6062, 6064, 6065, 6067, 6068, 6069, 6070, 6071, 6072, 6073, 6074, 6075, 6076, 6077, 6078, 6079, 6080, 6081, 6083, 6084, 6085, 6086, 6087, 6088, 6089, 6091, 6092, 6093, 6094, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6106, 6107, 6109, 6110, 6111, 6112, 6113, 6117, 6119, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6131, 6133, 6134, 6137, 6138, 6140, 6141, 6142, 6143, 6145, 6146, 6147, 
6150, 6153, 6155, 6156, 6158, 6161, 6163, 6164, 6166, 6167, 6168, 6169, 6170, 6173, 6175, 6176, 6177, 6178, 6179, 6180, 6183, 6186, 6187, 6188, 6189, 6191, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6202, 6203, 6204, 6205, 6206, 6207, 6209, 6210, 6211, 6212, 6215, 6217, 6218, 6219, 6220, 6221, 6223, 6224, 6225, 6226, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 6243, 6244, 6245, 6246, 6247, 6248, 6249, 6250, 6251, 6252, 6253, 6254, 6255, 6256, 6257, 6259, 6260, 6261, 6262, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6272, 6275, 6278, 6284, 6287, 6291, 6294, 6298, 6304, 6307, 6311, 6314, 6317, 6319, 6321, 6327, 6330, 6334, 6338, 6341, 6343, 6347, 6351, 6356, 6360, 6363, 6366, 6370, 6372, 6375, 6377, 6385, 6393, 6395, 6397, 6399, 6401, 6407, 6409, 6410, 6413, 6414, 6420, 6423, 6426, 6430, 6433, 6434, 6437, 6438, 6444, 6447, 6450, 6453, 6456, 6459, 6462, 6465, 6470, 6472, 6473, 6476, 6479, 6485, 6488, 6490, 6493, 6496, 6498, 6500, 6503, 6509, 6515, 6518, 6531, 6534, 6536, 6542, 6545, 6549, 6552, 6555, 6564, 6566, 6573, 6326, 6283, 6303, 6561, 6559, 6563, 6283, 6355, 6303, 6326, 6355, 6563, 6577, 6382, 6384, 6390, 6392, 6587, 6589, 6590, 6593, 6594, 6406, 6443, 6469, 6419, 6559, 6406, 6419, 6469, 6443, 6508, 6469, 6561, 6597, 6601, 6541, 6561, 6559, 6478, 6477, 6523, 6484, 6508, 6047, 6513, 6561, 6559, 6522, 6521, 6523, 6524, 6541, 6561, 6559, 6527, 6529, 6609, 6616, 6617, 6618, 6541, 6561, 6559, 6563, 6622, 6623, 5648, 5290, 5289, 6627, 6120, 6118, 5648, 5290, 5289, 6630, 6632, 5648, 5647, 6634, 6600, 6604, 5648, 5647, 6636, 6638, 6120, 6118, 5648, 
5290, 5289, 6641, 6643, 5648, 5647, 5648, 5289, 5290, 6646, 6647, 6600, 6650, 6604, 5648, 5290, 5289, 6653, 6654, 5289, 5290, 5648, 6657, 5648, 5290, 5289, 6660, 6600, 6662, 6604, 6665, 6600, 6668, 6604, 6671, 5290, 5289, 5648, 6674, 5648, 5647, 6676, 6677, 5648, 5647, 6678, 5648, 5647, 6681, 6683, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6786, 6788, 6789, 6790, 6791, 6793, 6794, 6795, 6798, 6800, 6801, 6802, 6804, 6805, 6806, 6807, 6809, 6810, 6812, 6814, 6815, 6820, 6823, 6825, 6827, 6828, 6829, 6831, 6833, 6835, 6836, 6837, 6839, 6840, 6841, 6844, 6846, 6849, 6850, 6853, 6854, 6856, 6857, 6860, 6862, 6863, 6864, 6865, 6869, 6329, 6870, 6286, 6871, 6306, 6872, 6873, 6874, 6277, 6274, 6875, 6286, 6876, 6362, 6877, 6306, 6797, 6878, 6329, 6511, 6879, 6362, 6880, 6867, 6882, 6883, 6884, 6885, 6819, 6817, 6891, 6822, 5922, 6892, 6446, 6893, 6843, 6002, 6894, 6422, 6895, 6858, 6859, 6867, 6896, 6822, 5922, 6897, 6422, 6898, 6830, 5952, 6899, 6446, 6900, 6458, 6901, 6843, 6002, 6902, 6867, 6905, 6544, 6906, 6907, 6908, 6909, 6910, 6911, 6487, 6018, 6017, 6030, 6027, 6912, 6511, 6913, 6914, 6915, 6916, 6917, 6918, 6919, 6920, 6921, 6544, 6922, 6923, 6858, 6859, 6924, 6925, 6859, 6858, 6930, 6544, 6931, 6932, 6933, 6867, 6568, 6936, 6937, 6938, 6881, 6940, 6941, 6942, 6943, 6944, 6945, 6935, 6947, 6948, 6598, 6950, 6951, 6935, 6952, 6953, 6935, 6881, 6956, 6957, 6958, 6959, 6960, 6961, 6963, 6964, 6935, 6965, 6966, 6967, 6969, 6584, 6970, 6585, 6972, 6929, 6973, 6974, 6975, 6978, 6979, 6980, 6981, 6610, 6982, 6983, 6984, 6888, 6986, 6890, 6988, 6598, 6990, 6602, 6992, 6610, 6994, 6995, 6996, 6935, 6998, 6999, 6935, 6929, 7002, 7003, 6935, 7005, 7006, 6628, 6208, 6955, 6639, 6214, 6645, 6652, 6977, 6661, 6664, 6667, 6670, 6673, 6675, 7000, 7001, 6680, 7008, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6324, 7089, 5833, 6333, 
7051, 6281, 7091, 5783, 6290, 7043, 6301, 7093, 5807, 6310, 7047, 7094, 7097, 7098, 6281, 7100, 5783, 6290, 7043, 6359, 7102, 5847, 6297, 7054, 6301, 7104, 5807, 6310, 7047, 7105, 6324, 7107, 5833, 6333, 7051, 7108, 5847, 6346, 7054, 6359, 7110, 6369, 5865, 7058, 7112, 6380, 6388, 7117, 7118, 6404, 7120, 5918, 7121, 6441, 7123, 5966, 5970, 6468, 7125, 5998, 7126, 6417, 7128, 5936, 6429, 7130, 7131, 7132, 6404, 7134, 5918, 7135, 6417, 7137, 5936, 6429, 6506, 7139, 5981, 7140, 6441, 7142, 5966, 5970, 6506, 7144, 5981, 5985, 6468, 7146, 5998, 7147, 7149, 6539, 7151, 6090, 6548, 7087, 7152, 7154, 6482, 7158, 7159, 7160, 7078, 7161, 7162, 7082, 6506, 7164, 7165, 7082, 7167, 7169, 6539, 7174, 6090, 6548, 7087, 7175, 7177, 7178, 7181, 7182, 6539, 7184, 6090, 6548, 7087, 7185, 7188, 6608, 6605, 6608, 6606, 7189, 7190, 7193, 7196, 7200, 7201, 7203, 7206, 7207, 7209, 7210, 7213, 7217, 7219, 7220, 7224, 7226, 6608, 6605, 7228, 7229, 7232, 6608, 6605, 6608, 6606, 7236, 7237, 7240, 7242, 7244, 7246, 6608, 6605, 6608, 6606, 6608, 6607, 7248, 7249, 7252, 7253, 7255, 7256, 7257, 7259, 7260, 7262, 7199, 7263, 7264, 7265, 7266, 7216, 7267, 7223, 7268, 7269, 7235, 7270, 7271, 7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7296, 7298, 7299, 7300, 7301, 7303, 7304, 7305, 7306, 7308, 7309, 7310, 7312, 7314, 7316, 7317, 7318, 7319, 7321, 7322, 7323, 7324, 7326, 7327, 7328, 7330, 7332, 7333, 7334, 7336, 7337, 7338, 7339, 7341, 7342, 7343, 7345, 7346, 7347, 7349, 7351, 7353, 7355, 7356, 7357, 7359, 7361, 7363, 7364, 7365, 7368, 7370, 7372, 7374, 7375, 7376, 7378, 7380, 7382, 7383, 7384, 7386, 7387, 7388, 7390, 7393, 7395, 7396, 7397, 7400, 7402, 7404, 7405, 7407, 7408, 7411, 7414, 7416, 7417, 7418, 7420, 7422, 7424, 7426, 7427, 7428, 7399, 7431, 7432, 7413, 7433, 7434, 
7436, 7096, 7437, 7438, 7187, 6190, 7187, 6190, 7096, 6149, 7111, 6149, 7445, 7446, 7449, 6148, 7399, 7452, 7453, 7455, 7456, 7399, 7457, 7458, 7413, 7459, 7460, 7462, 6148, 6148, 6149, 7399, 7467, 7468, 7413, 7469, 7470, 7471, 7472, 7474, 7187, 6190, 7187, 6190, 7483, 7440, 7204, 7205, 7443, 7447, 7479, 7488, 7447, 7490, 7225, 7227, 7493, 7241, 7243, 7245, 7247, 7476, 7479, 7479, 7481, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7088, 7553, 7090, 7557, 7092, 7561, 7099, 7566, 7101, 7570, 7103, 7574, 7106, 7578, 7581, 7584, 7585, 7113, 7115, 7119, 7122, 7124, 7127, 7133, 7136, 7138, 7141, 7143, 7145, 7150, 7618, 7157, 7163, 7173, 7629, 7183, 7635, 7638, 7639, 7623, 7641, 7642, 7645, 6116, 7646, 7648, 7649, 7650, 7651, 7652, 6116, 7653, 7654, 7655, 7656, 7633, 6583, 7600, 7360, 7601, 7659, 7660, 7661, 7665, 7666, 7623, 7668, 7669, 7360, 7595, 7600, 7352, 7601, 7672, 7352, 7595, 7360, 7600, 7601, 7673, 7371, 7606, 7379, 7611, 7614, 7391, 7632, 7674, 7675, 7676, 7623, 7625, 7627, 7678, 7679, 7632, 7681, 7633, 7684, 7685, 7686, 7687, 7644, 7689, 7690, 7691, 7692, 7693, 7694, 7696, 7658, 7698, 7699, 7663, 7664, 7671, 7701, 7702, 7703, 7704, 7683, 7705, 7706, 7707, 7708, 125, 126, 127, 7394, 7620, 7401, 7847, 7302, 7559, 7297, 7555, 7307, 7563, 7851, 7425, 7637, 7425, 7637, 7297, 7555, 7302, 7559, 7307, 7563, 7858, 7315, 7568, 7320, 7572, 7325, 7576, 7331, 7580, 7335, 7583, 7340, 7587, 7116, 7114, 7637, 7863, 7864, 7362, 7865, 7358, 7866, 7867, 7394, 7620, 7394, 7620, 7401, 7873, 7358, 7876, 7354, 7877, 7362, 7878, 7350, 7879, 7880, 7350, 7882, 7354, 7883, 7358, 7884, 7362, 7885, 7886, 7369, 7888, 
7373, 7889, 7377, 7890, 7381, 7891, 7385, 7892, 7389, 7893, 7894, 7394, 7620, 7401, 7898, 7899, 7409, 7900, 7415, 7631, 7903, 7905, 7425, 7637, 7425, 7637, 7846, 7849, 7910, 7647, 7854, 7856, 7859, 7861, 7657, 7918, 7870, 7921, 7922, 7872, 7875, 7923, 7897, 7902, 7928, 7907, 7909, 7484, 7498, 7491, 7485, 7486, 7487, 7489, 7498, 7491, 7496, 7495, 7498, 7497, 7500, 7501, 7502, 7503, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7936, 7937, 7938, 7940, 7941, 7942, 7943, 7944, 7945, 7947, 7948, 7949, 7950, 7951, 7952, 7953, 7954, 7955, 7956, 7958, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7968, 7969, 7970, 7971, 7972, 7975, 7977, 7980, 7981, 7982, 7983, 7984, 7986, 7988, 7990, 7992, 7995, 7997, 7999, 8001, 8004, 8006, 8008, 8010, 8012, 8014, 8017, 8018, 8019, 8022, 8024, 8025, 8028, 8029, 8030, 8031, 8032, 8033, 7946, 8035, 8036, 8037, 7957, 8038, 8039, 8040, 7974, 7868, 8042, 8045, 8046, 7881, 7887, 7895, 8048, 8049, 7904, 7907, 8051, 8052, 7482, 8053, 8054, 8055, 8056, 8057, 8058, 8059, 7697, 8060, 8061, 7492, 7700, 7494, 8062, 8063, 8064, 8065, 7499, 8066, 8067, 8068, 8069, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8223, 8193, 8023, 8021, 7939, 8200, 8214, 8196, 8220, 8198, 8259, 8202, 8204, 8206, 8220, 8208, 8214, 8210, 8263, 8220, 8212, 8214, 8218, 8216, 8222, 8220, 8218, 8267, 8009, 7978, 7976, 8013, 7996, 7998, 8268, 8229, 8231, 8023, 8021, 7985, 7989, 7993, 8013, 7987, 7991, 8009, 8272, 7998, 8002, 8013, 7996, 8009, 8000, 8273, 8007, 8009, 8005, 8015, 
8013, 8011, 8274, 8248, 8023, 8021, 8020, 8252, 8277, 8278, 8254, 8256, 8281, 7688, 8283, 7695, 8289, 8290, 8292, 8293, 8294, 8295, 8297, 8299, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8321, 8322, 8323, 8324, 8325, 8326, 8327, 8328, 8329, 8331, 8332, 8333, 8334, 8335, 8336, 8337, 8339, 8340, 8341, 8342, 8343, 8344, 8345, 8346, 8225, 8348, 8349, 8350, 8351, 8352, 8353, 8355, 8356, 8357, 8358, 8359, 8360, 8361, 8362, 8363, 8364, 8365, 8367, 8368, 8369, 8370, 8371, 8372, 8374, 8375, 8376, 8377, 8378, 8379, 8381, 8382, 8383, 8384, 8385, 8388, 8389, 8391, 8393, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8449, 8452, 8454, 8459, 8461, 8464, 8466, 8469, 8472, 8473, 8475, 8477, 8481, 8484, 8486, 8488, 8490, 8492, 8494, 8496, 8498, 8500, 8503, 8386, 8257, 8261, 8262, 8386, 8269, 8386, 8270, 8386, 8275, 8387, 8279, 8280, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8576, 8577, 8579, 8581, 8583, 8585, 8588, 8589, 8592, 8595, 8598, 8599, 8600, 8601, 8602, 8347, 8603, 8604, 8605, 8606, 8607, 8608, 8609, 8610, 8611, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8705, 8706, 8707, 8709, 8711, 8712, 8713, 8258, 8265, 8265, 8719, 8271, 8271, 8276, 8285, 8286, 8302, 8288, 8282, 8303, 8397, 8287, 8300, 8301, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8839, 8840, 8264, 8330, 8841, 8264, 8338, 8380, 8354, 8843, 8844, 8380, 8366, 8380, 8373, 8845, 8846, 8847, 8848, 8849, 8850, 8394, 8851, 8852, 8853, 8854, 8855, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8715, 8962, 8963, 8965, 8966, 8967, 8968, 8720, 8722, 8971, 8972, 8973, 8974, 8724, 8981, 8977, 8984, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9088, 8961, 8964, 
9093, 9095, 9096, 9097, 9099, 9101, 9102, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9217, 9218, 8395, 8390, 8399, 8401, 8396, 8398, 8400, 8392, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9346, 8509, 9347, 9348, 9349, 9350, 9351, 9352, 8510, 9353, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9473, 9480, 8976, 9474, 8980, 8983, 9478, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8979, 9601, 9602, 9604, 9605, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9728, 9606, 9731, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9730, 9857, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9984, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 10112, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 
893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 
120, 121, 122, 123, 124, 125, 126, 127, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 961, 962, 963, 964, 967, 968, 969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 1163, 1164, 3575, 3577, 3579, 3581, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3591, 1265, 1266, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 1312, 1315, 1321, 1324, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 
3675, 3677, 3679, 1550, 1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 1770, 1771, 3721, 3723, 3725, 3727, 3729, 3731, 1805, 1806, 1909, 1910, 3737, 3739, 3741, 1925, 1926, 1930, 1931, 3747, 3749, 3751, 3753, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3764, 3766, 2047, 3769, 2071, 3772, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 3840, 4048, 4048, 3841, 3843, 3842, 3844, 4048, 4048, 4048, 3846, 3845, 3848, 3847, 3849, 3852, 3851, 4057, 4059, 3854, 3853, 4061, 4063, 3856, 3855, 3858, 3857, 3860, 3859, 3862, 3861, 3863, 3866, 3865, 3868, 3867, 4023, 3869, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3870, 3871, 3981, 3980, 3983, 3982, 3985, 3984, 3872, 4082, 624, 4084, 3993, 3995, 3996, 3998, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 624, 4086, 639, 3993, 3995, 3996, 3998, 3874, 3873, 183, 4048, 3876, 192, 3879, 3878, 3881, 3880, 3883, 3882, 183, 4048, 3885, 192, 3888, 3887, 3890, 3889, 3907, 3909, 624, 3892, 3895, 3894, 4104, 3908, 4017, 3934, 3898, 3897, 3900, 3899, 639, 3909, 3902, 3980, 3903, 3906, 3905, 3908, 3907, 3909, 3911, 3910, 3912, 3913, 3915, 3914, 624, 639, 3918, 3920, 3922, 3921, 3923, 3925, 3924, 4008, 3926, 4008, 3927, 718, 3929, 3931, 3933, 4015, 4016, 3934, 3980, 3935, 3937, 3936, 4110, 404, 4112, 405, 3939, 3938, 3941, 3940, 624, 3991, 3991, 639, 3945, 3944, 
4114, 4116, 3947, 3946, 3948, 3951, 3950, 3952, 3954, 3951, 3950, 3952, 3954, 4119, 3955, 3957, 3956, 3958, 4041, 3960, 4041, 3961, 3963, 3962, 3964, 3967, 3966, 3969, 3968, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 3988, 3987, 624, 3991, 3990, 639, 3993, 3995, 3996, 3998, 4000, 3999, 4008, 4001, 4008, 4002, 718, 690, 4006, 4005, 4008, 4007, 4008, 4008, 718, 723, 4011, 4014, 4013, 4017, 4015, 4017, 4016, 4018, 4020, 4019, 4023, 4021, 4149, 4023, 4022, 4025, 4024, 4027, 4026, 4029, 4028, 4152, 4031, 4030, 4033, 4032, 4035, 4034, 4154, 4037, 4036, 4038, 4041, 4040, 4043, 4042, 4045, 4044, 4046, 4048, 4047, 4049, 4051, 4050, 4052, 4054, 4053, 4055, 4173, 4176, 1491, 1491, 1491, 4178, 4180, 4182, 4184, 4186, 4191, 4073, 4072, 4166, 4074, 4076, 4075, 4078, 4077, 4079, 4080, 4092, 4091, 4094, 4093, 4203, 4096, 4095, 4132, 4133, 4134, 4098, 4097, 4100, 4099, 4211, 1491, 1491, 4102, 4101, 4213, 1341, 1341, 4218, 1507, 4123, 4106, 4220, 1220, 1220, 4226, 4120, 4117, 4228, 4120, 4120, 4121, 4133, 4134, 4230, 4232, 4123, 4122, 4125, 4124, 4127, 4126, 4132, 4133, 4134, 1341, 1341, 1341, 4137, 4136, 1507, 4166, 4139, 1522, 4155, 4157, 1491, 1491, 1491, 4243, 4163, 4162, 1507, 4166, 4165, 1522, 4169, 4168, 4171, 4170, 4174, 4174, 4174, 4174, 4174, 4174, 4207, 4224, 4189, 4189, 4189, 4189, 4189, 4189, 4194, 4224, 4195, 4197, 4196, 4199, 4198, 4200, 4224, 4201, 4248, 4251, 4224, 4204, 4224, 4205, 4207, 4206, 4209, 4208, 4248, 4247, 4221, 4223, 4222, 4252, 4215, 4214, 4216, 4248, 4247, 4221, 4223, 4222, 4252, 4224, 4255, 4256, 4244, 4244, 4235, 4234, 4264, 4237, 4266, 4239, 4268, 4271, 4244, 4244, 4246, 4246, 4248, 4247, 4249, 4251, 4250, 4252, 4253, 4254, 4255, 4256, 4261, 4261, 4257, 4257, 4258, 4258, 4261, 4261, 4261, 4261, 4261, 4261, 4262, 4262, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 944, 945, 946, 
947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4370, 4369, 4370, 4369, 4374, 4373, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 
2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4174, 2188, 2189, 4174, 2191, 2192, 4174, 2194, 2195, 2198, 2199, 4189, 2265, 2266, 4653, 4652, 4189, 2270, 2271, 4671, 4655, 4189, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4681, 4681, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4801, 4799, 2612, 2613, 4802, 4799, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4797, 4797, 4802, 4801, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4865, 4867, 4869, 4871, 4873, 4875, 4877, 4880, 4882, 4884, 4886, 4888, 4890, 4893, 4895, 4897, 4899, 4901, 4903, 4906, 4909, 4911, 4913, 4921, 4923, 4925, 4928, 4931, 4933, 4935, 4944, 4947, 4950, 4952, 4954, 4957, 4960, 4962, 4968, 4973, 4975, 4979, 4982, 4984, 4987, 4991, 4997, 5000, 5002, 5004, 5013, 5015, 5019, 5021, 5024, 5027, 5029, 5032, 5036, 5041, 5044, 5046, 5048, 5051, 5053, 5055, 5057, 5059, 5062, 5065, 5067, 5069, 5072, 5075, 5082, 5084, 5086, 5090, 5092, 5094, 5099, 5101, 5103, 5106, 5108, 5110, 5112, 5114, 5116, 5118, 5120, 5122, 5124, 5127, 5129, 5131, 5134, 5137, 5140, 5079, 4878, 4067, 1582, 1583, 1588, 1589, 5079, 4891, 5096, 1608, 1609, 5079, 4891, 4067, 4069, 5079, 4891, 5096, 5152, 5154, 5156, 5158, 4409, 4407, 4918, 4916, 4432, 4432, 4941, 4939, 5162, 5164, 5166, 5171, 5173, 5177, 5079, 5006, 5010, 5009, 4963, 4964, 4980, 4994, 5006, 5079, 4970, 4970, 4969, 4971, 4980, 5079, 5006, 4977, 5079, 4980, 5006, 4985, 4994, 5006, 5010, 5009, 5008, 5011, 5182, 5079, 5077, 5088, 5096, 5088, 5096, 5079, 
5077, 5096, 4527, 4526, 4527, 4527, 4539, 4539, 5186, 4539, 4539, 5188, 5193, 5195, 5197, 5205, 5208, 5079, 5077, 5096, 5216, 5219, 5222, 5224, 4671, 4646, 2187, 2190, 2193, 5144, 5143, 5142, 5210, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5251, 5253, 5255, 5258, 4671, 4671, 4671, 4671, 5262, 5264, 5266, 5169, 5168, 5167, 5268, 5175, 5174, 2324, 5175, 5174, 2330, 5210, 5210, 5179, 5202, 5178, 4706, 4689, 5184, 5183, 5272, 5275, 5278, 5179, 5202, 5178, 4706, 4689, 5184, 5183, 5281, 5284, 5287, 5191, 5190, 5189, 4706, 4705, 5292, 5200, 5199, 5198, 5203, 5202, 5201, 5210, 5214, 5213, 5212, 5296, 5298, 5300, 5303, 4261, 5260, 5259, 2597, 2598, 4257, 5235, 5234, 2617, 2618, 5260, 5259, 5307, 5256, 4258, 4261, 5260, 5259, 4261, 4261, 4262, 4797, 2815, 2816, 2826, 2827, 4799, 5307, 5306, 5305, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 5377, 5376, 5378, 5380, 5379, 5381, 5443, 5442, 5417, 5444, 5445, 5416, 5449, 5382, 1572, 1573, 5452, 5451, 5383, 4066, 5455, 5455, 5454, 1581, 5479, 5461, 5460, 5459, 4068, 5481, 5443, 5442, 5426, 5444, 5445, 5416, 5449, 5388, 1598, 1599, 5452, 5451, 5389, 5088, 5455, 5455, 5454, 1607, 5486, 5391, 5461, 5459, 5472, 5385, 5474, 5473, 5443, 5442, 5441, 5444, 5445, 5416, 5449, 5388, 1625, 1626, 5452, 5451, 5386, 4066, 5455, 5455, 5454, 1634, 5458, 5391, 5461, 5431, 5449, 5388, 5452, 5451, 5389, 4068, 5455, 5455, 5454, 1648, 5458, 5391, 5461, 5431, 5445, 5416, 5443, 5442, 5441, 5444, 5449, 5388, 1661, 1662, 5455, 5455, 5454, 1666, 5452, 5451, 5389, 5088, 5457, 5428, 5391, 5461, 5431, 5472, 5471, 5394, 5393, 5392, 5395, 5396, 5398, 1739, 1740, 1741, 1742, 5401, 5400, 5399, 5402, 5403, 5405, 1749, 1750, 1751, 1752, 5407, 5406, 5409, 5408, 5411, 5410, 
5413, 5412, 5443, 5441, 5420, 5444, 5445, 5416, 5449, 5421, 5422, 1820, 5425, 5424, 5423, 1824, 1825, 1826, 1827, 1828, 5443, 5441, 5415, 5444, 5445, 5416, 5449, 5421, 1837, 1838, 5425, 5424, 5418, 1842, 5458, 5457, 5456, 5414, 5443, 5441, 5415, 5422, 1851, 5425, 5424, 5423, 1855, 1856, 1857, 1858, 5443, 5441, 5415, 5444, 5445, 5416, 5449, 5421, 1867, 1868, 5425, 5424, 5418, 1872, 5458, 5457, 5419, 1876, 5443, 5442, 5417, 5449, 5421, 1882, 1883, 5425, 5424, 5418, 1887, 5458, 5457, 5419, 1891, 5443, 5420, 5426, 5444, 5447, 5449, 5421, 5422, 1900, 5425, 5424, 5423, 1904, 1905, 1906, 1907, 1908, 5439, 5436, 5443, 5442, 5426, 5444, 5445, 5447, 5430, 5429, 1960, 1961, 5451, 5427, 1964, 1965, 5458, 5457, 5428, 5461, 5431, 5460, 5451, 5450, 1974, 5455, 5454, 1977, 5458, 5457, 5428, 5443, 5442, 5441, 5444, 5445, 5447, 5430, 5429, 1989, 1990, 5452, 5088, 5455, 1994, 5458, 5457, 5456, 5461, 5431, 5460, 5437, 5440, 2003, 2004, 2005, 2006, 5432, 5433, 2009, 2010, 5434, 2016, 2017, 5435, 5437, 5436, 5438, 5440, 5439, 5443, 5442, 5441, 5444, 5445, 5447, 5449, 5448, 2086, 2087, 5452, 5451, 5450, 5088, 5455, 5455, 5454, 2095, 5458, 5457, 5456, 5461, 5460, 5459, 5462, 5464, 5465, 5467, 5468, 5469, 5472, 5471, 5474, 5473, 2185, 2186, 5226, 5229, 5232, 5540, 2200, 2201, 2202, 2226, 5237, 5583, 5242, 5586, 5247, 5495, 5494, 5496, 5497, 5570, 5571, 5506, 2303, 2304, 2305, 2306, 5508, 2316, 2317, 2318, 5571, 2322, 2323, 5564, 5509, 5540, 2328, 2329, 5564, 5563, 5540, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5540, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5540, 2415, 2416, 4696, 4696, 4696, 4699, 2474, 2475, 2476, 2477, 2478, 5560, 5561, 5562, 5570, 5571, 2494, 2495, 2496, 2500, 2501, 2502, 5564, 5563, 5570, 5571, 2526, 2527, 2528, 2529, 5569, 5568, 5570, 5571, 2564, 5602, 2582, 2583, 5597, 5590, 5589, 5588, 5650, 5649, 5655, 5650, 5649, 2611, 2614, 2615, 5660, 2648, 2649, 5597, 5590, 5589, 5588, 2656, 2657, 2667, 5597, 5598, 5591, 5619, 5618, 5620, 2695, 5597, 5596, 2703, 2704, 5598, 
5597, 5596, 2722, 5602, 5619, 5618, 5620, 5629, 5628, 5630, 5619, 5618, 5620, 5629, 5628, 5630, 2802, 5636, 2809, 2814, 5650, 5649, 2828, 5676, 5650, 5649, 2838, 2839, 2840, 121, 122, 123, 124, 125, 126, 127, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5775, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5799, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5825, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5861, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 5884, 5886, 1743, 1744, 1745, 1746, 1747, 1748, 5894, 5896, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 5920, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 5932, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 5950, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 5962, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 5977, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 6000, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 6014, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 6043, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 6057, 6059, 2007, 2008, 6063, 2015, 6066, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 6082, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 6108, 5227, 
5230, 5233, 2196, 6114, 5238, 5243, 5248, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 6130, 6132, 2307, 6135, 2319, 6139, 2325, 2326, 2327, 6144, 2331, 2332, 2333, 6151, 6154, 2397, 6157, 6159, 6162, 2414, 6165, 2437, 2458, 2471, 2473, 6171, 6174, 2479, 2480, 2482, 2489, 2490, 6181, 6184, 2503, 2504, 2505, 2506, 6192, 2530, 2531, 2532, 2535, 5309, 2567, 6201, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5313, 6213, 6216, 2650, 2651, 2652, 2653, 6222, 5317, 2670, 2671, 2672, 2681, 2682, 2683, 5319, 2698, 2699, 6234, 2705, 2706, 2707, 5321, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5323, 2805, 5325, 5673, 2824, 2825, 6258, 2836, 2837, 6263, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6273, 6276, 6279, 6285, 6288, 6292, 6295, 6299, 6305, 6308, 6312, 6315, 6318, 6320, 6322, 6328, 6331, 6335, 6339, 6342, 6344, 6348, 6352, 6357, 6361, 6364, 6367, 6371, 6373, 6376, 6378, 6386, 6394, 6396, 6398, 6400, 6402, 6408, 5914, 6411, 5921, 6415, 6421, 6424, 6427, 6431, 5945, 6435, 5951, 6439, 6445, 6448, 6451, 6454, 6457, 6460, 6463, 6466, 6471, 5994, 6474, 6001, 6480, 6486, 6489, 6491, 6494, 6497, 6499, 6501, 6504, 6510, 6516, 6519, 6532, 6535, 6537, 6543, 6546, 6550, 6553, 6556, 6565, 6567, 6115, 6325, 6282, 6302, 6560, 6558, 6562, 6282, 6507, 6302, 6325, 6354, 6562, 6578, 6381, 6383, 6389, 6391, 6136, 5605, 6591, 5608, 6595, 6405, 6442, 6507, 6418, 6558, 6405, 6418, 6507, 6442, 6507, 6507, 6560, 6152, 6160, 6540, 6560, 6558, 6525, 6533, 6526, 6483, 6507, 6514, 6512, 6560, 6558, 6533, 6530, 6526, 6528, 6540, 6560, 6558, 6526, 6528, 6172, 6182, 6185, 6619, 6540, 6560, 
6558, 6562, 6193, 6624, 6588, 6613, 6612, 5310, 6570, 6569, 6588, 6580, 6579, 6631, 6633, 6621, 6620, 6635, 6572, 6572, 6615, 6614, 6637, 5314, 6575, 6574, 6588, 6580, 6579, 6642, 6644, 6582, 6581, 6588, 6586, 6613, 5318, 6648, 6592, 6651, 6596, 6588, 6613, 6612, 5320, 6655, 6586, 6613, 6611, 6658, 6588, 6613, 6612, 5322, 6592, 6663, 6596, 6666, 6599, 6669, 6603, 6672, 6613, 6612, 6611, 5324, 6615, 6614, 5326, 5674, 6621, 6620, 6679, 6626, 6625, 6682, 6264, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6280, 6289, 6293, 6296, 6300, 6309, 6313, 6316, 6323, 6332, 6336, 6340, 6345, 6349, 6353, 6358, 6365, 6368, 6374, 6379, 6387, 6403, 6412, 6416, 6425, 6428, 6432, 6436, 6440, 6449, 6452, 6455, 6461, 6464, 6467, 6475, 6481, 6492, 6495, 6502, 6505, 6517, 6520, 6538, 6547, 6551, 6554, 6557, 2203, 6799, 2209, 6787, 2215, 6792, 2221, 2222, 2223, 6785, 6784, 2227, 6787, 2233, 6803, 2239, 6792, 6796, 2246, 6799, 6803, 2257, 6808, 2262, 6813, 2291, 2293, 2294, 2296, 6818, 6816, 2334, 6821, 6824, 2339, 6834, 2344, 6842, 6845, 2349, 6826, 2354, 6525, 6533, 6866, 2359, 6821, 6824, 2364, 6826, 2369, 6838, 6832, 2374, 6834, 2379, 6838, 2384, 6842, 6845, 2389, 6866, 2426, 6861, 2432, 2433, 2434, 2435, 2436, 2438, 6847, 6852, 6848, 6852, 6851, 2447, 6855, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6861, 2466, 2467, 6525, 6533, 2470, 2472, 6533, 6530, 2516, 6861, 2522, 2523, 2524, 6866, 6927, 2561, 2562, 2563, 6571, 2577, 2578, 2579, 2580, 2581, 6946, 6868, 2593, 2594, 6903, 2600, 2601, 6934, 2607, 2608, 6868, 6576, 2643, 2644, 2645, 2646, 2647, 6962, 2654, 2655, 6927, 2664, 2665, 2666, 6649, 6903, 2680, 6904, 2685, 6927, 2692, 2693, 2694, 2700, 2701, 2702, 6659, 6886, 2719, 2720, 2721, 6887, 2737, 6889, 2742, 6903, 2771, 6904, 2776, 6926, 2799, 2800, 2801, 6934, 2807, 2808, 6927, 6928, 2822, 2823, 6934, 2834, 2835, 6939, 6949, 6954, 7004, 7004, 7004, 6971, 6976, 6985, 
6987, 6989, 6991, 6993, 6997, 7004, 7004, 7004, 7007, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7048, 2205, 7050, 7049, 6337, 7040, 2211, 7042, 7041, 5784, 7044, 2217, 7046, 7045, 5808, 7095, 2224, 2225, 7040, 2229, 7042, 7041, 5784, 7071, 2235, 7053, 7052, 5789, 7044, 2241, 7046, 7045, 5808, 2245, 7048, 2248, 7050, 7049, 6337, 2252, 7053, 7052, 6350, 7055, 2258, 7057, 7056, 6811, 2263, 7059, 7060, 2297, 2298, 7061, 2336, 7062, 2338, 7068, 2341, 7069, 7070, 7074, 2346, 7075, 2348, 7063, 2351, 7064, 7065, 2355, 2356, 2357, 7061, 2361, 7062, 2363, 7063, 2366, 7064, 7065, 7066, 2371, 7067, 2373, 7068, 2376, 7069, 7070, 7071, 2381, 7072, 7073, 7074, 2386, 7075, 2388, 2390, 7083, 2428, 7085, 7084, 7086, 7153, 7155, 7076, 2440, 2441, 2442, 7077, 2444, 2445, 7079, 7080, 2449, 7166, 7081, 7168, 7170, 7083, 2462, 7085, 7084, 7086, 7176, 2468, 2469, 2492, 2493, 7083, 2518, 7085, 7084, 7086, 7186, 2525, 7172, 7156, 7172, 7171, 2560, 7191, 2576, 7197, 2592, 7202, 2599, 2606, 7208, 2616, 2642, 7214, 7218, 2663, 7221, 2679, 2684, 7172, 7156, 2691, 7230, 7233, 7172, 7156, 7172, 7171, 2718, 7238, 2736, 2741, 2770, 2775, 7172, 7156, 7172, 7171, 7180, 7179, 2798, 7250, 2806, 7254, 2813, 2821, 7258, 2833, 7261, 2848, 6629, 2860, 2866, 2868, 2870, 6640, 2890, 6968, 2903, 2908, 6656, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 2962, 2966, 2970, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 7313, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2292, 2295, 7348, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 7366, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 
2429, 2430, 2431, 2439, 7403, 2443, 7406, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7421, 7423, 2517, 2519, 2520, 2521, 7398, 2553, 2554, 7412, 2558, 2559, 7192, 7311, 7194, 7198, 7429, 7430, 7429, 7430, 7311, 7329, 7419, 7344, 7211, 7215, 7222, 7367, 7398, 2689, 2690, 7231, 7234, 7398, 2711, 2712, 7412, 2716, 2717, 7239, 7367, 7367, 7392, 7398, 2783, 2784, 7412, 2791, 2792, 2796, 2797, 7251, 7429, 7430, 7429, 7430, 2856, 7439, 7441, 7466, 7442, 7477, 7444, 2888, 7478, 2894, 7450, 7451, 2910, 7463, 7464, 7465, 7466, 7475, 7477, 7478, 7480, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7552, 7554, 7556, 7558, 7560, 7562, 7565, 7567, 7569, 7571, 7573, 7575, 7577, 7579, 7582, 7109, 7586, 7588, 7589, 7591, 7593, 7596, 7598, 7602, 7604, 7607, 7609, 7612, 7615, 7617, 7619, 7621, 7626, 7628, 7630, 7634, 7636, 2552, 7640, 7622, 2557, 7643, 2574, 7564, 7195, 2590, 2591, 2604, 2605, 2625, 7564, 2633, 2640, 2641, 7212, 7429, 7590, 7599, 7597, 7129, 2678, 2688, 7662, 2710, 7667, 7622, 2715, 7670, 7597, 7594, 7599, 7592, 7129, 2735, 7592, 7594, 7597, 7599, 7129, 2755, 7603, 7605, 7608, 7610, 7613, 7616, 7148, 2769, 2782, 7677, 7622, 7624, 7410, 2790, 7680, 7419, 7682, 7429, 2819, 2820, 2831, 2832, 7435, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7448, 2902, 2904, 7454, 7478, 7461, 2926, 2928, 2944, 2946, 7473, 2958, 2961, 2965, 2969, 125, 126, 127, 7837, 7838, 7839, 2556, 7810, 7811, 7808, 7809, 7812, 7813, 2575, 7843, 7844, 7843, 7844, 7808, 7809, 7810, 7811, 7812, 7813, 2626, 7814, 7815, 7816, 7817, 7818, 7819, 7820, 7821, 7835, 7822, 7823, 7824, 7826, 7825, 7844, 2661, 2662, 7830, 2674, 7829, 2676, 2677, 
7837, 7838, 7837, 7838, 7839, 2714, 7829, 2727, 7828, 2729, 7830, 2731, 7827, 2733, 2734, 7827, 2747, 7828, 2749, 7829, 2751, 7830, 2753, 2754, 7831, 2757, 7832, 2759, 7833, 2761, 7834, 2763, 7835, 2765, 7836, 2767, 2768, 7837, 7838, 7839, 2786, 2787, 7840, 2789, 7841, 7842, 2795, 2812, 7843, 7844, 7843, 7844, 7845, 7848, 2847, 7852, 7853, 7855, 7860, 7860, 7862, 2893, 7869, 2907, 2909, 7871, 7874, 2917, 7896, 7901, 2956, 7906, 7908, 7911, 7913, 7912, 7914, 7915, 7916, 7917, 7920, 7919, 7925, 7924, 7927, 7926, 7929, 7930, 7931, 7932, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7850, 2855, 2858, 2864, 7857, 2882, 2886, 2887, 7973, 7979, 2906, 2912, 2916, 7994, 8003, 8016, 2949, 2953, 8026, 8027, 2964, 2968, 8034, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 8041, 2996, 2997, 8043, 8044, 8047, 3009, 3010, 3013, 3014, 8050, 3019, 3021, 3023, 3025, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8224, 8192, 8250, 8250, 8194, 8199, 8213, 8195, 8219, 8197, 2854, 8201, 8203, 8205, 8219, 8207, 8213, 8209, 2876, 8219, 8211, 8213, 8217, 8215, 8221, 8219, 8217, 2892, 8243, 8227, 8226, 8245, 
8237, 8238, 2901, 8228, 8230, 8250, 8250, 8232, 8234, 8236, 8245, 8233, 8235, 8243, 2925, 8238, 8240, 8245, 8237, 8243, 8239, 2936, 8242, 8243, 8241, 8246, 8245, 8244, 2943, 8247, 8250, 8250, 8249, 8251, 2955, 2960, 8253, 8255, 2974, 8260, 8284, 8266, 2993, 8291, 3001, 3002, 3006, 8296, 8298, 3018, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2841, 2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 8320, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8450, 8453, 8455, 8460, 8462, 8465, 8467, 8470, 2891, 8474, 8476, 8478, 8482, 8485, 8487, 8489, 8491, 8493, 8495, 8497, 8499, 8501, 8504, 8506, 8448, 8457, 8458, 8506, 8479, 8506, 8480, 8506, 8502, 8507, 8507, 8508, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8451, 8578, 8580, 8582, 8471, 8586, 8483, 8590, 8593, 8596, 8505, 2971, 2973, 2979, 2983, 8584, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8456, 8463, 8468, 8587, 8591, 8594, 8597, 8704, 8708, 8708, 2992, 8710, 8710, 8714, 8718, 8726, 8727, 8727, 8717, 8728, 8726, 8727, 8727, 8726, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 2972, 2975, 8834, 8832, 2987, 8834, 8833, 8838, 8835, 2999, 3004, 8838, 8836, 8838, 8837, 3016, 3026, 3028, 3029, 3030, 3034, 8842, 3037, 3038, 3040, 3041, 3045, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8960, 2976, 2977, 2988, 2989, 2994, 2995, 8969, 8970, 3007, 3008, 3011, 3012, 8975, 3036, 8978, 8985, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8716, 9089, 9091, 9094, 8721, 8723, 9098, 9100, 8725, 8982, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9090, 9092, 9219, 9216, 9222, 9224, 9220, 9221, 9223, 9219, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 3027, 9344, 3032, 3033, 3035, 3039, 3042, 3043, 9345, 3046, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 3031, 3044, 9472, 9475, 9476, 9477, 9479, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9600, 8986, 9103, 9225, 9104, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9603, 9729, 9732, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9856, 9481, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9858, 
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 9985, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
// Host-side operator-selection table (the "h_" prefix suggests this is the host
// copy of the `Op` argument eventually handed to the device routine ac() --
// TODO(review): confirm against the host launch code, which is outside this chunk).
// In ac(), every computed slot evaluates
//     R[dst] = Op[k] ? R[B[k]] * R[C[k]] : R[B[k]] + R[C[k]];
// so a `1` entry selects multiplication and a `0` entry selects addition for the
// corresponding node of the generated arithmetic circuit.  This is flat,
// machine-generated data -- do not edit by hand.
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 128
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3072
#define SIZE_OF_AC 7296
/*
 * Arithmetic-circuit evaluator (device side).
 * Evaluates a fixed DAG of 57 fused add/multiply gates over a shared-memory
 * register file, n_iter times, and accumulates the root gate's value from
 * lane 0 into A[0].
 *   A      - per-lane input values (24 registers per lane); A[0] is
 *            overwritten with the accumulated result on exit
 *   B, C   - per-gate operand indices into the shared register file R
 *   Op     - per-gate flag: true => multiply operands, false => add them
 *   n_iter - number of evaluation passes to accumulate
 * Layout: R holds 81 "registers" per lane (24 inputs + 57 gate outputs),
 * strided by THREADS_PER_BLOCK so register k of lane i lives at R[i + k*t].
 * The __syncthreads() calls between gate batches publish earlier results
 * before gates that may read them through B/C cross-lane indices.
 * NOTE(review): `i` is the GLOBAL thread index but is used directly as the
 * lane index into R — this assumes a single block (BLOCKS_PER_GRID == 1);
 * confirm the launch configuration at the call site.
 */
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[81*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
/* every thread stores the same 0 -- a benign shared-memory race */
final=0;
/* load the 24 input registers for this lane */
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
/* gate batches: the barrier placement encodes the dependency levels of the
   circuit -- do not reorder statements across a __syncthreads() */
R[i + 24*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 25*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 26*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 27*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 28*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 29*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
__syncthreads();
R[i + 30*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 31*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 32*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 33*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
__syncthreads();
R[i + 34*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 35*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 36*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 37*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
__syncthreads();
R[i + 38*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 39*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 40*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 41*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
R[i + 42*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 43*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 44*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
__syncthreads();
R[i + 45*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 46*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 47*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 48*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
__syncthreads();
R[i + 49*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 50*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 51*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 52*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
__syncthreads();
R[i + 53*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 54*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
__syncthreads();
R[i + 55*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 56*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
__syncthreads();
R[i + 57*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 58*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
__syncthreads();
R[i + 59*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 60*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
__syncthreads();
R[i + 61*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
__syncthreads();
R[i + 62*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 63*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
__syncthreads();
R[i + 64*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
__syncthreads();
R[i + 65*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
__syncthreads();
R[i + 66*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
__syncthreads();
R[i + 67*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
__syncthreads();
R[i + 68*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
__syncthreads();
R[i + 69*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
__syncthreads();
R[i + 70*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
__syncthreads();
R[i + 71*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
__syncthreads();
R[i + 72*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
__syncthreads();
R[i + 73*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
__syncthreads();
R[i + 74*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
__syncthreads();
R[i + 75*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
__syncthreads();
R[i + 76*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
__syncthreads();
R[i + 77*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
__syncthreads();
R[i + 78*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
__syncthreads();
R[i + 79*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
__syncthreads();
R[i + 80*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
/* register 80 of lane 0 is the circuit output for this pass */
if (i==0) { final += R[80*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
1,694
|
#include "includes.h"
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
/* returns a seed for srand based on the time */
/* Per-block mean / SD / normalization kernel.
 * Each block owns one "column" chunk of a dim1 x dim1 matrix (selected by
 * blockIdx.x): it reduces a mean into mean_out[blockIdx.x], a spread value
 * into sd_out[blockIdx.x], then writes the normalized elements into output.
 *
 * FIX: `meansum` was accumulated (`meansum += ...`) without ever being
 * zeroed, so the block mean started from garbage; it is now initialized the
 * same way `sdsum` already was.
 *
 * NOTE(review): the two `extern __shared__` declarations `mean` and `sd`
 * alias the SAME dynamic shared-memory allocation (CUDA gives every
 * extern __shared__ array the same base address). That is only safe here
 * because `mean` is never read after `sd` starts being written -- confirm
 * before reordering anything.
 * NOTE(review): sd_out receives sum-of-squares/dim1 (a variance-like value),
 * not its square root -- verify that callers expect that.
 */
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//per-thread partial sums for the mean reduction
extern __shared__ float sd[];//per-thread partial sums for the SD reduction (aliases `mean`, see header)
__shared__ float meansum;//block total for the mean reduction
__shared__ float sdsum;//block total for the SD reduction
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//thread X index within the grid
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//thread Y index within the grid
int eva_block,index;
unsigned int thread_id = threadIdx.y;//lane id within the block (y-major layout)
unsigned int j = idx_y * dim1 + idx_x;//flat index of this thread's first element
__syncthreads();
mean[thread_id]=input[j];//each thread starts from its own element
/* fold in the elements beyond the first numThread rows of this chunk */
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//index of the remaining element
eva_block=index+blockIdx.x;
/* NOTE(review): the bound is checked on eva_block but input is read at
   index -- confirm this offset scheme against the host-side layout */
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/* thread 0 reduces the per-thread partials into the block mean */
if(thread_id==0)
{
meansum=0;/* FIX: was uninitialized before the accumulation below */
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//mean of the block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//squared deviation for this thread's element
/* fold in the squared deviations of the remaining elements */
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/* thread 0 reduces the per-thread partials into the block SD total */
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];
}
sd_out[blockIdx.x]=sdsum/dim1;//spread value of the block
}
__syncthreads();
/* normalize the first numThread elements of this block's chunk */
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/* normalize the remaining elements beyond the first numThread */
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];
}
}
}
|
1,695
|
/*************************************************************************************************************
* FILE: lakegpu.cu
*
* AUTHORS: attiffan Aurora T. Tiffany-Davis
* ssbehera Subhendu S. Behera
* wpmoore2 Wade P. Moore
*
* DESCRIPTION: Assist with modeling the surface of a lake,
* where some pebbles have been thrown onto the surface.
* The energy level at any point on the lake is influenced by
* the energy level on that point in the past,
* and by the current energy levels at neighboring points.
* This program takes into account all 8 neighboring points,
* and parallelizes the simulation by using EXACTLY ONE compute node,
* using multiple GPU threads.
*
* TO RUN: srun -N1 -n1 -p opteron -x c[53,101,102] --pty /bin/bash
* make -f p3.Makefile lake
* prun ./lake [lake size] [# pebbles] [duration of simulation in seconds] [# GPU threads]
*************************************************************************************************************/
// INCLUDES
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
// DEFINES
#define __DEBUG
#define TSCALE 1.0
#define VSQR 0.1
/*************************************************************************************************************
* FUNCTION: kf
*
* DESCRIPTION: Get the energy impact of a given pebble size on the lake based on time.
* Impact decreases as time increases.
*
* ARGUMENTS: nPebbleSize - The size of a given pebble
* nTime - The amount of time that has elapsed in the simulation
*
* AUTHORS: ssbehera Subhendu S. Behera
*************************************************************************************************************/
/* Energy impact of a pebble of size nPebbleSize after nTime seconds;
 * the impact decays exponentially with time.
 * FIX: use the double-precision exp() -- the previous expf() call silently
 * converted the double argument and result through float, losing precision
 * in an otherwise all-double computation. */
__device__ double kf(double nPebbleSize, double nTime)
{
return -exp(-TSCALE * nTime) * nPebbleSize;
}
/*************************************************************************************************************
* FUNCTION: evolve
*
* DESCRIPTION: Update the energy levels in the lake for every lake point.
* Each lake point's new energy level depends upon
* old energy levels and the energy levels of neighboring points.
* This version uses 9 points (point of interest and 8 neighboring points).
*
* ARGUMENTS: aDeviceEnergy - Array representing new energy levels at every point in the lake
* aDeviceEnergyStep0 - Array representing current energy levels at every point in the lake
* aDeviceEnergyStep1 - Array representing old energy levels at every point in the lake
* aDevicePebbleSizes - Array representing the pebble sizes at every point in the lake (sparse)
* nTime - The amount of time that has elapsed in the simulation
* nLakePointsOneAxis - The number of points in the map of the lake (one axis)
* timeStep - The amount of time between one simulation step and the next
* nPointSpacing - The spacing between two points on the lake
*
* RETURNS: None
*
* AUTHOR: ssbehera Subhendu S. Behera
*************************************************************************************************************/
/* 9-point stencil update: one thread per lake point.
 * Boundary points are pinned to 0; interior points combine the previous two
 * time steps, the 4 edge neighbors, the 4 diagonal neighbors (weighted 0.25),
 * and the pebble forcing term kf(). */
__global__ void evolve(
double *aDeviceEnergy,
double *aDeviceEnergyStep0,
double *aDeviceEnergyStep1,
double *aDevicePebbleSizes,
float nTime,
int nLakePointsOneAxis,
float timeStep,
double nPointSpacing
)
{
int idx, idy;
int nIndex;
/*
* calculate idx & idy.
*/
idx = threadIdx.x + blockIdx.x * blockDim.x;
idy = threadIdx.y + blockIdx.y * blockDim.y;
/* grid may overhang the lake when the size is not a multiple of blockDim */
if (idx <= nLakePointsOneAxis - 1 && idy <= nLakePointsOneAxis - 1) {
/*
* calculate energy only if you are inside the lake.
*/
nIndex = idx * nLakePointsOneAxis + idy;
/* boundary condition: lake edges carry no energy */
if (idx == 0 || idx == nLakePointsOneAxis - 1 || idy == 0 || idy == nLakePointsOneAxis - 1)
aDeviceEnergy[nIndex] = 0.;
else
aDeviceEnergy[nIndex] = 2 * aDeviceEnergyStep1[nIndex] - aDeviceEnergyStep0[nIndex] +
VSQR * (timeStep * timeStep) * ((aDeviceEnergyStep1[nIndex - 1] +
aDeviceEnergyStep1[nIndex + 1] + aDeviceEnergyStep1[nIndex + nLakePointsOneAxis]
+ aDeviceEnergyStep1[nIndex - nLakePointsOneAxis] + 0.25 *
(aDeviceEnergyStep1[nIndex + nLakePointsOneAxis - 1] +
aDeviceEnergyStep1[nIndex + nLakePointsOneAxis + 1] +
aDeviceEnergyStep1[nIndex - nLakePointsOneAxis - 1] +
aDeviceEnergyStep1[nIndex - nLakePointsOneAxis + 1]) -
5 * aDeviceEnergyStep1[nIndex]) /
(nPointSpacing * nPointSpacing) + kf(aDevicePebbleSizes[nIndex], nTime));
}
}
/*************************************************************************************************************
* FUNCTION: run_gpu
*
* DESCRIPTION: Simulate the energy changes over time in the lake, on the GPU
*
* ARGUMENTS: aEnergy - Array representing energy levels at every point in the lake
* aEnergyStep0 - Array representing energy levels at every point in the lake at time 0
* aEnergyStep1 - Array representing energy levels at every point in the lake at time 1
* aPebbleSizes - Array representing the pebble sizes at every point in the lake (sparse)
* nLakePointsOneAxis - The number of points in the map of the lake (one axis)
* nPointSpacing - The spacing between two points on the lake
* nFinishTime - The duration of the simulation in seconds
* nThreads - The number of threads to be used per block
* for instance, with nthreads=8,
* and a domain of grid points (nLakePointsOneAxis=128 x 128),
* you will create (nLakePointsOneAxis/nthreads)x(nLakePointsOneAxis/nthreads) = (16 x 16) blocks,
* with (8 x 8) threads on each block.
*
* RETURNS: None
*
* AUTHORS: ssbehera Subhendu S. Behera
*************************************************************************************************************/
void run_gpu(
double *aEnergy,
double *aEnergyStep0,
double *aEnergyStep1,
double *aPebbleSizes,
int nLakePointsOneAxis,
double nPointSpacing,
double nFinishTime,
int nThreads
)
{
cudaEvent_t kstart, kstop;
float ktime, timeStep, nTime = 0.0f;
double *aDeviceEnergy, *aDeviceEnergyStep0, *aDeviceEnergyStep1, *aDevicePebbleSizes;
double *aEnergyCurrent, *aEnergyOld;
int nLakePointsTotal = nLakePointsOneAxis * nLakePointsOneAxis;
/* ceil(nLakePointsOneAxis / nThreads) blocks per axis */
int blockDimension = (nLakePointsOneAxis / nThreads) +
(nLakePointsOneAxis % nThreads != 0 ? 1 : 0);
dim3 threadsPerBlock(nThreads, nThreads);
dim3 noOfBlocks(blockDimension, blockDimension);
/*
* allocate host memory
*/
aEnergyCurrent = (double *)malloc(sizeof(double) * nLakePointsTotal);
aEnergyOld = (double *)malloc(sizeof(double) * nLakePointsTotal);
/*
* copy the data of energy step 0, step 1 to current & old energy respectively.
*/
memcpy(aEnergyOld, aEnergyStep0, sizeof(double) * nLakePointsTotal);
memcpy(aEnergyCurrent, aEnergyStep1, sizeof(double) * nLakePointsTotal);
/*
* allocate memory on the device.
* NOTE(review): no CUDA call in this function checks its return status --
* an early failure (e.g. out-of-memory) will surface only as wrong results.
*/
cudaMalloc((void **)&aDeviceEnergy, sizeof(double) * nLakePointsTotal);
cudaMalloc((void **)&aDeviceEnergyStep0, sizeof(double) * nLakePointsTotal);
cudaMalloc((void **)&aDeviceEnergyStep1, sizeof(double) * nLakePointsTotal);
cudaMalloc((void **)&aDevicePebbleSizes, sizeof(double) * nLakePointsTotal);
/*
* setup the timers before copying the memory from host to device.
*/
cudaSetDevice(0);
cudaEventCreate(&kstart);
cudaEventCreate(&kstop);
/*
* Start recording time.
*/
cudaEventRecord(kstart, 0);
/*
* copy the pebblesize data only once to device memory.
*/
cudaMemcpy(aDevicePebbleSizes, aPebbleSizes, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis,
cudaMemcpyHostToDevice);
/* time-step loop: dt = h/2; each iteration advances the simulation once.
 * NOTE(review): both state arrays are re-uploaded every step even though the
 * data never leaves the device logically -- keeping state device-resident
 * and ping-ponging pointers would remove two PCIe copies per step. */
for (timeStep = nPointSpacing / 2; nTime < nFinishTime; nTime += timeStep) {
/*
* copy data
*/
cudaMemcpy(aDeviceEnergyStep0, aEnergyOld, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis,
cudaMemcpyHostToDevice);
cudaMemcpy(aDeviceEnergyStep1, aEnergyCurrent, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis,
cudaMemcpyHostToDevice);
/*
* Make the kernel call.
*/
evolve<<<noOfBlocks, threadsPerBlock>>>(aDeviceEnergy, aDeviceEnergyStep0,
aDeviceEnergyStep1, aDevicePebbleSizes,
nTime, nLakePointsOneAxis, timeStep, nPointSpacing);
/*
* copy the current energy to old energy as cpu is free.
*/
memcpy(aEnergyOld, aEnergyCurrent, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis);
/*
* copy the new energy to current energy directly from the device.
* (this blocking memcpy also synchronizes with the kernel above)
*/
cudaMemcpy(aEnergyCurrent, aDeviceEnergy, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis,
cudaMemcpyDeviceToHost);
}
memcpy(aEnergy, aEnergyCurrent, sizeof(double) * nLakePointsOneAxis * nLakePointsOneAxis);
/* Stop GPU computation timer */
cudaEventRecord(kstop, 0);
cudaEventSynchronize(kstop);
cudaEventElapsedTime(&ktime, kstart, kstop);
printf("GPU computation: %f msec\n", ktime);
/*
* Free the device & host memory.
*/
free(aEnergyCurrent);
free(aEnergyOld);
cudaFree(aDeviceEnergy);
cudaFree(aDeviceEnergyStep0);
cudaFree(aDeviceEnergyStep1);
cudaFree(aDevicePebbleSizes);
/* timer cleanup */
cudaEventDestroy(kstart);
cudaEventDestroy(kstop);
}
|
1,696
|
#include <cuda.h>
#include <iostream>
using std::cout;
using std::endl;
#include<fstream>
using std::ofstream;
#include <algorithm>
using std::fill;
/***** ERROR CHECKING MACRO *****/
cudaError_t _TempErrorCode;
#define CHECK_CUDA_ERROR() _TempErrorCode = cudaGetLastError(); if(_TempErrorCode) fprintf(stderr,"!!CUDA ERROR in %s at line %d : %s\n",__FILE__,__LINE__,cudaGetErrorString(_TempErrorCode));
/***** CUSTOM COMMAND LINE ARGUMENT PARSING *****/
//list of global variables (with default values)
int NumberOfArgs = 1; //how many constants are listed below
// you can add your own global variables to be parsed here
// (I start with underline to distiguish that it is a global variable):
int _ArraySize = 1024;
float _IncrementValue = 1.0f;
char _OutputFile[] = "output.txt";
//this will display the global variable values before program starts running
// Print the current values of the parsed global parameters.
void displayGlobals(void){
    cout << "Setting ArraysSize to " << _ArraySize << endl;
    cout << "Setting IncrementValue to " << _IncrementValue << endl;
}
//this parses the command line arguments
// Parse <ArraySize> and <IncrementValue> from the command line into the
// corresponding globals; prints usage plus the defaults when they are absent.
// FIX: the old guard (`arg_count > NumberOfArgs`, with NumberOfArgs == 1)
// accepted a single extra argument and then read args[2] out of bounds;
// both values are parsed, so both must be present.
void parseArguments(int arg_count, char* args[]){
    //the first argument is always the program
    cout << "Running (" << args[0] << ")" << endl;
    if(arg_count > 2){
        _ArraySize = atoi(args[1]);
        _IncrementValue = atof(args[2]);
        //for strings just copy the pointer? (address):
        //OutputFile = args[3];
        displayGlobals();
    }else{
        //output usage
        cout << "Usage: "<< args[0] << " <ArraySize> <IncrementValue> " << endl;// <OutputFile>" << endl;
        //show default values
        displayGlobals();
    }
}
/***** A DEVICE FUNCTION *****/
/***** A DEVICE FUNCTION: returns the sum of its two arguments *****/
__device__ float AddNum(float a, float b){
    return b + a;
}
/***** CUDA KERNEL ******/
/** this function increments the inArray by increment for all indicies less than MaxIndex **/
/** Adds `increment` to every element of inArray with index below MaxIndex,
    writing the result into outArray (one thread per element). **/
__global__ void incrementKernel(float* outArray,float* inArray, int MaxIndex, float increment){
    //flat 1D global index from the built-in launch geometry
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if(gid >= MaxIndex) return; //threads past the end of the array do nothing
    outArray[gid] = AddNum(inArray[gid], increment);
}
/***** MAIN *****/
/***** MAIN *****
 * Fills a host array with ones, increments it on the GPU, and writes the
 * result to _OutputFile.
 * Fixes over the previous version:
 *  - cudaMemset takes (ptr, byteValue, byteCount); the arguments were
 *    swapped, making both calls no-ops with a bogus fill value.
 *  - new[] allocations were released with `delete` instead of `delete[]`
 *    (undefined behavior).
 *  - cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
 *  - block count now uses ceil division instead of always adding one block.
 */
int main(int argc, char* argv[]){
    parseArguments(argc, argv); //sets _ArraySize / _IncrementValue globals

    //Device array pointers
    float* inArray_dev;
    float* outArray_dev;
    //Host array pointers
    float* inArray_host;
    float* outArray_host;

    //initialize arrays on host (using c++)
    inArray_host = new float[_ArraySize];
    outArray_host = new float[_ArraySize];
    fill(inArray_host, inArray_host+_ArraySize, 1.0f); //fill with ones
    fill(outArray_host, outArray_host+_ArraySize, 0.0f); //fill with zeros

    //initialize arrays on device (GPU)
    cudaMalloc((void**)&inArray_dev, sizeof(float)*_ArraySize);
    CHECK_CUDA_ERROR();
    cudaMalloc((void**)&outArray_dev, sizeof(float)*_ArraySize);
    CHECK_CUDA_ERROR();

    //zero both device arrays (FIX: value and count were previously swapped)
    cudaMemset(inArray_dev, 0, sizeof(float)*_ArraySize);
    CHECK_CUDA_ERROR();
    cudaMemset(outArray_dev, 0, sizeof(float)*_ArraySize);
    CHECK_CUDA_ERROR();

    //copy input array to device
    cudaMemcpy(inArray_dev, inArray_host, sizeof(float)*_ArraySize, cudaMemcpyHostToDevice);

    //__LAUNCH KERNEL__ : 1D grid, kernel bounds-checks the tail block
    int threadsPerBlock = 512; //this is typically the max for most GPUs except Fermi
    int blockCount = (_ArraySize + threadsPerBlock - 1) / threadsPerBlock; //ceil division
    if(blockCount == 0){
        blockCount = 1; //a zero-block launch is invalid; keep at least one block
    }
    incrementKernel <<< blockCount,threadsPerBlock >>> (outArray_dev, inArray_dev, _ArraySize, _IncrementValue);
    cudaDeviceSynchronize(); //FIX: cudaThreadSynchronize() is deprecated
    CHECK_CUDA_ERROR();

    // copy back results
    cudaMemcpy(outArray_host, outArray_dev, sizeof(float)*_ArraySize, cudaMemcpyDeviceToHost);
    CHECK_CUDA_ERROR();

    //print output
    ofstream outFileStream;
    outFileStream.open(_OutputFile);
    for(int i = 0; i<_ArraySize; i++){
        outFileStream << outArray_host[i] << endl;
    }
    cout << "Data saved in file " << _OutputFile << endl;

    //cleanup
    outFileStream.close();
    delete[] inArray_host; //FIX: arrays from new[] need delete[]
    delete[] outArray_host;
    cudaFree(inArray_dev);
    cudaFree(outArray_dev);
}
|
1,697
|
/* Small CUDA exercise to detect bad (non-coalesced) memory access,
and to make it coalesced.
For NMAX=1000000, STRIDE=30, BLOCK_SIZE=128, the speedup (from
non-coalesced to coalesced versions of the code) should be ~3.8x.
Make sure that the "Result:" value printed by the code is (almost)
identical in both original and modified versions of the code. If not,
you have a bug!
To compile:
nvcc -O2 -arch=sm_20 coalesce.cu -o coalesce
The best/average timings:
../best_time.sh ./coalesce
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 10
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Size of the array:
#define NMAX 1000000
// "Stride" (the smaller dimension) of the 2D array:
#define STRIDE 30
// Input 2D array:
float h_A[NMAX][STRIDE];
__device__ float d_A[NMAX][STRIDE];
// The result will go here:
__device__ float d_B[NMAX];
float h_B[NMAX];
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
/* Compute X - Y (two struct timeval values) as float seconds into *result.
   Returns 1 when the difference is negative, otherwise 0.
   WARNING: Y is used as scratch space and is modified by the carry logic. */
int
timeval_subtract (float *result, struct timeval *x, struct timeval *y)
{
  long sec, usec;
  int nsec;

  /* carry into y so the usec difference lands in a sane range */
  if (x->tv_usec < y->tv_usec) {
    nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec  += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    nsec = (y->tv_usec - x->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec  -= nsec;
  }

  /* remaining difference; tv_usec part is certainly positive here */
  sec  = x->tv_sec  - y->tv_sec;
  usec = x->tv_usec - y->tv_usec;
  *result = ((float)usec) / 1e6 + (float)sec;

  /* negative iff the seconds comparison says so */
  return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
/* One thread per row of d_A: reads element d_A[i][j] and stores pow(.,0.73)
 * into d_B[i].  Per the exercise header above, this is the deliberately
 * NON-COALESCED version: j is uniform within a block, so consecutive threads
 * of a warp read addresses STRIDE floats apart -- each lane touches a
 * different memory segment.  The exercise is to rework the access pattern,
 * not to treat this as a bug. */
__global__ void MyKernel ()
{
// Global index within a block:
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= NMAX)
return;
// Second array index is a function of the blockID:
int j = blockIdx.x % STRIDE;
d_B[i] = pow(d_A[i][j], 0.73f);
return;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main (int argc,char **argv)
{
struct timeval tdr0, tdr1, tdr;
float restime;
int devid, devcount, error, Max_gridsize;
/* find number of device in current "context" */
cudaGetDevice(&devid);
/* find how many devices are available */
if (cudaGetDeviceCount(&devcount) || devcount==0)
{
printf ("No CUDA devices!\n");
exit (1);
}
else
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties (&deviceProp, devid);
printf ("Device count, devid: %d %d\n", devcount, devid);
printf ("Device: %s\n", deviceProp.name);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
Max_gridsize = deviceProp.maxGridSize[0];
}
// Loop to run the timing test multiple times:
int kk;
for (kk=0; kk<NTESTS; kk++)
{
// Initializing the structure elements:
// (rand() is unseeded on purpose, so every run uses the same sequence
// and "Result:" is comparable across code versions)
for (int i=0; i<NMAX; i++)
for (int j=0; j<STRIDE; j++)
h_A[i][j] = (float)rand()/(float)RAND_MAX;
// Copying the data to device:
// (the `error = ...` assignments inside the conditions below are
// deliberate: the status is tested and reported in one step)
if (error = cudaMemcpyToSymbol( d_A, h_A, sizeof(h_A), 0, cudaMemcpyHostToDevice))
{
printf ("Error %d\n", error);
exit (error);
}
// Number of blocks of threads (ceil division over NMAX):
int Nblocks = (NMAX+BLOCK_SIZE-1) / BLOCK_SIZE;
if (Nblocks > Max_gridsize)
{
printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
exit (1);
}
// Only the code between the two horizontal lines is timed:
// (device is synchronized before and after, so only the kernel is measured)
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr0, NULL);
//--------------------------------------------------------------------------------
// The kernel call:
MyKernel <<<Nblocks, BLOCK_SIZE>>> ();
//--------------------------------------------------------------------------------
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
gettimeofday (&tdr1, NULL);
// tdr copy: timeval_subtract mutates its y argument, so preserve tdr0
tdr = tdr0;
timeval_subtract (&restime, &tdr1, &tdr);
// Copying the result back to host:
if (error = cudaMemcpyFromSymbol (h_B, d_B, sizeof(h_B), 0, cudaMemcpyDeviceToHost))
{
printf ("Error %d\n", error);
exit (error);
}
if (error = cudaDeviceSynchronize())
{
printf ("Error %d\n", error);
exit (error);
}
// Adding up the results, for accuracy/correctness testing:
double result = 0.0;
for (int i=0; i<NMAX; i++)
{
result += h_B[i];
}
printf ("Result: %e\n\n", result);
printf ("Time: %e\n", restime);
} // kk loop
return 0;
}
|
1,698
|
#include <iostream>
#define SIZE (10*1024*1024)
float cuda_malloc_test(int size, bool up);
// Benchmark pinned-memory transfer bandwidth in both directions.
int main(void) {
    const float MB = (float)100 * SIZE * sizeof(int)/1024/1024; //total MB moved per run

    float t = cuda_malloc_test(SIZE, true);
    std::cout << "Time using cudaMalloc: " << t << std::endl;
    std::cout << "MB/s during copy up: " << MB/(t/1000) << std::endl;

    t = cuda_malloc_test(SIZE, false);
    std::cout << "Time using cudaMalloc: " << t << std::endl;
    std::cout << "MB/s during copy down: " << MB/(t/1000) << std::endl;
}
// Time 100 host<->device copies of `size` ints using pinned host memory.
// `up` selects the direction: true = host-to-device, false = device-to-host.
// Returns the elapsed wall time in milliseconds (measured with CUDA events).
float cuda_malloc_test(int size, bool up) {
    cudaEvent_t evStart, evStop;
    float ms;
    int *h_buf, *d_buf;

    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);

    // pinned host buffer enables full-bandwidth DMA transfers
    cudaMallocHost((void**)&h_buf, size * sizeof(*h_buf));
    cudaMalloc((void**)&d_buf, size * sizeof(*d_buf));

    cudaEventRecord(evStart, 0);
    for (int iter = 0; iter < 100; iter++) {
        if (up) {
            cudaMemcpy(d_buf, h_buf, size * sizeof(*d_buf), cudaMemcpyHostToDevice);
        } else {
            cudaMemcpy(h_buf, d_buf, size * sizeof(*d_buf), cudaMemcpyDeviceToHost);
        }
    }
    cudaEventRecord(evStop, 0);
    cudaEventSynchronize(evStop);
    cudaEventElapsedTime(&ms, evStart, evStop);

    cudaFreeHost(h_buf);
    cudaFree(d_buf);
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);
    return ms;
}
|
1,699
|
#include <stdio.h>
/* Prints a greeting from every thread of a 2D grid of 2D blocks.
 * FIX: the global thread id was computed as
 *   blockId * blockDim.y + blockDim.x + threadIdx.x + threadIdx.y * blockDim.x
 * which multiplies by only one block dimension and adds blockDim.x as a
 * constant, producing duplicate/shifted ids.  The correct linearization is
 * blockId * (threads per block) + intra-block offset. */
__global__ void helloWorld(float f) {
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadsPerBlock = blockDim.x * blockDim.y;
    int threadId = blockId * threadsPerBlock + threadIdx.y * blockDim.x + threadIdx.x;
    printf("Hello block %i (x %i, y %i) running thread %i (x %i, y %i), f=%f\n", blockId, blockIdx.x, blockIdx.y, threadId, threadIdx.x, threadIdx.y, f);
}
// Launch helloWorld on a 2x2 grid of 2x2 blocks (16 threads total).
int main() {
    dim3 gridShape(2, 2, 1);
    dim3 blockShape(2, 2, 1);
    helloWorld<<<gridShape, blockShape>>>(1.2345f);
    cudaDeviceReset(); //tears down the context, flushing device-side printf output
    return 0;
}
|
1,700
|
#include <cuda_runtime_api.h>
#include <stdint.h>
/* Catmull-Rom cubic interpolation weights w0..w3 as a function of the
 * fractional offset a.  Each is the Horner form of the commented-out
 * expanded polynomial; the exact evaluation order is kept for bitwise
 * float reproducibility. */
__device__ float catrom_w0_f32(float a) {
    //return -0.5f*a + a*a - 0.5f*a*a*a;
    return a*(-0.5f + a*(1.0f - 0.5f*a));
}
/* weight for the sample at offset 0 (the left-center tap) */
__device__ float catrom_w1_f32(float a) {
    //return 1.0f - 2.5f*a*a + 1.5f*a*a*a;
    return 1.0f + a*a*(-2.5f + 1.5f*a);
}
/* weight for the sample at offset +1 (the right-center tap) */
__device__ float catrom_w2_f32(float a) {
    //return 0.5f*a + 2.0f*a*a - 1.5f*a*a*a;
    return a*(0.5f + a*(2.0f - 1.5f*a));
}
/* weight for the sample at offset +2 */
__device__ float catrom_w3_f32(float a) {
    //return -0.5f*a*a + 0.5f*a*a*a;
    return a*a*(-0.5f + 0.5f*a);
}
/* Fetch pixel (u, v) of channel c from a planar (channel-major) image,
   clamping out-of-range coordinates to the nearest edge texel. */
__device__ float image_tex2d_clamp_f32(const float *pixels, int width, int height, int u, int v, int c) {
    int cu = max(0, min(u, width - 1));
    int cv = max(0, min(v, height - 1));
    return pixels[cu + cv * width + c * width * height];
}
/* Apply the four Catmull-Rom weights at fractional offset x to the four
   taps a0..a3 and return the weighted sum.  The accumulation order matches
   the original left-to-right sequence. */
__device__ float image_catmullrom_filter_f32(
    float x,
    float a0,
    float a1,
    float a2,
    float a3)
{
    float acc = a0 * catrom_w0_f32(x);
    acc += a1 * catrom_w1_f32(x);
    acc += a2 * catrom_w2_f32(x);
    acc += a3 * catrom_w3_f32(x);
    return acc;
}
/* Bicubic Catmull-Rom sample of channel c at continuous coordinate (u, v),
 * using texel-center convention (the 0.5 shift) and clamp-to-edge taps.
 * Filters 4 rows of 4 horizontal taps, then filters the 4 row results
 * vertically. */
__device__ float image_catmullrom_interpolate_f32(
const float *pixels,
int width,
int height,
float u,
float v,
int c)
{
/* shift from pixel-center coordinates to the integer lattice */
u -= 0.5f;
v -= 0.5f;
float px = floorf(u);
float py = floorf(v);
/* fractional offsets drive the filter weights */
float fx = u - px;
float fy = v - py;
int ipx = (int)px;
int ipy = (int)py;
/* outer filter runs over rows (fy); each inner filter runs over columns (fx) */
return image_catmullrom_filter_f32(fy,
image_catmullrom_filter_f32(fx,
image_tex2d_clamp_f32(pixels, width, height, ipx-1, ipy-1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx, ipy-1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+1, ipy-1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+2, ipy-1, c)),
image_catmullrom_filter_f32(fx,
image_tex2d_clamp_f32(pixels, width, height, ipx-1, ipy, c),
image_tex2d_clamp_f32(pixels, width, height, ipx, ipy, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+1, ipy, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+2, ipy, c)),
image_catmullrom_filter_f32(fx,
image_tex2d_clamp_f32(pixels, width, height, ipx-1, ipy+1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx, ipy+1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+1, ipy+1, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+2, ipy+1, c)),
image_catmullrom_filter_f32(fx,
image_tex2d_clamp_f32(pixels, width, height, ipx-1, ipy+2, c),
image_tex2d_clamp_f32(pixels, width, height, ipx, ipy+2, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+1, ipy+2, c),
image_tex2d_clamp_f32(pixels, width, height, ipx+2, ipy+2, c)));
}
/* One thread per output element (flat 1D launch over width*height*channels):
 * maps output coordinates back into input space and resamples with bicubic
 * Catmull-Rom interpolation.  Both images use planar channel-major layout.
 * NOTE(review): the source coordinate uses x/out_w*in_w without a half-texel
 * offset, so the mapping is top-left aligned rather than center aligned --
 * confirm this matches the caller's convention before changing it. */
__global__ void image_catmullrom_resize_f32_kernel(
const float *in_pixels,
int in_width,
int in_height,
int channels,
float *out_pixels,
int out_width,
int out_height)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
/* unflatten idx into (x, y, channel) */
int x = idx % out_width;
int y = (idx / out_width) % out_height;
int c = idx / (out_width * out_height);
/* guard the tail block of the 1D launch */
if ((x < out_width) && (y < out_height) && (c < channels)) {
float u = ((float)x) / ((float)out_width) * ((float)in_width);
float v = ((float)y) / ((float)out_height) * ((float)in_height);
float interp_value = image_catmullrom_interpolate_f32(in_pixels, in_width, in_height, u, v, c);
out_pixels[x + y * out_width + c * out_width * out_height] = interp_value;
}
}
/* Host-side launcher for the Catmull-Rom resize kernel on `stream`
 * (asynchronous: the caller is responsible for stream synchronization).
 * FIX: the element count was computed in `int`, overflowing for outputs of
 * 2^31 elements or more; it is now kept in size_t and a zero-size launch
 * (which would be a CUDA error) is skipped.
 * NOTE(review): the kernel itself still indexes with int, so dimensions
 * whose product exceeds INT_MAX remain unsupported end to end. */
extern "C" void devicemem_cuda_kernel_plane_image_catmullrom_resize_f32(
    const float *in_pixels,
    size_t in_width,
    size_t in_height,
    size_t channels,
    float *out_pixels,
    size_t out_width,
    size_t out_height,
    cudaStream_t stream)
{
  size_t n = out_width * out_height * channels; /* total output elements */
  if (n == 0) {
    return; /* nothing to do; a 0-block launch would fail */
  }
  unsigned int blocks = (unsigned int)((n + 1024 - 1) / 1024); /* ceil(n/1024) */
  image_catmullrom_resize_f32_kernel<<<blocks, 1024, 0, stream>>>(
      in_pixels,
      (int)in_width,
      (int)in_height,
      (int)channels,
      out_pixels,
      (int)out_width,
      (int)out_height);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.