serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,001 | #include "includes.h"
// Blend n candidate RGB images into one output image, one tile per block.
// Expected launch: 2D grid of TILE_DIM x BLOCK_ROWS blocks, each covering a
// TILE_DIM x TILE_DIM pixel tile (each thread handles TILE_DIM/BLOCK_ROWS rows).
// img_all:   n stacked interleaved-RGB images (stride*3 bytes per image)
// img:       output interleaved-RGB image
// selection: n stacked single-channel weight maps, `stride` floats per image;
//            normalized in place so the n weights at each pixel sum to ~1
// NOTE(review): assumes the grid exactly covers the image (no bounds checks).
__global__ void merge(unsigned char * img_all, unsigned char * img, float * selection, int n, int stride)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
int idx = 0;
float sum = 0;
float weight = 0;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) {
sum = 0;
// Normalize the n selection weights at this pixel (epsilon avoids /0).
for (idx = 0; idx < n; idx ++) sum += selection[idx * stride + ((y+j)*width + x)];
for (idx = 0; idx < n; idx ++) selection[idx * stride + ((y+j)*width + x)] /= (sum + 1e-5);
img[3*((y+j)*width + x)] = 0;
img[3*((y+j)*width + x)+1] = 0;
img[3*((y+j)*width + x)+2] = 0;
for (idx = 0; idx < n; idx ++) {
//weight = selection[idx * stride + ((y+j)*width + x)];
// NOTE(review): the normalized selection weight computed above is NOT
// used -- a constant 0.25 is applied instead (uniform blend, correct
// only for n == 4). Looks like a debug leftover; confirm intent.
weight = 0.25;
//weight = 0.5;
img[3*((y+j)*width + x)] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x)] * weight);
img[3*((y+j)*width + x)+1] += (unsigned char) (img_all[idx * stride * 3 + 3*((y+j)*width + x) + 1] * weight);
img[3*((y+j)*width + x)+2] += (unsigned char)(img_all[idx * stride * 3 + 3*((y+j)*width + x) + 2] * weight);
}
}
} |
19,002 | #include <float.h>
#include <cstdlib>
#include "../device/device.cu"
// __global__ void
// reduce0(float* g_idata,float* g_odata, unsigned int n) {
// extern __shared__ float temp[];
// int thid = threadIdx.x;
// temp[thid] = g_idata[thid];
// __syncthreads();
// for(int offset = 1;offset < n; offset *= 2) {
// if(thid >= offset)
// temp[thid] += temp[thid - offset];
// __syncthreads();
// }
// g_odata[thid] = temp[thid];
// }
// cluster assignment using randomization
__global__
void init_cluster_assignment(int k, int size, int * cluster_size, int * cluster_assignment){
    // Randomly (by thread index modulo k) assign each of `size` data points to
    // one of k clusters and count the members per cluster.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Fix: the original had EVERY thread zero the counters while other threads
    // were already incrementing them -- a data race. Zero once from thread 0.
    if (index == 0)
        for (int i = 0; i < k; i++)
            cluster_size[i] = 0;
    __syncthreads();
    // NOTE(review): __syncthreads() only orders threads within one block; for a
    // multi-block launch, zero cluster_size with cudaMemset before launching.
    int group = index % k;  // fixed pseudo-random cluster per thread
    for (int i = index; i < size; i += stride) {
        cluster_assignment[i] = group;
        // Fix: counters are shared across all threads; the plain += 1 was a
        // race that undercounted. atomicAdd is required.
        atomicAdd(&cluster_size[group], 1);
    }
}
__global__
void update_clusters(int k, float ** cluster, const int * cluster_assignment, int data_size,
                     int dimensions, float ** feature_vector, const int * cluster_size, int * response){
    // Recompute each of the k centroids as the mean of its assigned feature
    // vectors. response[0] is set to 1 if any centroid coordinate changed
    // (host-readable convergence flag), 0 otherwise.
    response[0] = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Per-thread scratch sums on the device heap.
    // NOTE(review): every thread redundantly accumulates over the whole data
    // set; only the centroid update below is actually parallelized.
    float ** temp = new float*[k];
    for (int i = 0; i < k; i++){
        temp[i] = new float[dimensions];
        for (int j = 0; j < dimensions; j++){
            temp[i][j] = 0.0f;
        }
    }
    for (int i = 0; i < data_size; i++){
        for (int j = 0; j < dimensions; j++){
            temp[cluster_assignment[i]][j] += feature_vector[i][j];
        }
    }
    for (int i = index; i < k; i += stride){
        if (cluster_size[i] == 0){
            continue;  // empty cluster keeps its previous centroid
        }
        for (int j = 0; j < dimensions; j++){
            float mean = temp[i][j] / cluster_size[i];
            // Exact float comparison, as in the original: any bit change
            // counts as "not yet converged".
            if (cluster[i][j] != mean){
                response[0] = 1;
            }
            cluster[i][j] = mean;
        }
    }
    // Fix: the original never freed this per-thread allocation, leaking device
    // heap on every launch until allocations start failing.
    for (int i = 0; i < k; i++)
        delete[] temp[i];
    delete[] temp;
}
__global__
void update_cluster_assignment(int k, int * cluster_assignment, float ** cluster, int size, int dimension, float ** features){
    // Reassign every data point to its nearest cluster center. Grid-stride
    // loop: any launch configuration covers all `size` points.
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step  = blockDim.x * gridDim.x;
    for (int point = first; point < size; point += step)
        cluster_assignment[point] = find_nearest_center(k, features[point], dimension, cluster);
} |
19,003 | #include "includes.h"
__global__ void kernel_fill(float4* d_dx1, float val, int numel) {
    // Set all four lanes (x, y, z, w) of each float4 element to `val`.
    // One thread per element; threads past `numel` do nothing.
    const size_t col = threadIdx.x + blockIdx.x * (size_t)blockDim.x;
    if (col < numel) {
        d_dx1[col] = make_float4(val, val, val, val);
    }
} |
19,004 | #include "includes.h"
#define UMUL(a, b) ( (a) * (b) )
#define UMAD(a, b, c) ( UMUL((a), (b)) + (c) )
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
#define SHARED_MEMORY_SIZE 49152
#define MERGE_THREADBLOCK_SIZE 128
static uint *d_PartialHistograms;
/*
* Function that maps value to bin in range 0 inclusive to binCOunt exclusive
*/
__global__ void mergePartialHistogramsKernel(uint *d_Histogram, uint *d_PartialHistograms, uint histogramCount, uint binCount)
{
    // Merge histogramCount per-block partial histograms (laid out histogram-
    // major) into the final d_Histogram. Blocks iterate over bins; blockDim.x
    // must equal MERGE_THREADBLOCK_SIZE for the shared-memory reduction below.
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    for (uint bin = blockIdx.x; bin < binCount; bin += gridDim.x)
    {
        // Each thread accumulates a strided slice of the partial counts.
        uint sum = 0;
        for (uint histogramIndex = threadIdx.x; histogramIndex < histogramCount; histogramIndex += MERGE_THREADBLOCK_SIZE)
        {
            sum += d_PartialHistograms[bin + histogramIndex * binCount];
        }
        // Fix: barrier before overwriting shared memory. On the second and
        // later bins, fast threads could previously store their new sum into
        // data[] while slower threads were still reading data[] in the tail
        // of the previous bin's reduction.
        __syncthreads();
        data[threadIdx.x] = sum;
        // Standard shared-memory tree reduction over the block.
        for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
        {
            __syncthreads();
            if (threadIdx.x < stride)
            {
                data[threadIdx.x] += data[threadIdx.x + stride];
            }
        }
        if (threadIdx.x == 0)
        {
            d_Histogram[bin] = data[0];
        }
    }
} |
19,005 | #include "CommonDataStructure.cuh"
#pragma region GraphStructure_Part
// Construct an empty graph: zero vertices, empty adjacency list.
GraphStructure::GraphStructure()
{
vertexNum = 0;
adjList.clear();
}
// Construct an empty graph with capacity pre-reserved for `num` vertices.
// Note: vertexNum stays 0 -- ReserveSpace only reserves storage; call
// SetVertexNum to actually size the adjacency list.
GraphStructure::GraphStructure(int num)
{
vertexNum = 0;
adjList.clear();
ReserveSpace(num);
}
// No manual cleanup needed: members release their own storage.
GraphStructure::~GraphStructure()
{
}
// Reserve capacity for `num` vertices without changing the logical size.
void GraphStructure::ReserveSpace(int num)
{
adjList.reserve(num);
}
// Set the vertex count and size the adjacency list to match.
// Shrinking discards adjacency entries of removed vertices.
void GraphStructure::SetVertexNum(int vNum)
{
vertexNum = vNum;
adjList.resize(vertexNum);
}
// Add the directed edge s -> e.
// NOTE(review): s is not range-checked; callers must guarantee
// 0 <= s < vertexNum (adjList[s] is UB otherwise).
void GraphStructure::AddEdge(int s, int e)
{
adjList[s].insert(e);
}
// Remove one occurrence of the directed edge s -> e, if present.
// Fix: the original called erase(find(e)) unconditionally, which is undefined
// behavior (erasing the end iterator) when the edge does not exist. Erasing
// via the found iterator keeps single-element semantics even if the container
// allows duplicate keys.
void GraphStructure::DeleteEdge(int s, int e)
{
auto it = adjList[s].find(e);
if (it != adjList[s].end())
adjList[s].erase(it);
}
// Remove every outgoing edge of vertex s.
void GraphStructure::DeleteAllEdge(int s)
{
adjList[s].clear();
}
#pragma endregion |
19,006 |
/* check-thread-index.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define CHECK_CUDA_CALL(call) \
{ \
const cudaError_t error = call; \
\
if (error != cudaSuccess) { \
fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \
__FILE__, __LINE__, \
error, cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
} \
/* Print a row-major `row` x `col` integer matrix to stdout, one matrix row
 * per line, each value in a 2-character field. */
void printMatrix(int* matC, int row, int col)
{
    printf("Matrix (%d, %d)\n", row, col);
    for (int i = 0; i < row; ++i) {
        for (int j = 0; j < col; ++j)
            printf("%2d ", matC[i * col + j]);
        printf("\n");
    }
    printf("\n");
}
/* Print each thread's thread/block indices and the matrix element it maps to.
 * Expects a 2D launch where (x, y) maps to element y * col + x.
 * Fix: added a bounds guard -- the ceil-division grid in main() may launch
 * more threads than matrix elements, and the original read matA[i] out of
 * bounds for those extra threads. */
__global__ void printThreadIndex(int* matA, int row, int col)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= col || y >= row)
        return;
    unsigned int i = y * col + x;
    printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), "
           "coordinate: (%d, %d), array index: %d, "
           "matrix value: %d\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           x, y, i, matA[i]);
}
/* Demo driver: fills a 6x8 host matrix with 0..47, copies it to the device,
 * and launches printThreadIndex so each thread reports the element it maps
 * to. Exits nonzero via CHECK_CUDA_CALL on any CUDA API failure. */
int main(int argc, char** argv)
{
int i;
int dev;
cudaDeviceProp deviceProp;
int matRow;
int matCol;
int numOfElements;
int numOfBytes;
int* hostMatA;
int* devMatA;
/* Setup device */
dev = 0;
CHECK_CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using device %d: %s\n", dev, deviceProp.name);
CHECK_CUDA_CALL(cudaSetDevice(dev));
/* Set matrix size */
matCol = 8;
matRow = 6;
numOfElements = matRow * matCol;
numOfBytes = numOfElements * sizeof(int);
/* Allocate host memory */
/* NOTE(review): calloc result is not NULL-checked before use. */
hostMatA = (int*)calloc(numOfElements, sizeof(int));
for (i = 0; i < numOfElements; ++i)
hostMatA[i] = i;
printMatrix(hostMatA, matRow, matCol);
/* Allocate device memory */
CHECK_CUDA_CALL(cudaMalloc((void**)&devMatA, numOfBytes));
/* Set execution configuration: ceil-division grid so the whole matrix is
 * covered (here 8x6 with 4x2 blocks divides exactly). */
dim3 block(4, 2);
dim3 grid((matCol + block.x - 1) / block.x, (matRow + block.y - 1) / block.y);
/* Transfer matrix data from host */
CHECK_CUDA_CALL(cudaMemcpy(devMatA, hostMatA, numOfBytes, cudaMemcpyHostToDevice));
/* Call kernel from host; synchronize so device printf output completes and
 * any kernel fault is surfaced before cleanup. */
printThreadIndex<<<grid, block>>>(devMatA, matRow, matCol);
CHECK_CUDA_CALL(cudaDeviceSynchronize());
/* Free device and host memory */
CHECK_CUDA_CALL(cudaFree(devMatA));
free(hostMatA);
/* Reset device */
CHECK_CUDA_CALL(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
19,007 | #include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/mman.h>
// Print a readable CUDA error message with the failing call site and abort.
// Used through the HANDLE_ERROR macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
// Elapsed wall-clock time from `start` to `end`, in fractional seconds.
inline double diff_s(struct timeval start, struct timeval end)
{
    double seconds = (double) (end.tv_sec - start.tv_sec);
    double micros  = (double) (end.tv_usec - start.tv_usec);
    return seconds + micros / 1000000;
}
// Read-benchmark kernel: each thread loads one uint64_t from memoryToRead.
// `memory` is unused here (kept for signature symmetry with the commented-out
// copy variant).
__global__ void readKernel(uint64_t *memory, uint64_t *memoryToRead)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//memory[tid]=memoryToRead[tid];
//__shared__ uint64_t temp;
uint64_t temp = memoryToRead[tid];
// The branch keeps `temp` live so the compiler cannot eliminate the load; the
// data is initialized to 5 so the branch is never taken in practice.
// NOTE(review): __syncthreads() under divergent control flow is formally
// illegal CUDA -- tolerated here only as a dead-code-elimination blocker.
if(!temp)
__syncthreads();
}
// Write-benchmark kernel: each thread stores the constant 5 to its own slot.
// Launch geometry must cover exactly the allocated element count (no guard).
__global__ void writeKernel(uint64_t *memory)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
memory[tid]=5;
}
// Empty kernel: measures bare launch overhead (benchmark type 2).
__global__ void nullKernel(int *memory)
{
}
// Serially initialize N device-resident uint64_t slots to 5. Only thread 0
// does any work; the kernel is launched <<<1,1>>> so the loop is scalar.
__global__ void initCudaMallocd(uint64_t *memory, int N)
{
    if (threadIdx.x != 0)
        return;
    for (int k = 0; k < N; ++k)
        memory[k] = 5;
}
// Host-side check that all N elements hold the benchmark value 5; prints a
// one-line SUCCESS/error verdict.
void verify(uint64_t* memory, int N)
{
    int ok = 1;
    for (int i = 0; i < N; ++i) {
        if (memory[i] != 5) {
            ok = 0;
            break;
        }
    }
    if (ok)
        printf("verified SUCCESS\n");
    else
        printf("error in verification\n");
}
// Device-side counterpart of verify(): thread 0 scans device memory for the
// expected value 5 and prints the verdict via device printf.
__global__ void verifyCudaMallocd(uint64_t* memory, int N)
{
int tid=threadIdx.x;
if(tid==0) {
int error = 0;
for(int i =0; i<N; i++){
if(memory[i]!=5){
error = 1;
break;
}
}
if(error)
printf("error in verification\n");
else
printf("verified SUCCESS\n");
}
}
// Memory-latency microbenchmark driver.
// Benchmark types (-b): 0 = GPU read/write of cudaHostAlloc'd memory,
// 1 = GPU read/write of cudaMalloc'd memory, 2 = null-kernel launch overhead,
// 3 = CPU read/write of malloc'd memory, 4 = CPU read/write of mapped
// cudaHostAlloc'd memory, 5 = CPU read/write of cudaMallocManaged memory.
// Other flags: -m element count (uint64_t, despite the name "numBytes"),
// -i iterations, -r 1=read/0=write, -l mlock CPU buffer, -d dry run.
int
main( int argc, char *argv[] )
{
uint64_t *hostAllocd, *cudaMallocd, *cpuMallocd;
int ITERATIONS = 100000;
int numBytes = 1;
struct timeval tv1, tv2;
int opt;
int read=0; //read benchmark? or write?
int benchmarkType = 0;
int locked = 0; //mlock data?
int dryRun = 0; //dry run to measure noise TLB misses/...etc
while ((opt = getopt(argc, argv, "m:b:i:r:ld")) != -1) {
switch (opt) {
case 'm':
numBytes = atoi(optarg);
//assert(numBytes%16 == 0 && numBytes<=1024);
break;
case 'b':
benchmarkType = atoi(optarg);
break;
case 'i':
ITERATIONS = atoi(optarg);
break;
case 'r':
read = atoi(optarg);
break;
case 'l':
locked = 1;
break;
case 'd':
dryRun = 1;
break;
default: /* '?' */
break;
}
}
// Launch geometry: one thread per element; above 1024 elements, split the
// work over 16 blocks (assumes numBytes divisible by 16 in that case).
int num_of_blocks=1;
int num_of_threads_per_block=numBytes;
if(numBytes>1024){
num_of_blocks = 16;
num_of_threads_per_block = numBytes/16;
}
// cudaFree(0) forces CUDA context creation up front so it is not billed to
// the first timed kernel launch.
if(benchmarkType == 0 || benchmarkType == 1)
HANDLE_ERROR(cudaFree(0));
switch (benchmarkType) {
case 0: {//read/Write to hostAlloc'd data
HANDLE_ERROR( cudaHostAlloc( &hostAllocd, sizeof(uint64_t)*numBytes, 0 ) );
for(int k=0;k< numBytes ;k++){
hostAllocd[k]=1;
}
if(read)
{
uint64_t *memoryToRead;
HANDLE_ERROR( cudaHostAlloc( &memoryToRead, sizeof(uint64_t)*numBytes, 0 ) );
for(int k=0;k< numBytes ;k++)
memoryToRead[k]=5;
gettimeofday(&tv1, NULL);
for(int i = 0; i < ITERATIONS; i++) {
readKernel<<<num_of_blocks,num_of_threads_per_block>>>(hostAllocd,memoryToRead);
HANDLE_ERROR( cudaDeviceSynchronize());
}
gettimeofday(&tv2, NULL);
cudaFreeHost(memoryToRead);
//verify(hostAllocd,numBytes);
}
else
{
gettimeofday(&tv1, NULL);
for(int i = 0; i < ITERATIONS; i++) {
writeKernel<<<num_of_blocks,num_of_threads_per_block>>>(hostAllocd);
HANDLE_ERROR( cudaDeviceSynchronize());
}
gettimeofday(&tv2, NULL);
verify(hostAllocd,numBytes);
}
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("HostAlloc [%s] Latency including kernel launch overhead = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
cudaFreeHost(hostAllocd);
break;
}
case 1: {//read/Write to cudaMalloc'd data
cpuMallocd = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
assert(cpuMallocd);
for(int k=0;k< numBytes ;k++){
cpuMallocd[k]=1;
}
HANDLE_ERROR( cudaMalloc( &cudaMallocd, sizeof(uint64_t)*numBytes) );
HANDLE_ERROR( cudaMemcpy( cudaMallocd,cpuMallocd, sizeof(uint64_t)*numBytes,cudaMemcpyDefault) );
if(read)
{
uint64_t *memoryToRead;
HANDLE_ERROR( cudaMalloc( &memoryToRead, sizeof(uint64_t)*numBytes ) );
initCudaMallocd<<<1,1>>>(memoryToRead,numBytes);
HANDLE_ERROR( cudaDeviceSynchronize());
gettimeofday(&tv1, NULL);
for(int i = 0; i < ITERATIONS; i++) {
readKernel<<<num_of_blocks,num_of_threads_per_block>>>(cudaMallocd,memoryToRead);
HANDLE_ERROR( cudaDeviceSynchronize());
}
gettimeofday(&tv2, NULL);
cudaFree(memoryToRead);
//verifyCudaMallocd<<<1,1>>>(cudaMallocd,numBytes);
//HANDLE_ERROR( cudaDeviceSynchronize());
}
else
{
gettimeofday(&tv1, NULL);
for(int i = 0; i < ITERATIONS; i++) {
writeKernel<<<num_of_blocks,num_of_threads_per_block>>>(cudaMallocd);
HANDLE_ERROR( cudaDeviceSynchronize());
}
gettimeofday(&tv2, NULL);
//verifyCudaMallocd<<<1,1>>>(cudaMallocd,numBytes);
//HANDLE_ERROR( cudaDeviceSynchronize());
}
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("CudaMalloc [%s] Latency including kernel launch overhead = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
free(cpuMallocd);
cudaFree(cudaMallocd);
break;
}
case 2:
{
gettimeofday(&tv1, NULL);
for(int i = 0; i < ITERATIONS; i++) {
nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(0);
HANDLE_ERROR( cudaDeviceSynchronize());
}
gettimeofday(&tv2, NULL);
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("null kernel launch overhead = %f us\n",elapsedTimeSeconds*1e6/(float)ITERATIONS);
break;
}
case 3: {//read/Write to cpu mallocd data
uint64_t *memory_to_access = (uint64_t *)malloc(sizeof(uint64_t)*numBytes );
if(!dryRun) {
if(locked)
mlock(memory_to_access,sizeof(uint64_t)*numBytes);
assert(memory_to_access);
if(read)
{
for(int k=0;k< numBytes ;k++)
memory_to_access[k]=5;
// NOTE(review): `fake` is an uninitialized read sink; its value is never
// used, so an optimizing compiler may elide these timed loops entirely.
uint64_t fake;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
fake += memory_to_access[j];
}
}
gettimeofday(&tv2, NULL);
}
else {
// 8-way manually unrolled variant (assumes numBytes % 8 == 0).
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
fake += memory_to_access[j];
fake += memory_to_access[j + 1];
fake += memory_to_access[j + 2];
fake += memory_to_access[j + 3];
fake += memory_to_access[j + 4];
fake += memory_to_access[j + 5];
fake += memory_to_access[j + 6];
fake += memory_to_access[j + 7];
}
}
gettimeofday(&tv2, NULL);
}
}
else
{
uint64_t fake=5;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
memory_to_access[j] = fake;
}
}
gettimeofday(&tv2, NULL);
}
else {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
memory_to_access[j] = fake;
memory_to_access[j + 1] = fake;
memory_to_access[j + 2] = fake;
memory_to_access[j + 3] = fake;
memory_to_access[j + 4] = fake;
memory_to_access[j + 5] = fake;
memory_to_access[j + 6] = fake;
memory_to_access[j + 7] = fake;
}
}
gettimeofday(&tv2, NULL);
}
}
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("cpu malloc [%s] Latency = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
}
// printf("Press enter to continue...\n");
// getchar();
// NOTE(review): on a dry run the buffer is never written, so this verify
// reads uninitialized memory and will normally report an error.
verify(memory_to_access,numBytes);
free(memory_to_access);
break;
}
case 4: {//read/Write to cpu but hostsllocd data
uint64_t *memory_to_access;
HANDLE_ERROR(cudaSetDevice(0));
HANDLE_ERROR(cudaSetDeviceFlags(cudaDeviceMapHost));
HANDLE_ERROR(cudaFree(0));
HANDLE_ERROR(cudaHostAlloc(&memory_to_access,sizeof(uint64_t)*numBytes,cudaHostAllocMapped));
if(!dryRun) {
if(read)
{
for(int k=0;k< numBytes ;k++)
memory_to_access[k]=5;
uint64_t fake;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
fake += memory_to_access[j];
}
}
gettimeofday(&tv2, NULL);
}
else {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
fake += memory_to_access[j];
fake += memory_to_access[j + 1];
fake += memory_to_access[j + 2];
fake += memory_to_access[j + 3];
fake += memory_to_access[j + 4];
fake += memory_to_access[j + 5];
fake += memory_to_access[j + 6];
fake += memory_to_access[j + 7];
}
}
gettimeofday(&tv2, NULL);
}
}
else
{
uint64_t fake=5;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
memory_to_access[j] = fake;
}
}
gettimeofday(&tv2, NULL);
}
else {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
memory_to_access[j] = fake;
memory_to_access[j + 1] = fake;
memory_to_access[j + 2] = fake;
memory_to_access[j + 3] = fake;
memory_to_access[j + 4] = fake;
memory_to_access[j + 5] = fake;
memory_to_access[j + 6] = fake;
memory_to_access[j + 7] = fake;
}
}
gettimeofday(&tv2, NULL);
}
}
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("cpu hostAlloc [%s] Latency = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
}
// printf("Press enter to continue...\n");
// getchar();
verify(memory_to_access,numBytes);
cudaFreeHost(memory_to_access);
break;
}
case 5: {//read/Write to cpu but mallocManaged data
uint64_t *memory_to_access;
HANDLE_ERROR(cudaMallocManaged(&memory_to_access,sizeof(uint64_t)*numBytes));
if(!dryRun) {
if(read)
{
for(int k=0;k< numBytes ;k++)
memory_to_access[k]=5;
uint64_t fake;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
fake += memory_to_access[j];
}
}
gettimeofday(&tv2, NULL);
}
else {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
fake += memory_to_access[j];
fake += memory_to_access[j + 1];
fake += memory_to_access[j + 2];
fake += memory_to_access[j + 3];
fake += memory_to_access[j + 4];
fake += memory_to_access[j + 5];
fake += memory_to_access[j + 6];
fake += memory_to_access[j + 7];
}
}
gettimeofday(&tv2, NULL);
}
}
else
{
uint64_t fake=5;
if(numBytes<8) {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j++) {
memory_to_access[j] = fake;
}
}
gettimeofday(&tv2, NULL);
}
else {
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
for (int j = 0; j < (numBytes); j += 8) {
memory_to_access[j] = fake;
memory_to_access[j + 1] = fake;
memory_to_access[j + 2] = fake;
memory_to_access[j + 3] = fake;
memory_to_access[j + 4] = fake;
memory_to_access[j + 5] = fake;
memory_to_access[j + 6] = fake;
memory_to_access[j + 7] = fake;
}
}
gettimeofday(&tv2, NULL);
}
}
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("cpu mallocManaged [%s] Latency = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
}
// printf("Press enter to continue...\n");
// getchar();
verify(memory_to_access,numBytes);
cudaFree(memory_to_access);
break;
}
}
if(benchmarkType == 0 || benchmarkType == 1)
cudaDeviceReset();
return 0;
}
|
19,008 | #include "includes.h"
// Row-direction pass of a separable 3D grayscale erosion: each output voxel
// becomes the minimum of its row neighborhood of width 2*kernel_radius+1.
// Layout/launch contract (from the ER_ROWS_* macros defined elsewhere): each
// block processes ER_ROWS_RESULT_STEPS tiles of ER_ROWS_BLOCKDIM_X columns,
// with ER_ROWS_HALO_STEPS halo tiles loaded on each side; out-of-image halo
// voxels are treated as 0. Requires kernel_radius <=
// ER_ROWS_HALO_STEPS * ER_ROWS_BLOCKDIM_X for the shared-memory window.
__global__ void erosionRows3DKernel ( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius )
{
__shared__ unsigned short smem[ER_ROWS_BLOCKDIM_Z][ER_ROWS_BLOCKDIM_Y][(ER_ROWS_RESULT_STEPS + 2 * ER_ROWS_HALO_STEPS) * ER_ROWS_BLOCKDIM_X];
unsigned short *smem_thread = smem[threadIdx.z][threadIdx.y];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ER_ROWS_RESULT_STEPS - ER_ROWS_HALO_STEPS) * ER_ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ER_ROWS_BLOCKDIM_Y + threadIdx.y;
const int baseZ = blockIdx.z * ER_ROWS_BLOCKDIM_Z + threadIdx.z;
d_src += (baseZ * h + baseY) * w + baseX;
d_dst += (baseZ * h + baseY) * w + baseX;
//Load main data
#pragma unroll
for (int i = ER_ROWS_HALO_STEPS; i < ER_ROWS_HALO_STEPS + ER_ROWS_RESULT_STEPS; i++) {
smem_thread[threadIdx.x + i * ER_ROWS_BLOCKDIM_X] = d_src[i * ER_ROWS_BLOCKDIM_X];
}
//Load left halo (zero outside the image)
#pragma unroll
for (int i = 0; i < ER_ROWS_HALO_STEPS; i++) {
smem_thread[threadIdx.x + i * ER_ROWS_BLOCKDIM_X] = (baseX + i * ER_ROWS_BLOCKDIM_X >= 0) ? d_src[i * ER_ROWS_BLOCKDIM_X] : 0;
}
//Load right halo (zero outside the image)
#pragma unroll
for (int i = ER_ROWS_HALO_STEPS + ER_ROWS_RESULT_STEPS; i < ER_ROWS_HALO_STEPS + ER_ROWS_RESULT_STEPS + ER_ROWS_HALO_STEPS; i++) {
smem_thread[threadIdx.x + i * ER_ROWS_BLOCKDIM_X] = (baseX + i * ER_ROWS_BLOCKDIM_X < w) ? d_src[i * ER_ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results; barrier makes all tile loads visible first.
__syncthreads();
#pragma unroll
for (int i = ER_ROWS_HALO_STEPS; i < ER_ROWS_HALO_STEPS + ER_ROWS_RESULT_STEPS; i++) {
unsigned short *smem_kern = &smem_thread[threadIdx.x + i * ER_ROWS_BLOCKDIM_X - kernel_radius];
unsigned short val = smem_kern[0];
//#pragma unroll
// Running minimum over the 2*kernel_radius+1 window.
for (int j = 1; j <= 2*kernel_radius; j++) {
val = min(val, smem_kern[j]);
}
d_dst[i * ER_ROWS_BLOCKDIM_X] = val;
}
} |
19,009 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// Morphological erosion over a dim x dim window: each output pixel takes the
// RGB triple of the darkest (minimum R+G+B) in-bounds neighbor.
// i_size is the row pitch of the channel planes; `m` is unused (kept for
// signature compatibility with the caller).
// Fix: the original was missing braces, so c2/c3 were assigned on EVERY
// visited neighbor regardless of the minimum test -- the output G/B channels
// came from the last neighbor instead of the darkest one.
__global__ void erode(unsigned char *R_input, unsigned char *G_input,
                      unsigned char *B_input, size_t i_size,
                      unsigned char *r_dataC, unsigned char *g_dataC,
                      unsigned char *b_dataC, unsigned long col, unsigned long row,
                      unsigned int dim, int m) {
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * i_size;
    int c1 = 255, c2 = 255, c3 = 255;
    int end = dim / 2, ini = -end;
    for (int i = ini; i <= end; i++) {
        int ximg = x + i;
        for (int j = ini; j <= end; j++) {
            int yimg = y + j;
            int offset2 = ximg + yimg * i_size;
            // Skip neighbors outside the image (borders at 0 excluded, as in
            // the original guard).
            if (ximg > 0 && yimg > 0 && ximg < col && yimg < row) {
                if (R_input[offset2] + G_input[offset2] + B_input[offset2] < c1 + c2 + c3) {
                    c1 = R_input[offset2];
                    c2 = G_input[offset2];
                    c3 = B_input[offset2];
                }
            }
        }
    }
    r_dataC[offset] = c1;
    g_dataC[offset] = c2;
    b_dataC[offset] = c3;
}
// Morphological dilation over a dim x dim window: each output pixel takes the
// RGB triple of the brightest (maximum R+G+B) in-bounds neighbor.
// i_size is the row pitch of the channel planes; `m` is unused (kept for
// signature compatibility with the caller).
// Fix: the original was missing braces, so c2/c3 were assigned on EVERY
// visited neighbor regardless of the maximum test -- the output G/B channels
// came from the last neighbor instead of the brightest one.
__global__ void dilate(unsigned char *R_input, unsigned char *G_input,
                       unsigned char *B_input, size_t i_size,
                       unsigned char *r_dataC, unsigned char *g_dataC,
                       unsigned char *b_dataC, unsigned long col, unsigned long row,
                       unsigned int dim, int m) {
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * i_size;
    int c1 = 0, c2 = 0, c3 = 0;
    int end = dim / 2, ini = -end;
    for (int i = ini; i <= end; i++) {
        int ximg = x + i;
        for (int j = ini; j <= end; j++) {
            int yimg = y + j;
            int offset2 = ximg + yimg * i_size;
            if (ximg > 0 && yimg > 0 && ximg < col && yimg < row) {
                if (R_input[offset2] + G_input[offset2] + B_input[offset2] > c1 + c2 + c3) {
                    c1 = R_input[offset2];
                    c2 = G_input[offset2];
                    c3 = B_input[offset2];
                }
            }
        }
    }
    r_dataC[offset] = c1;
    g_dataC[offset] = c2;
    b_dataC[offset] = c3;
}
// Per-channel median filter over a dim x dim window.
// The sample buffers hold at most 9 values, i.e. this kernel supports
// dim <= 3; for larger dims the gather is now clamped to the first 9
// in-bounds samples (the original silently wrote past the arrays).
__global__ void median_filter(unsigned char *R_input, unsigned char *G_input,
                              unsigned char *B_input, size_t i_size,
                              unsigned char *r_dataC, unsigned char *g_dataC,
                              unsigned char *b_dataC, unsigned long col, unsigned long row,
                              unsigned int dim) {
    int x = threadIdx.x + (blockIdx.x * blockDim.x);
    int y = threadIdx.y + (blockIdx.y * blockDim.y);
    int offset = x + y * i_size;
    int end = dim / 2, ini = -end;
    int n = 0;
    int hr[9];
    int hg[9];
    int hb[9];
    // Gather the in-bounds neighborhood samples for each channel.
    for (int i = ini; i <= end; i++) {
        int ximg = x + i;
        for (int j = ini; j <= end; j++) {
            int yimg = y + j;
            int offset2 = ximg + yimg * i_size;
            if (ximg > 0 && yimg > 0 && ximg < col && yimg < row && n < 9) {
                hr[n] = R_input[offset2];
                hg[n] = G_input[offset2];
                hb[n] = B_input[offset2];
                n++;
            }
        }
    }
    if (n == 0)
        return;  // thread maps entirely outside the image
    // Sort each channel's samples independently (n <= 9, cost is negligible).
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++) {
            if (hr[j] < hr[i]) { int t = hr[j]; hr[j] = hr[i]; hr[i] = t; }
            if (hg[j] < hg[i]) { int t = hg[j]; hg[j] = hg[i]; hg[i] = t; }
            if (hb[j] < hb[i]) { int t = hb[j]; hb[j] = hb[i]; hb[i] = t; }
        }
    unsigned char temp_r, temp_g, temp_b;
    if (n % 2 == 1) {
        temp_r = hr[n / 2];
        temp_g = hg[n / 2];
        temp_b = hb[n / 2];
    } else {
        // Fix: an even sample count takes the MEAN of the two middle values;
        // the original added them, overflowing the unsigned char result.
        temp_r = (hr[n / 2] + hr[n / 2 - 1]) / 2;
        temp_g = (hg[n / 2] + hg[n / 2 - 1]) / 2;
        temp_b = (hb[n / 2] + hb[n / 2 - 1]) / 2;
    }
    r_dataC[offset] = temp_r;
    g_dataC[offset] = temp_g;
    b_dataC[offset] = temp_b;
}
// Per-channel 2D convolution with a dim x dim mask. `k` walks the mask in
// lockstep with the window (it advances even for out-of-bounds neighbors so
// mask alignment is preserved).
// NOTE(review): the accumulators are unsigned char, so the running sum wraps
// modulo 256 on every add and the float product is truncated -- fine for
// small averaging masks, wrong for masks with negative weights or sums > 255.
__global__ void Operador_Convolucion(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size,
unsigned char *r_dataC, unsigned char *g_dataC,
unsigned char *b_dataC, unsigned long col, unsigned long row,
float *mask, unsigned int dim) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
int offset2, ximg, yimg;
unsigned char temp_r = 0, temp_g = 0, temp_b = 0;
int end = dim/2, ini = -end, k = 0;
for (int i = ini; i <= end; i++) {
ximg = x + i;
for (int j = ini; j <= end; j++) {
yimg = y + j;
offset2 = ximg + yimg * i_size;
// Out-of-bounds neighbors contribute nothing (mask index still advances).
if (ximg < col && yimg < row)
if (ximg > 0 && yimg > 0) {
temp_r += R_input[offset2]*mask[k];
temp_g += G_input[offset2]*mask[k];
temp_b += B_input[offset2]*mask[k];}
k++;
}
}
r_dataC[offset] = temp_r;
g_dataC[offset] = temp_g;
b_dataC[offset] = temp_b;
}
|
19,010 | #include <stdio.h>
#include <math.h>
#include <cuda.h>
// void Radix(int* array, int array_size, int max_digit); /* Thread function */
__host__ void rng(int* arr, int n); /* Seed function */
__host__ int max_el(int * vec, int n);
__host__ int num_digit(int el);
__device__ int to_digit(int el, int divider);
__host__ int to_digit_host(int el, int divider);
__host__ void print_array(int * array, int n);
__global__ void count_to_bucket(int * data, int * bucket, int length, int digit);
__host__ void countSort(int * data, int * bucket, int length, int digit);
__host__ void empty_bucket(int * bucket, int size);
__host__ void print_array_file(int * array, int array_len);
// Radix-sort driver: generates `argv[1]` random ints in managed memory, runs
// one count_to_bucket kernel pass per decimal digit to build the bucket
// counts, then performs the (host-side) counting sort digit by digit.
int main(int argc,char *argv[]) {
if(argc != 2) {
perror("Please specify data length");
exit(1);
}
printf("flag 1\n");
int data_size = strtol(argv[1], NULL, 10);
int numThread = 1000;
float numBlocksFloat = (float) data_size / numThread;
int numBlocks = ceil(numBlocksFloat);
int *global_array;
int *global_bucket;
int max_digit;
int base= 10;
printf("data size : %d\n%.f\n", data_size,numBlocksFloat);
printf("flag 2 thread %d block %d \n", numThread, numBlocks);
// aloocating array to be accessible by both cpu and gpu
// NOTE(review): allocation results are unchecked, and the "+1" byte of
// slack looks like leftover guesswork rather than a real requirement.
cudaMallocManaged(&global_array, data_size*sizeof(int)+1);
// cudaMalloc(&local_array,data_size*sizeof(int)+1);
rng(global_array, data_size);
// cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
printf("flag 3\n");
printf("flag 4\n");
// global_array = (*int )malloc(data_size * sizeof(int));
// initialization data
print_array(global_array, data_size);
max_digit = num_digit(max_el(global_array, data_size));
printf("max digit %d\n", max_digit);
// One 10-slot bucket group per digit position.
int bucket_el = base*max_digit;
cudaMallocManaged(&global_bucket, bucket_el*sizeof(int)+1);
empty_bucket(global_bucket,bucket_el);
for(int i = 1; i<= max_digit; i++){
count_to_bucket<<<numBlocks,numThread>>>(global_array,global_bucket,data_size,i);
}
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
for(int i = 0; i<max_digit; i++){
countSort(global_array, global_bucket, data_size, i);
}
print_array(global_bucket,bucket_el);
print_array(global_array, data_size);
// NOTE(review): global_bucket is intentionally (?) not freed -- see the
// commented-out cudaFree below.
cudaFree(global_array);
//cudaFree(global_bucket);
return 0;
}
__global__
void count_to_bucket(int * data, int * bucket, int length, int digit){
    // Histogram pass of the radix sort: for the 1-based decimal position
    // `digit`, count how many elements carry each value 0-9, into bucket
    // slots [10*(digit-1), 10*digit).
    // Fix 1: derive the global index from the actual launch geometry instead
    // of the hard-coded 1000 threads-per-block assumption.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < length) {
        int num_bucket = to_digit(data[i], digit) + 10 * (digit - 1);
        printf("%d [%d] %d\n", data[i], digit, num_bucket);  // debug trace
        // Fix 2: many threads increment the same counter concurrently -- the
        // plain ++ was a data race that undercounted; atomicAdd is required.
        atomicAdd(&bucket[num_bucket], 1);
    }
};
__host__
// void countSort(int * data, int * bucket, int length, int digit){
// int *local_sort = (int*) malloc (length * sizeof(int));
// int index = 0;
// // sort
// // printf("local sort ");
// for(int block =0; block < digit; block++){
// for(int d = 0; d < 10; d++){
// for(int j = 0; j < length; j++){
// if(to_digit_host(data[j], block) == d){
// local_sort[index] = data[j];
// index ++;
// bucket[block*10+d] --;
// }
// if(bucket[block*10+d] == 0) {
// // printf("\n");
// break;
// }
// }
// }
// }
// // printf("index ends in %d \n", index);
// // copy
// for(int i=0; i < length; i++){
// data[i] = local_sort[i];
// }
// free(local_sort);
// //empty_bucket(bucket, 10);
// }
// Stable counting-sort pass for 0-based digit position `digit` (to_digit_host
// is called with digit+1, its 1-based position). Uses the precomputed bucket
// counts as remaining-occurrence counters, decrementing as elements are
// placed; once a value's counter hits 0 the inner scan stops early.
// NOTE(review): if a counter is already 0 (no elements with that digit
// value), the inner loop still decrements it on a match before the zero
// check -- correct only because counts are exact; confirm against the
// atomic-counting kernel.
void countSort(int * data, int * bucket, int length, int digit){
int * local_sort = (int*) malloc (length * sizeof(int));
int index = 0;
// sort
// printf("local sort ");
for(int i =0; i < 10; i++){
for(int j = 0; j < length; j++){
if(to_digit_host(data[j], digit+1) == i){
local_sort[index] = data[j];
index ++;
bucket[digit*10+i] --;
}
if(bucket[digit*10+i] == 0) {
// printf("\n");
break;
}
}
}
// printf("index ends in %d \n", index);
// copy the stably-reordered elements back into data
for(int i=0; i < length; i++){
data[i] = local_sort[i];
}
free(local_sort);
// Reset this digit group? Note: this zeroes slots [0, 10), not the group
// [digit*10, digit*10+10) -- NOTE(review): likely a latent bug for digit > 0.
empty_bucket(bucket, 10);
}
__host__
// Reset the first `size` bucket counters to zero.
void empty_bucket(int * bucket, int size){
    int i = 0;
    while (i < size)
        bucket[i++] = 0;
}
__host__
void rng(int* arr, int n) {
    // Fill arr[0..n) with pseudo-random ints. The seed is fixed, so every
    // call (and every run) reproduces the exact same sequence.
    const int seed = 13516123;
    srand(seed);
    for (int i = 0; i < n; ++i)
        arr[i] = (int) rand();
}
__host__
// Largest element of vec[0..n); assumes n >= 1.
int max_el(int * vec, int n){
    int best = vec[0];
    for (int i = 1; i < n; ++i)
        if (vec[i] > best)
            best = vec[i];
    return best;
};
__device__
// Decimal digit of `el` at 1-based position `divider` (1 = ones place).
int to_digit(int el, int divider){
    while (--divider > 0)
        el /= 10;
    return el % 10;
};
__host__
// Host twin of to_digit: decimal digit of `el` at 1-based position `divider`.
int to_digit_host(int el, int divider){
    while (--divider > 0)
        el /= 10;
    return el % 10;
};
__host__
// Print the array space-separated on a single line, newline-terminated.
void print_array(int * array, int array_len){
    for (int i = 0; i < array_len; ++i)
        printf("%d ", array[i]);
    printf("\n");
}
__host__
// Write the array, space-separated, to both the test-result file and the
// output file.
// Fix: check the fopen results -- the original dereferenced NULL FILE*
// handles whenever either target directory was missing.
void print_array_file(int * array, int array_len){
    int n = array_len;
    /* open the files for writing */
    FILE * fp = fopen ("../test/result.txt","w");
    FILE * fo = fopen ("../output/output.txt","w");
    if (fp == NULL || fo == NULL) {
        fprintf(stderr, "print_array_file: cannot open output file(s)\n");
        if (fp) fclose(fp);
        if (fo) fclose(fo);
        return;
    }
    for(int i = 0; i < n; i++){
        fprintf (fp, "%d ", array[i]);
        fprintf (fo, "%d ", array[i]);
    }
    fprintf (fp, "\n ");
    fprintf (fo, "\n ");
    /* close the files */
    fclose (fp);
    fclose (fo);
}
__host__
// Number of decimal digits in `el`; 0 (and any value < 10) counts as 1.
int num_digit(int el){
    int count = 1;
    for (el /= 10; el > 0; el /= 10)
        ++count;
    return count;
};
|
19,011 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define _USE_MATH_DEFINES
#include <cmath>
#include <iostream>
#include <string>
#include <stdio.h>
static const int DIM = 128;
static const int NODES = DIM * DIM;
static const double L = 1.0 * NODES;
static const double TIME_OVERALL = 30.0; // seconds
static const double DIRICHLET = 0.0;
static const double NEUMAN = 5;
// Explicit 1D heat-equation time stepper over NODES points (treated as a
// flat 1D rod despite the 2D thread indexing): interior update is forward
// Euler on the second difference; node 0 holds the DIRICHLET value and the
// last node applies the NEUMAN flux condition. All timeSteps iterations run
// inside the kernel.
// NOTE(review): __syncthreads() only synchronizes within one block, but main
// launches a (DIM/32 x DIM/32) grid -- neighboring points owned by different
// blocks race between time steps. A correct multi-block version needs a
// cooperative launch with grid.sync() or one kernel launch per time step.
__global__ void Kernel(double *v, double *vPrev, double hx, double ht, unsigned long timeSteps) {
unsigned int idx_X = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int idx_Y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = idx_X + idx_Y * DIM;
for (int iStep = 0; iStep < timeSteps; ++iStep) {
if(idx > 0 && idx < (NODES - 1))
v[idx] = (((vPrev[idx + 1] - 2 * vPrev[idx] + vPrev[idx - 1]) * ht) / (hx * hx)) + vPrev[idx];
__syncthreads();
if (idx == 0) {
v[idx] = DIRICHLET;
} else if (idx == (NODES - 1)) {
v[idx] = hx * NEUMAN + v[idx - 1];
}
// Roll the state forward for the next step.
vPrev[idx] = v[idx];
__syncthreads();
}
}
// Host driver: sizes the time step, runs the heat-equation kernel once over
// all time steps, then prints the final temperature of every node.
int main()
{
size_t mem_size = sizeof(double) * NODES;
cudaError cudaStatus;
double hx = L / (NODES - 1);
double ht = 1e-1;//(hx * hx) / 2;
unsigned long timeSteps = TIME_OVERALL / ht;
std::cout << "hx = " << hx << std::endl;
std::cout << "ht = " << ht << std::endl;
std::cout << "steps = " << timeSteps << std::endl;
if (hx < 1e-9 || ht < 1e-14) {
std::cout << "too small values of hx and (or) ht: " << std::endl;
return 1;
}
double *devNodes;
double *prevDevNodes;
// NOTE(review): these allocations happen before cudaSetDevice below and
// their results are unchecked.
cudaMalloc((void **) &devNodes, mem_size);
cudaMalloc((void **) &prevDevNodes, mem_size);
cudaMemset(prevDevNodes, 0, mem_size);
cudaMemset(devNodes, 0, mem_size);
// One thread per node: DIM x DIM threads split into 32x32 blocks.
dim3 N_Grid(DIM / 32, DIM / 32, 1);
dim3 N_Block(32, 32, 1);
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
std::cout << "failed to set cuda device 0" << std::endl;
cudaFree(devNodes);
return 1;
}
Kernel <<< N_Grid, N_Block >>> (devNodes, prevDevNodes, hx, ht, timeSteps);
// cudaGetLastError only reports launch-configuration failures; execution
// errors surface at the (implicitly synchronizing) cudaMemcpy below.
cudaStatus = cudaGetLastError();
if(cudaStatus != cudaSuccess) {
std::cout << "last error: " << cudaGetErrorString(cudaStatus) << std::endl;
cudaFree(devNodes);
return 1;
}
double *hostNodes = (double *) malloc(mem_size);
cudaMemcpy(hostNodes, devNodes, mem_size, cudaMemcpyDeviceToHost);
cudaFree(devNodes);
cudaFree(prevDevNodes);
for(size_t i = 0; i < NODES; i++) {
std::cout << i << ": " << hostNodes[i] << std::endl;
}
free(hostNodes);
return 0;
}
|
19,012 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
// Defines
#define GridWidth 60
#define BlockWidth 128
// Variables for host and device vectors.
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N).
// Expects a 1-D launch with at least N threads in total; the bounds
// guard handles any excess threads in the final block.
__global__ void AddVectors(float* A, float* B, float *C, int N)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Bug fix: the guard must be idx < N. The original used idx <= N,
    // which read one element past A/B and wrote one past the end of C
    // (out-of-bounds access whenever a thread with idx == N exists).
    if (idx < N)
        C[idx] = A[idx] + B[idx];
}
// Host code performs setup and calls the kernel.
// Host driver: builds two N-element vectors with h_A[i] = i and
// h_B[i] = N - i (so every output element equals N), adds them on the
// device with AddVectors, and prints the result.
// Returns 0 on success, 1 on any allocation or CUDA failure.
int main(int argc, char** argv)
{
    float* h_A;
    float* h_B;
    float* h_C;
    float* d_A = NULL;
    float* d_B = NULL;
    float* d_C = NULL;
    int N= GridWidth * BlockWidth;
    printf("N=%d\n",N);
    size_t size = N * sizeof(float);
    dim3 dimGrid(GridWidth);
    dim3 dimBlock(BlockWidth);
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    // Fix: the original never checked any allocation result.
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    if (cudaMalloc((void**)&d_A, size) != cudaSuccess ||
        cudaMalloc((void**)&d_B, size) != cudaSuccess ||
        cudaMalloc((void**)&d_C, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    // Initialize host vectors h_A and h_B.
    for(int i=0; i<N; ++i)
    {
        h_A[i] = (float)i;
        h_B[i] = (float)(N-i);
    }
    // Copy host vectors h_A and h_B to device vectors d_A and d_B.
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    AddVectors<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the current API and also reports launch/execution errors, which
    // the original silently discarded.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Copy result from device memory to host memory.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    for(int i=0; i<N; ++i)
    {
        printf("%f\n",h_C[i]);
    }
    // Free device vectors.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Free host memory.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0; // explicit success status (original fell off the end)
}
|
19,013 | #include "includes.h"
//Bibliotecas Basicas
//Biblioteca Thrust
//Biblioteca cuRAND
//PARAMETROS GLOBAIS
const int QUANT_PAIS_AVALIA = 4;
int POP_TAM = 200;
int N_CIDADES = 20;
int BLOCKSIZE = 1024;
int TOTALTHREADS = 2048;
int N_GERA = 100;
const int MUT = 10;
const int MAX = 19;
const int MIN = 0;
const int ELITE = 2;
/*
* Busca por erros nos processos da gpu
*/
// Tournament parent selection (grid-stride kernel).
// For each of the n pool slots, inspects QUANT_PAIS_AVALIA pre-drawn
// candidate indices (paisAle, laid out QUANT_PAIS_AVALIA per slot) and
// records the candidate with the lowest fitness value into pool[slot].
//   n       - number of selection slots to fill
//   np      - unused here (kept for the caller's signature)
//   paisAle - n * QUANT_PAIS_AVALIA candidate indices into fitness
//   fitness - fitness value per individual (lower is better)
//   pool    - output: winning individual index per slot
__global__ void escolhePais(unsigned int n, unsigned int np, int *paisAle, double *fitness, int *pool) {
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int slot = first; slot < n; slot += step) {
        // Sentinel larger than any expected fitness, matching the original.
        double bestFit = 10000.0;
        int winner = -1;
        for (int cand = 0; cand < QUANT_PAIS_AVALIA; cand++) {
            int contestant = paisAle[slot * QUANT_PAIS_AVALIA + cand];
            double f = fitness[contestant];
            if (f < bestFit) {
                bestFit = f;
                winner = contestant;
            }
        }
        pool[slot] = winner;
    }
}
19,014 | #include "includes.h"
// Sobel edge detection with a shared-memory tile and _UNROLL_ x _UNROLL_
// output points per thread. Each thread cooperatively loads a num x num
// grid of tile cells into shMem, then (for interior threads) computes the
// horizontal (sum1) and vertical (sum2) Sobel responses per unrolled cell
// and thresholds the squared gradient magnitude to 0/255.
//   input/output - width x height images of int pixels
//   thresh       - compared against sum1^2 + sum2^2
// NOTE(review): _TILESIZE_, _TILESIZE_2 and _UNROLL_ are macros defined
// outside this view; shMem is sized 4 * _TILESIZE_2^2 while the row pitch
// used below is _UNROLL_ * _TILESIZE_2 — this only lines up if
// _UNROLL_ == 2 and _TILESIZE_2 relates to _TILESIZE_ accordingly; confirm
// against the macro definitions.
// NOTE(review): the shared-memory load below indexes input[] without a
// bounds check, so blocks covering the right/bottom image edge would read
// out of range — presumably the launch dimensions guarantee full coverage;
// verify against the host launch code.
__global__ void sobelEdgeDetectionSharedMemUnrollCoalsed(int *input, int *output, int width, int height, int thresh) {
__shared__ int shMem[4 * _TILESIZE_2 * _TILESIZE_2 ];
int num = _UNROLL_;
// Row pitch of the shared tile as indexed below.
int size = num * _TILESIZE_2;
// Top-left global coordinate of this thread's first cell.
int i = blockIdx.x * (num * _TILESIZE_) + threadIdx.x;
int j = blockIdx.y * (num * _TILESIZE_) + threadIdx.y;
int xind = threadIdx.x;
int yind = threadIdx.y;
// Cooperative load: each thread copies one pixel per unrolled cell.
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * (_TILESIZE_), yOffset = y * (_TILESIZE_);
shMem[ size * (yind + yOffset) + (xind + xOffset)] = input[(j + yOffset) * width + (i + xOffset)];
}
}
// Barrier is outside any divergent branch: all threads reach it.
__syncthreads();
// Skip the tile border (halo) threads and image-edge tiles; the 3x3
// stencil below needs all eight neighbours resident in shMem.
if (i < width - _TILESIZE_ && j < height - _TILESIZE_ && xind > 0 && yind > 0 && xind < (_TILESIZE_2 - 1) && yind < (_TILESIZE_2 - 1))
{
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
int xOffset = x * _TILESIZE_, yOffset = y * _TILESIZE_;
// Horizontal Sobel response (Gx).
int sum1 = shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)]
+ 2 * shMem[(xind + 1 + xOffset) + size * (yind + yOffset)] - 2 * shMem[(xind - 1 + xOffset) + size * (yind + yOffset)]
+ shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)];
// Vertical Sobel response (Gy).
int sum2 = shMem[(xind - 1 + xOffset) + size * (yind - 1 + yOffset)] + 2 * shMem[(xind + xOffset) + size * (yind - 1 + yOffset)] + shMem[(xind + 1 + xOffset) + size * (yind - 1 + yOffset)]
- shMem[(xind - 1 + xOffset) + size * (yind + 1 + yOffset)] - 2 * shMem[(xind + xOffset) + size * (yind + 1 + yOffset)] - shMem[(xind + 1 + xOffset) + size * (yind + 1 + yOffset)];
// Squared magnitude: avoids a sqrt by thresholding Gx^2 + Gy^2.
int magnitude = sum1 * sum1 + sum2 * sum2;
int index = (j + yOffset) * width + (i + xOffset);
if(magnitude > thresh)
output[index] = 255;
else
output[index] = 0;
}
}
}
}
19,015 | #include "includes.h"
// Pairwise IoU between two box sets, one (box1[r], box2[c]) pair per loop
// iteration, written to box_iou[r * N + c].
//   box1 / box2 - M / N boxes as float4 (x=xmin, y=ymin, z=xmax, w=ymax)
//   idxJump     - stride between pairs handled by the same thread
// The "+ 1" terms follow the inclusive pixel-coordinate box convention
// used when computing widths, heights and areas.
__global__ void box_iou_cuda_kernel(float * box_iou, float4 * box1, float4 * box2, long M, long N, int idxJump) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    for (long pair = tid; pair < M*N; pair += idxJump) {
        long r = pair / N;       // row -> index into box1
        long c = pair % N;       // col -> index into box2
        float4 a = box1[r];
        float4 b = box2[c];
        // Intersection rectangle corners (inclusive coordinates).
        float ix0 = fmaxf(a.x, b.x);
        float iy0 = fmaxf(a.y, b.y);
        float ix1 = fminf(a.z, b.z);
        float iy1 = fminf(a.w, b.w);
        // Clamp negative extents (disjoint boxes) to zero.
        float iw = fmaxf(ix1 - ix0 + 1, 0.0f);
        float ih = fmaxf(iy1 - iy0 + 1, 0.0f);
        float inter = iw * ih;
        float areaA = (a.z - a.x + 1) * (a.w - a.y + 1);
        float areaB = (b.z - b.x + 1) * (b.w - b.y + 1);
        box_iou[r * N + c] = inter / (areaA + areaB - inter);
    }
}
19,016 | float h_A[]= {
0.7213110389323074, 0.7036072892760992, 0.7972208480899733, 0.5153771118746243, 0.9216180039321551, 0.9615308379788774, 0.6739112073326918, 0.7155256159701121, 0.79258098175816, 0.9020993892924711, 0.8466942281531666, 0.5537511206889679, 0.5438491782768006, 0.7116611841848426, 0.5345733881172183, 0.6305435920228089, 0.9277412059481013, 0.9601175756536102, 0.5446681834227135, 0.8851113168080971, 0.9213980885405118, 0.9915170026771223, 0.9587707645596766, 0.6219713016116879, 0.59276932445235, 0.8176418731352921, 0.6798959501796369, 0.7878206193846803, 0.9719590782979264, 0.9033969542114628, 0.5257703038302184, 0.5060736164359405, 0.7617378755679971, 0.6813167728709468, 0.9829966035233257, 0.9436321253947957, 0.7141845778293617, 0.8573416967736969, 0.5422510511098686, 0.8854271419940865, 0.7079571984354764, 0.5809701005539115, 0.7670813919644976, 0.5026467498089977, 0.9606221012390379, 0.881001105638563, 0.5452291376880389, 0.6132540058623579, 0.8398635939579402, 0.8950965387747699, 0.577723664133599, 0.65654378252201, 0.551610115160845, 0.7471229692397765, 0.6064047653161813, 0.5348684993169526, 0.5714834053077453, 0.5699335991556576, 0.9581613026614635, 0.5800884251550227, 0.5621315217726607, 0.7260545065405787, 0.8263375959182511, 0.7004871816808094, 0.6628612849215572, 0.7630559501530698, 0.6090032592882435, 0.5297518102839368, 0.9716773789062979, 0.9792908103361173, 0.813729030576051, 0.9358222319928466, 0.9552555022955985, 0.9659368142992655, 0.5503401170334505, 0.7983438229770574, 0.8612723782138829, 0.8954838168746426, 0.6707221843700835, 0.87132828504265, 0.6813242207001085, 0.8731189965641872, 0.8963920044873096, 0.6589936053193723, 0.5142121673426723, 0.831984826281815, 0.7639888826373817, 0.6420662927592042, 0.8954349479086924, 0.6547561830427948, 0.7015274218684611, 0.9130054177465503, 0.8337834495754337, 0.9077154271987949, 0.8910566519116231, 0.9933333958880843, 0.7208793338115973, 0.8732275221059247, 0.6499131589293037, 0.6225016536582735, 
0.506024346065687, 0.6334473626646867, 0.8642540267703913, 0.6723920546622816, 0.6454028787559742, 0.7549523427254714, 0.805806790664657, 0.6303127937986285, 0.8937464695521591, 0.9274795925381079, 0.7702126603748829, 0.8055277344962463, 0.8157804696830382, 0.6219542206282047, 0.5431210508452308, 0.5110240954422681, 0.5513970040916557, 0.9426855261472222, 0.8748081215557051, 0.741094255177086, 0.8497124066557136, 0.6273164134428054, 0.7748003989421978, 0.6611962772062256, 0.7401352720701797, 0.7687019107934498, 0.7767744755636825, 0.5913898182077495, 0.5385837135035092, 0.6082693609582921, 0.8626094369370461, 0.9699138062719916, 0.6910816521277436, 0.8865828246797212, 0.7391787580562894, 0.8063511498264284, 0.7425674516704125, 0.6912159215757671, 0.6498378086468368, 0.5128719865057241, 0.7829125973602015, 0.7452102281974154, 0.9838934615177628, 0.7448248499577833, 0.7821284001877189, 0.8336625562667601, 0.7947946889587203, 0.6821792645687259, 0.6749657511955991, 0.82056385260541, 0.594456091081921, 0.5025934920429402, 0.5358054634484388, 0.7299644543146196, 0.800198263470014, 0.5200795780935461, 0.9127085069646359, 0.520189261488305, 0.5721914853245895, 0.8353448213929731, 0.6954853620284434, 0.5287393914234155, 0.7749133682929376, 0.9626332237047208, 0.504531933674538, 0.7416881835220519, 0.8029161141930703, 0.9512371692847112, 0.678590959823639, 0.5942002011283005, 0.7231946939255616, 0.8891894086624462, 0.8467433825580544, 0.8250284374644645, 0.5006569190556542, 0.7168051210436979, 0.7906952071921669, 0.8270348886306684, 0.6098752346678269, 0.5222347961416989, 0.7929018318998295, 0.5101055813477642, 0.7169833755909871, 0.5702461928129504, 0.9023390198515662, 0.8818647192968105, 0.963439971720073, 0.5654149968549657, 0.9693649864156556, 0.8935600740999206, 0.7186220005851536, 0.7447207297667482, 0.9237355949222104, 0.9169525724065697, 0.5802019547819947, 0.9405852601832476, 0.8876001178289146, 0.7693074221942977, 0.5587374478203172, 0.94048087746272, 
0.7081619194854067, 0.5577780946399669, 0.639746138541101, 0.5677279896423741, 0.7013171406572657, 0.8531962429999775, 0.9534461688042684, 0.9544454327368554, 0.7419798377038764, 0.7781598798873539, 0.9880995476366918, 0.9085277704260113, 0.8808235015558032, 0.645561561502733, 0.6756942754185749, 0.5067823378774516, 0.506378470755337, 0.810777692389024, 0.5123445978187132, 0.8708238156107807, 0.8503474958572756, 0.954442138732992, 0.9080537030698932, 0.9018808628797211, 0.9449707700689803, 0.8191690079487154, 0.6827385944940092, 0.8929245413314859, 0.5292200559271782, 0.8624408829473031, 0.6845293685637726, 0.6978517435484533, 0.7839079461033043, 0.8375322422631433, 0.9537265678949418, 0.8208246713448575, 0.9297464038826257, 0.8533886395580749, 0.6595065038741466, 0.5367401755113856, 0.5711884859357816, 0.9956268735292508, 0.5355328946765383, 0.5455838142054785, 0.9854809795755022, 0.7816238163921971, 0.5441177260680754, 0.7809034256329053, 0.8833044171043674, 0.7976267671978272, 0.6949480150237191, 0.97620470200642, 0.8229129809550366, 0.869410101611787, 0.7857192541226444, 0.9049082880373116, 0.8217825261671052, 0.5978171261180575, 0.7516740248586333, 0.835374844356755, 0.6647492038017275, 0.654372380384227, 0.533928025397404, 0.8754977563944351, 0.5272353988484589, 0.9324332907020879, 0.8092159617417161, 0.8081259134157913, 0.7270657386338355, 0.9038540598848672, 0.9711575053233915, 0.6771643891208445, 0.9579552939218283, 0.853761852888534, 0.8960824555273585, 0.5269678068509709, 0.6943165773545628, 0.5796276656005226, 0.706014162848424, 0.8372419249150096, 0.6996409865937712, 0.5716824987141625, 0.6438334075025813, 0.5260148903229024, 0.9331886427333145, 0.9323333298241967, 0.7260844671738941, 0.9270432682797352, 0.9337665703653302, 0.6315745755524134, 0.7385872282881337, 0.7804016374726153, 0.8425343267477277, 0.8992882216574574, 0.8154295497769282, 0.9454044330995546, 0.9504649658919259, 0.7552753505697389, 0.703333042647019, 0.851159545729975, 
0.6219148319282171, 0.916502088917321, 0.8613541833378122, 0.9124095174944935, 0.9286629029328552, 0.612550255934398, 0.8679719411162063, 0.9693607203380965, 0.8453412431169058, 0.8725376854813824, 0.7912174040299396, 0.5888947879582849, 0.9848924865749478, 0.8174611882014551, 0.7400586120333806, 0.9892661252731871, 0.9813085520067423, 0.9573712978603379, 0.649588198345867, 0.6250235203353991, 0.5955909287888107, 0.620373053569861, 0.526029835569561, 0.7144405404754234, 0.780285293622486, 0.8666553691054684, 0.8964231072882586, 0.8591118144087425, 0.9572999862830796, 0.9572257787849424, 0.7738885343608862, 0.6271141855599663, 0.5539161885973134, 0.674124267886359, 0.8456009864511342, 0.789085976005303, 0.9179901181123216, 0.6757078684677362, 0.554766193187559, 0.8991436828308943, 0.72861832616026, 0.8437634536967329, 0.9579330569265415, 0.9850939686630349, 0.8228998187444237, 0.8388421319341619, 0.5297061437767218, 0.6094811259115935, 0.6935463743720558, 0.8634731923807555, 0.8062166169630336, 0.7894429442727542, 0.7651759682262698, 0.9731711420190147, 0.9941330255107579, 0.7341363475012623, 0.9521718941174941, 0.9765870463957484, 0.7892140968169058, 0.7051042952693836, 0.7433897579012426, 0.7796078583896936, 0.9420146913245346, 0.5585284274921852, 0.6614237390233539, 0.8225768496189233, 0.9228783746366208, 0.5543525000804628, 0.712200484202808, 0.7458232809428115, 0.6370976984346218, 0.7614142413586191, 0.807507380810081, 0.5224827912351996, 0.6724736119732511, 0.593179868665466, 0.9306430166660243, 0.5324363741372664, 0.8889130123351461, 0.9107423542503432, 0.6550202302554903, 0.944894451539642, 0.9052118156355418, 0.5590466975750978, 0.805467627477866, 0.980986543801699, 0.5502338170531667, 0.7555823165396496, 0.5443648978107398, 0.7345272275399661, 0.5414860274201246, 0.5203901051451236, 0.5423934957350249, 0.6539933857773308, 0.9208207415813712, 0.5502128020162411, 0.6876157144549884, 0.5702853582447343, 0.5904228543623173, 0.8889623987647162, 
0.8985261691881565, 0.5048154598888724, 0.6076562850468037, 0.6961908971137987, 0.6500172650564393, 0.8511948985756508, 0.66319992047087, 0.9484600768282544, 0.5089392129473143, 0.7668151252216436, 0.8848797511184859, 0.8442684141503054, 0.8293842491761947, 0.9228608712069555, 0.716848629788235, 0.9187140383200945, 0.7381151908607154, 0.9316436232594469, 0.9007970659292505, 0.8571124627981633, 0.7563461277617688, 0.513696518145864, 0.5748871483045002, 0.9520785587809371, 0.8896683038703164, 0.6998782192395402, 0.8834708853708064, 0.6918430479828297, 0.9714088838129342, 0.7358356385444389, 0.706251044986796, 0.6445022136693828, 0.6502160963722614, 0.67845490395182, 0.6989987170451886, 0.5019984250344179, 0.595813407554385, 0.7070955606691687, 0.8216025133858569, 0.6394441217980584, 0.9861550745536893, 0.5630267871638102, 0.7197774220892457, 0.5707820282987905, 0.5554174483683547, 0.5608175917649101, 0.7346401576638559, 0.6966702058717653, 0.6445646315267469, 0.8744094501599702, 0.8013922028376101, 0.8827004493426109, 0.6030222621908303, 0.8946255779552771, 0.5787758041400044, 0.7187007220827769, 0.6284769420023835, 0.7346188868724551, 0.695829407120654, 0.6386811653525657, 0.7150252903858563, 0.6293714022285715, 0.5249633990391243, 0.8647394391293555, 0.8230188310468833, 0.8976785086232437, 0.5039818379281453, 0.8211569054426457, 0.7194889824918933, 0.9289131829642242, 0.7541040312860572, 0.742051339762712, 0.5515601578515075, 0.8893119930560853, 0.7849628852472481, 0.8274409410965613, 0.9430988897880357, 0.5593617432017448, 0.9863674781085019, 0.8818683165019685, 0.5334322723076458, 0.5431232249183211, 0.956274780832098, 0.6750059580031558, 0.8033812934976199, 0.5633421647434877, 0.7588173120251935, 0.6148214442198539, 0.5306560401308527, 0.9159055555823894, 0.7829682549085655, 0.6077550001723855, 0.6768516473519923, 0.9083875292208695, 0.8238798732507031, 0.8842950293224175, 0.7509042583446859, 0.5835260581189603, 0.771432641391331, 0.8735359080421995, 
0.6654501818823724, 0.8330481168914817, 0.6586063723028632, 0.5324851641008854, 0.6516233791749912, 0.8668811241368004, 0.6901352977817978, 0.8276546369593913, 0.883845084525178, 0.5816936278385099, 0.6363005501359384, 0.9231059836000148, 0.7356217224283114, 0.8089825425159602, 0.5589388507840811, 0.6306424940443877, 0.5350914779207365, 0.5810872349283129, 0.8127313666556297, 0.9468961803825973, 0.7860397530364289, 0.6428268710185303, 0.8479121114897317, 0.9319903051408973, 0.7831470045544986, 0.7486175937861625, 0.9891526397281045, 0.8517228564544181, 0.8630442177017649, 0.7009876895581848, 0.919993632123095, 0.9742906773769642, 0.5001017712541294, 0.7288059511752198, 0.5356269422575284, 0.5009525538060497, 0.506362747718705, 0.6855898077307808, 0.8186058722239021, 0.6437350504120393, 0.718239019281738, 0.5968857591904404, 0.609524616029788, 0.7346307101193603, 0.5303081011328907, 0.8945951420123923, 0.714368980223252, 0.9270788314109553, 0.7772798233338185, 0.5043792436685017, 0.5489902958837589, 0.8161466086153648, 0.9002587902927479, 0.8594978043993857, 0.7720918657186675, 0.708471889407926, 0.8553134366489652, 0.8307261279699127, 0.9196334979451786, 0.5460130796628213, 0.5322359638794936, 0.7015642375586723, 0.9454702956809771, 0.5614611510499937, 0.9886486033452997, 0.8505700200768735, 0.7705538255754345, 0.5242606557494717, 0.5487130885383176, 0.9797450718881408, 0.7620902625223458, 0.5234501381612409, 0.6015105519508768, 0.6908773949001565, 0.8052201557728094, 0.5619060781574086, 0.8365028605718405, 0.7381675988564596, 0.5518180430531955, 0.7932454856705924, 0.8872510212340574, 0.7280059763374385, 0.978804367491308, 0.9211564036702524, 0.8495645790636963, 0.9434113274257316, 0.9900948520859476, 0.5320469764868729, 0.824062921633453, 0.6392225464923971, 0.7767066000638363, 0.5870722591297224, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 97, 99, 101, 103, 106, 108, 111, 113, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 152, 158, 160, 163, 165, 168, 170, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 199, 201, 204, 206, 209, 211, 198, 116, 203, 198, 116, 198, 116, 203, 193, 215, 116, 156, 156, 193, 196, 196, 198, 215, 587, 589, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 
161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 1065, 1072, 203, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 1075, 162, 157, 141, 1059, 162, 157, 172, 1065, 208, 162, 157, 141, 1059, 162, 157, 172, 1065, 213, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 167, 141, 105, 141, 110, 213, 208, 162, 157, 162, 157, 167, 141, 1036, 162, 157, 172, 1065, 203, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 110, 105, 213, 208, 213, 208, 162, 157, 141, 110, 105, 213, 208, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 110, 105, 162, 157, 172, 1065, 213, 208, 162, 157, 162, 157, 162, 157, 167, 141, 1059, 162, 157, 162, 157, 167, 172, 1065, 203, 213, 208, 203, 213, 208, 1086, 1074, 1086, 1074, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1087, 1086, 1079, 1087, 1087, 1087, 1086, 1087, 1086, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1316, 
1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1087, 1086, 1087, 1086, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1536, 1538, 1545, 1547, 1549, 1551, 1555, 1559, 1561, 1564, 1566, 1568, 1570, 1574, 1579, 1583, 1588, 1590, 1592, 1596, 1598, 1602, 1604, 1606, 1613, 1615, 1617, 1622, 1627, 1629, 1631, 1633, 1637, 1639, 1642, 1644, 1646, 1648, 1651, 1653, 1655, 1657, 1659, 1661, 1665, 1667, 1671, 1673, 1675, 1677, 1682, 1684, 1690, 1693, 1544, 1542, 1557, 1558, 1692, 1696, 1698, 1681, 1689, 1700, 1701, 1692, 1702, 1703, 1573, 1577, 1692, 1089, 1582, 1586, 1692, 1089, 1601, 1708, 1710, 1612, 1610, 1621, 1625, 1626, 1714, 1670, 1681, 1688, 1689, 1692, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1540, 1084, 1083, 1846, 1847, 1085, 1553, 1084, 1083, 1798, 1848, 1849, 1085, 1850, 1679, 1084, 1083, 1801, 1080, 1081, 1853, 1854, 1857, 1679, 1084, 1083, 1860, 1686, 1084, 1083, 1861, 1862, 1863, 1679, 1084, 1083, 1864, 1686, 1084, 1083, 1865, 1866, 1867, 1594, 1084, 1083, 1811, 1686, 1084, 1083, 1868, 1085, 1608, 1084, 1083, 1871, 1872, 1085, 1619, 1084, 1083, 1873, 1686, 1084, 1083, 1874, 1875, 1085, 1635, 1084, 1083, 1824, 1686, 1084, 1083, 1826, 1085, 1089, 1679, 1084, 1083, 1830, 1080, 1081, 1663, 1084, 1083, 1836, 1686, 1084, 1083, 1877, 1085, 1679, 1084, 1083, 1878, 1686, 1084, 1083, 1879, 1880, 1085, 1881, 1089, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 
2048, 2049, 2050, 2051, 2053, 2054, 2055, 2056, 2057, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2069, 2070, 2071, 2072, 2073, 2075, 2076, 2077, 2079, 2081, 2082, 2083, 2085, 2086, 2087, 2089, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2099, 2100, 2101, 2102, 2103, 2105, 2106, 2107, 2108, 2110, 2111, 2112, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2140, 2141, 2142, 2143, 2145, 2146, 2147, 2149, 2150, 2151, 2152, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2304, 2309, 2314, 2320, 2321, 2322, 2325, 2328, 2329, 2332, 2335, 2336, 2340, 2344, 2349, 2352, 2356, 2360, 2366, 2372, 2376, 2380, 2383, 2386, 2388, 2308, 2389, 2313, 2319, 2318, 2389, 2343, 2348, 2355, 2365, 2364, 2371, 2370, 2389, 2379, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2560, 2561, 2562, 2565, 2566, 2568, 2569, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2585, 2586, 2587, 2588, 2589, 2371, 2370, 2080, 2090, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2371, 2370, 2598, 2599, 2389, 2387, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2148, 2307, 2058, 2312, 2836, 2148, 2317, 2838, 2148, 2068, 2840, 2841, 2078, 2074, 2842, 2088, 2084, 2843, 2098, 2339, 
2844, 2148, 2347, 2113, 2109, 2363, 2359, 2848, 2850, 2148, 2369, 2852, 2853, 2139, 2375, 2854, 2148, 2144, 2856, 2857, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3072, 3073, 3074, 3075, 3077, 3078, 3080, 3081, 3082, 3084, 3085, 3087, 3088, 3090, 3091, 3093, 3094, 3095, 3096, 3097, 3098, 3101, 3102, 3103, 3105, 3106, 3108, 3109, 3110, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 
180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3328, 3330, 3332, 3334, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3352, 3354, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3100, 3356, 2847, 3089, 3092, 2835, 3076, 3336, 2846, 3079, 3086, 3099, 3107, 3351, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 
96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 
255, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4352, 4354, 4356, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 
181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4608, 4610, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4864, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 
111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1090, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5376, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 98, 100, 102, 104, 107, 109, 112, 114, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 147, 149, 151, 153, 159, 161, 164, 166, 169, 171, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 200, 202, 205, 207, 210, 212, 115, 115, 194, 115, 115, 115, 197, 195, 96, 96, 115, 154, 155, 214, 194, 195, 197, 214, 588, 590, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 238, 239, 250, 253, 254, 289, 334, 335, 343, 346, 363, 375, 376, 385, 386, 387, 388, 392, 883, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 
161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1053, 1034, 1061, 1055, 1056, 1030, 1024, 1032, 1033, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1043, 1025, 1051, 1073, 1066, 1068, 1067, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1076, 1053, 1026, 1057, 1058, 1061, 1060, 1063, 1064, 1070, 1053, 1027, 1057, 1058, 1061, 1060, 1063, 1064, 1071, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1028, 1050, 1029, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1030, 1031, 1032, 1033, 1068, 1067, 1053, 1034, 1061, 1055, 1056, 1035, 1058, 1061, 1060, 1063, 1064, 1066, 1068, 1037, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1038, 1061, 1060, 1063, 1040, 1039, 1068, 1067, 1071, 1070, 1053, 1041, 1048, 1043, 1042, 1045, 1044, 1047, 1046, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1048, 1050, 1049, 1061, 1060, 1063, 1051, 1068, 1067, 1053, 1052, 1061, 1054, 1061, 1055, 1056, 1057, 1058, 1061, 1060, 1061, 1061, 1062, 1063, 1064, 1066, 1068, 1067, 1069, 1071, 1070, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1078, 1077, 1077, 1077, 1077, 1078, 1082, 1078, 1078, 1082, 1088, 1088, 1088, 1088, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 
228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 251, 252, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 336, 337, 338, 339, 340, 341, 342, 344, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 377, 378, 379, 380, 381, 382, 383, 384, 389, 390, 391, 398, 412, 413, 415, 416, 1315, 1315, 1315, 1315, 434, 435, 446, 447, 457, 458, 460, 461, 467, 488, 490, 491, 507, 517, 518, 521, 522, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1537, 1539, 1546, 1548, 1550, 1552, 1556, 1560, 1562, 1565, 1567, 1569, 1571, 1575, 1580, 1584, 1589, 1591, 1593, 1597, 1599, 1603, 1605, 1607, 1614, 1616, 1618, 1623, 1628, 1630, 1632, 1634, 1638, 1640, 1643, 1645, 1647, 1649, 1652, 1654, 1656, 1658, 1660, 1662, 1666, 1668, 1672, 1674, 1676, 1678, 1683, 1685, 1691, 1694, 1543, 1541, 1687, 1302, 1315, 1697, 1699, 1650, 1315, 420, 421, 1315, 423, 424, 1572, 1576, 1078, 1578, 1581, 1585, 1078, 1587, 1600, 1709, 1711, 1611, 1609, 1620, 1624, 1078, 1715, 1669, 1680, 1687, 1088, 1088, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 
140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 1793, 1840, 1792, 396, 397, 1794, 1797, 1796, 1795, 1554, 404, 405, 1799, 407, 1841, 1840, 1800, 1563, 1802, 1803, 418, 419, 422, 1841, 1840, 1804, 428, 1843, 1843, 1805, 432, 433, 436, 1841, 1840, 1806, 440, 1843, 1843, 1807, 444, 445, 448, 1810, 1809, 1808, 1595, 1843, 1843, 1812, 456, 1813, 1815, 1840, 1814, 465, 466, 1816, 1818, 1840, 1817, 472, 1843, 1843, 1819, 476, 477, 1820, 1823, 1822, 1821, 1636, 1843, 1843, 1825, 1641, 1827, 1828, 1841, 1840, 1829, 1650, 1831, 1832, 1835, 1834, 1833, 1664, 1843, 1843, 1837, 505, 1838, 1841, 1840, 1839, 511, 1843, 1843, 1842, 515, 516, 1844, 520, 1845, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 393, 394, 395, 2052, 399, 400, 401, 402, 403, 406, 408, 409, 
410, 411, 414, 417, 1855, 1858, 425, 426, 427, 429, 430, 431, 1704, 437, 438, 439, 441, 442, 443, 1706, 449, 450, 451, 452, 453, 454, 455, 459, 462, 463, 464, 2104, 468, 469, 470, 471, 473, 474, 475, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 489, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 506, 508, 509, 510, 512, 513, 514, 1717, 519, 1719, 523, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2305, 2310, 2315, 1856, 1859, 2323, 2326, 1705, 2330, 2333, 1707, 2337, 2341, 2345, 2350, 2353, 2357, 2361, 2367, 2373, 2377, 2381, 2384, 1718, 1720, 1695, 2061, 2059, 1852, 1851, 1870, 1869, 1712, 2114, 1713, 1713, 1876, 1876, 1716, 1716, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 
161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2306, 2311, 2316, 2324, 2327, 2331, 2334, 2338, 2342, 2346, 2351, 2354, 2358, 2362, 2368, 2374, 2378, 2382, 2385, 526, 529, 530, 533, 534, 2564, 2563, 2567, 2570, 547, 548, 551, 554, 557, 558, 559, 560, 2584, 2583, 567, 568, 2584, 2583, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2834, 2816, 2834, 2817, 2837, 2834, 2818, 2839, 2834, 2830, 537, 538, 2820, 2819, 541, 2822, 2821, 544, 2824, 2823, 2845, 2834, 2825, 2827, 2826, 2829, 2828, 2849, 2851, 2834, 2830, 563, 564, 2832, 2831, 2855, 2834, 2833, 571, 572, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 524, 525, 527, 528, 531, 532, 535, 536, 3083, 539, 540, 542, 543, 545, 546, 549, 550, 552, 553, 555, 556, 561, 562, 3104, 565, 566, 569, 570, 3111, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 
218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3329, 3331, 3333, 3335, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3353, 3355, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3594, 3596, 3592, 3589, 3590, 3584, 3585, 3587, 3591, 3586, 3588, 3593, 3595, 3594, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 
135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4353, 4355, 4357, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 
221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4609, 4358, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4865, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 
151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5120, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 591, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 256
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 768
#define SIZE_OF_AC 5120
// Evaluates a fixed 20-layer arithmetic circuit over the values in A.
// Layout: R holds 23 layers of t = THREADS_PER_BLOCK entries each — layers
// 0..2 are inputs copied from A, layers 3..22 are computed gates. For gate g,
// B[g] and C[g] are operand indices into R, and Op[g] selects multiply (true)
// or add (false).
// NOTE(review): R is __shared__ (block-scoped) but is indexed with the
// *global* thread id i, so this only works when a single block is launched
// (BLOCKS_PER_GRID is 1 above) — confirm before reusing with larger grids.
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[23*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
// NOTE(review): every thread stores final=0 with no barrier before the loop;
// benign only because all threads write the same value.
final=0;
// Load the three input layers from global memory into shared scratch.
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
__syncthreads();
// Each iteration re-evaluates the whole circuit from the (unchanged) input
// layers, so `final` accumulates n_iter copies of the same gate value —
// presumably intentional for benchmarking; verify against callers.
for (int iter=0; iter< n_iter; iter++) {
// One barrier per layer: layer k reads entries produced by layer k-1 (or any
// earlier layer) via the B/C index tables.
R[i + 3*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
__syncthreads();
R[i + 4*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
__syncthreads();
R[i + 5*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
__syncthreads();
R[i + 6*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
__syncthreads();
R[i + 7*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
__syncthreads();
R[i + 8*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
__syncthreads();
R[i + 9*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
__syncthreads();
R[i + 10*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
__syncthreads();
R[i + 11*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
__syncthreads();
R[i + 12*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
__syncthreads();
R[i + 13*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
__syncthreads();
R[i + 14*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
__syncthreads();
R[i + 15*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
__syncthreads();
R[i + 16*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
__syncthreads();
R[i + 17*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
__syncthreads();
R[i + 18*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
__syncthreads();
R[i + 19*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
__syncthreads();
R[i + 20*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
R[i + 21*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
__syncthreads();
R[i + 22*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
// Thread 0 accumulates the first entry of the final layer (its own write
// above, so no barrier is needed for this read).
if (i==0) { final += R[22*t]; }
__syncthreads();
}
// Publish the accumulated result in-place through A[0].
if (i==0) { A[0]= final;}
}
|
19,017 | /**
* @author Eddie Davis (eddiedavis@u.boisestate.edu)
* @author Jeff Pope (jeffreymithoug@u.boisestate.edu)
* @file mandelbrot.cu
* @brief CS530 PA4: Mandelbrot-CUDA Impementation
* @date 12/4/2016
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <cuda_runtime.h>
#define RMIN 0.3129928802767
#define RMAX 0.31299305009252
#define IMIN 0.0345483210604
#define IMAX 0.0345485012278
#define RADIUS_SQ 4.0 /* 2^2 */
#define DEF_WIDTH 2400 /* Image width in pixels */
#define DEF_HEIGHT 2400 /* Image height in pixels */
#define DEF_BLK_SZ 32 /* BLOCK_SIZE = GCD(WIDTH, THREADS_PER_BLOCK) = GCD(2400, 1024) */
#define MIN_BLK_SZ 1
#define MAX_COLOR UCHAR_MAX /* 255 */
#define OUT_FILE "Mandelbrot.pgm"
#define DEF_ITER 1000
#define DEBUG 0
typedef int DTYPE;
//typedef unsigned char DTYPE;
/**
* writeOutput
*
* Writes Mandelbrot image in PGM format.
*
* @param fileName Filename to write PGM data.
* @param data Output array data (Mandelbrot pixels)
* @param width Image width
* @param height Image height
*/
/**
 * writeOutput
 *
 * Writes Mandelbrot image in PGM (P2, ASCII) format.
 *
 * Fix: the original ignored fopen() failure and would crash on a NULL
 * stream; it now reports the error and returns without writing.
 *
 * @param fileName Filename to write PGM data.
 * @param data     Output array data (Mandelbrot pixels, row-major, width*height).
 * @param width    Image width.
 * @param height   Image height.
 */
void writeOutput(const char *fileName, DTYPE *data, int width, int height) {
    int i, j;                     /* index variables */
    int max = -1;                 /* largest pixel value, required by the PGM header */
    int size = width * height;
    for (i = 0; i < size; ++i) {
        if (data[i] > max) {
            max = data[i];
        }
    }
    FILE *fout = fopen(fileName, "w");
    if (fout == NULL) {
        /* fix: report the failure instead of dereferencing a NULL stream */
        perror(fileName);
        return;
    }
    /* PGM file header */
    fprintf(fout, "P2\n");
    fprintf(fout, "%d\t%d\n", width, height);
    fprintf(fout, "%d\n",max);
    /* pixel data, one image row per text line */
    for (i = 0; i < height; ++i) {
        for (j = 0; j < width; ++j) {
            fprintf(fout, "%d\t", data[i * width + j]);
        }
        fprintf(fout,"\n");
    }
    /* flush the buffer and close the file */
    fflush(fout);
    fclose(fout);
}
/**
* cudaAssert
*
* CUDA error handler.
*
* @param code cudaError_t error code struct.
* @param file Name of file in which error occurred.
* @param line Line number on which error occurred.
*/
/* cudaAssert / _cudaAssert: abort with file/line context when a CUDA runtime
 * call does not return cudaSuccess. Wrap every runtime API call with
 * cudaAssert(...) so failures are reported where they occur. */
#define cudaAssert(ans) { _cudaAssert((ans), __FILE__, __LINE__); }
inline void _cudaAssert(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess) {
        return;  /* fast path: nothing to report */
    }
    fprintf(stderr, "cudaAssert: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(code);
}
/**
* cudaPrintDevice
*
* Prints revelevant information about the given CUDA device.
*
* @param file File pointer to write device properties.
* @param prop cudaDeviceProp structure pointer.
* @param dnum CUDA device number.
*/
/**
 * cudaPrintDevice
 *
 * Writes a human-readable summary of one CUDA device's properties to `file`.
 *
 * @param file File pointer to write device properties.
 * @param prop cudaDeviceProp structure pointer.
 * @param dnum CUDA device number.
 */
void cudaPrintDevice(FILE *file, cudaDeviceProp *prop, int dnum) {
    fprintf(file, "Device Number: %d\n", dnum);
    fprintf(file, " Device name: %s\n", prop->name);
    fprintf(file, " Memory Clock Rate (KHz): %d\n", prop->memoryClockRate);
    fprintf(file, " Memory Bus Width (bits): %d\n", prop->memoryBusWidth);
    fprintf(file, " Peak Memory Bandwidth (GB/s): %f\n",
    2.0 * prop->memoryClockRate * (prop->memoryBusWidth / 8) / 1.0e6);
    fprintf(file, " Compute Version: %d.%d\n", prop->major, prop->minor);
    /* Map the compute mode enum to its display name (default for any other). */
    const char *modeName = "Default";
    if (prop->computeMode == cudaComputeModeExclusive) {
        modeName = "Exclusive";
    } else if (prop->computeMode == cudaComputeModeProhibited) {
        modeName = "Prohibited";
    }
    fprintf(file, " Compute Mode: ");
    fprintf(file, "%s", modeName);
    fprintf(file, "\n");
    fprintf(file, " SM count: %d\n", prop->multiProcessorCount);
    fprintf(file, " Shared mem/block: %zd\n", prop->sharedMemPerBlock);
    fprintf(file, " Threads per warp: %d\n", prop->warpSize);
    fprintf(file, " Max threads per block: %d\n", prop->maxThreadsPerBlock);
    /* Block and grid limits, printed as "(x,y,z,)" triples. */
    fprintf(file, " Max block size: (");
    for (int d = 0; d < 3; d++) {
        fprintf(file, "%d,", prop->maxThreadsDim[d]);
    }
    fprintf(file, ")\n Max grid size: (");
    for (int d = 0; d < 3; d++) {
        fprintf(file, "%d,", prop->maxGridSize[d]);
    }
    fprintf(file, ")\n\n");
}
/**
* mand (CUDA kernel function)
*
* Generates the Mandelbrot set.
*
* @param output Output array to receive computed Mandelbrot pixels.
* @param maxIter Max iterations to test for escape values.
* @param width Image width.
* @param height Image height.
* @param realRange Range of real component.
* @param imagRange Range of imaginary component.
*/
/**
 * mand (CUDA kernel function)
 *
 * Generates the Mandelbrot set escape-time image.
 *
 * Fix: the real axis is now sampled from the pixel column and the imaginary
 * axis from the pixel row. The original paired `row` with `realRange`
 * (which the caller derives from `width`) and `col` with `imagRange`
 * (derived from `height`), so for non-square images the sampled rectangle
 * over/undershot [RMIN,RMAX] x [IMIN,IMAX].
 *
 * Expects a 2D launch where x indexes columns and y indexes rows; edge
 * blocks are bounds-checked, so the grid may over-cover the image.
 *
 * @param output    Output array (width*height pixels, row-major).
 * @param maxIter   Max iterations to test for escape values.
 * @param width     Image width.
 * @param height    Image height.
 * @param realRange Real-axis step per column: (RMAX-RMIN)/(width-1).
 * @param imagRange Imaginary-axis step per row: (IMAX-IMIN)/(height-1).
 */
__global__ void mand(DTYPE* output, int maxIter, int width, int height, double realRange, double imagRange) {
    int col = blockDim.x * blockIdx.x + threadIdx.x; // Image col (X coord)
    int row = blockDim.y * blockIdx.y + threadIdx.y; // Image row (Y coord)
    if (col < width && row < height) {
        int idx = row * width + col;
        double cReal = RMIN + col * realRange;  // real axis <- column (width-scaled step)
        double cImag = IMIN + row * imagRange;  // imaginary axis <- row (height-scaled step)
        double zReal = 0.0;
        double zImag = 0.0;
        double zReal2 = 0.0;
        double zImag2 = 0.0;
        double zCurr;
        int iter = 0;
        for (; iter < maxIter; ++iter) {
            zCurr = zReal;
            zReal2 = zReal * zReal;
            zImag2 = zImag * zImag;
            // z = z^2 + c, expanded into real/imaginary parts.
            zReal = zReal2 - zImag2 + cReal;
            zImag = (2.0 * zCurr * zImag) + cImag;
            // Escape test on |z|^2 (pre-update components) against 2^2.
            if (zReal2 + zImag2 > RADIUS_SQ) {
                break;
            }
        }
        // Scale iteration count into [0, MAX_COLOR]; double cast first to
        // avoid int overflow for very large maxIter.
        output[idx] = (DTYPE) floor(((double) MAX_COLOR * iter) / (double) maxIter);
    }
}
/**
* main
*
* Main function.
*
* @param argc Argument count.
* @param argv Argument values.
* @return
*/
/**
 * main
 *
 * Parses arguments, launches the Mandelbrot kernel, and writes the image.
 * Usage: prog [maxIter] [width] [height] [blockX] [blockY]
 *
 * Fixes vs. original:
 *  - grid size uses ceiling division so edge pixels are computed when the
 *    image dimensions are not multiples of the block size;
 *  - cudaGetLastError() is checked after the kernel launch;
 *  - timing events are destroyed.
 *
 * @param argc Argument count.
 * @param argv Argument values.
 * @return 0 on success, -1 on error.
 */
int main(int argc, char ** argv) {
    int nDevices = 0;
    DTYPE *output = NULL;       /* host pixel buffer */
    DTYPE *d_output = NULL;     /* device pixel buffer */
    float time;                 /* elapsed time in ms */
    /* Argument 1: maximum escape-test iterations (<1 prints usage). */
    int maxIter = DEF_ITER;
    if (argc > 1) {
        maxIter = atoi(argv[1]);
    }
    if (maxIter < 1) {
        printf("usage: %s [MAX_ITERATION=%d] [WIDTH=%d] [HEIGHT=%d] [BLOCK_X=%d] [BLOCK_Y=1]\n",
               argv[0], DEF_ITER, DEF_WIDTH, DEF_HEIGHT, MIN_BLK_SZ);
        return 0;
    }
    /* Arguments 2-3: image dimensions. */
    int width = DEF_WIDTH;
    if (argc > 2) {
        width = atoi(argv[2]);
    }
    int height = DEF_HEIGHT;
    if (argc > 3) {
        height = atoi(argv[3]);
    }
    cudaAssert(cudaGetDeviceCount(&nDevices));
    if (nDevices < 1) {
        printf("ERROR: No valid CUDA devices on this machine!\n");
        return -1;
    }
    if (DEBUG) {
        fprintf(stderr, "nDevices = %d\n", nDevices);
        cudaDeviceProp prop;
        for (int i = 0; i < nDevices; i++) {
            cudaAssert(cudaGetDeviceProperties(&prop, i));
            cudaPrintDevice(stderr, &prop, i);
        }
    }
    /* Data size. */
    int nPixels = width * height;
    int nBytes = nPixels * sizeof(DTYPE);
    if (DEBUG) fprintf(stderr, "nPixels = %d, nBytes = %d\n", nPixels, nBytes);
    /* Allocate memory on host to store output values for pixels. */
    output = (DTYPE *) malloc(nBytes);
    if (output == NULL) {
        perror("output");
        return -1;
    }
    /* Arguments 4-5: block dimensions (defaults preserved from original). */
    int blockX = 0;
    if (argc > 4) {
        blockX = atoi(argv[4]);
    }
    if (blockX < 1) {
        blockX = MIN_BLK_SZ;
    }
    int blockY = 0;
    if (argc > 5) {
        blockY = atoi(argv[5]);
    }
    if (blockY < 1) {
        blockY = MIN_BLK_SZ;
    }
    if (blockX == MIN_BLK_SZ && blockY == MIN_BLK_SZ) {
        blockX = DEF_BLK_SZ;
    }
    dim3 blockSize(blockX, blockY);
    if (DEBUG) fprintf(stderr, "blockSize = (%d,%d,%d)\n", blockSize.x, blockSize.y, blockSize.z);
    /* Fix: ceiling division so the grid covers dimensions that are not exact
     * multiples of the block size (the kernel bounds-checks edge threads). */
    int gridX = (width + blockSize.x - 1) / blockSize.x;
    int gridY = (height + blockSize.y - 1) / blockSize.y;
    dim3 gridSize(gridX, gridY);
    if (DEBUG) fprintf(stderr, "gridSize = (%d,%d,%d)\n", gridSize.x, gridSize.y, gridSize.z);
    printf("Running Mandelbrot-CUDA with (w,h,mi,bx,by,gx,gy) = (%d,%d,%d,%d,%d,%d,%d)...\n",
           width, height, maxIter, blockSize.x, blockSize.y, gridSize.x, gridSize.y);
    /* Event timers bracket malloc + kernel + copy (as in the original). */
    cudaEvent_t start, stop;
    cudaAssert(cudaEventCreate(&start));
    cudaAssert(cudaEventCreate(&stop));
    cudaAssert(cudaEventRecord(start));
    if (DEBUG) fprintf(stderr, "cudaMalloc...\n");
    cudaAssert(cudaMalloc(&d_output, nBytes));
    /* Per-pixel coordinate steps across the sampled complex rectangle. */
    double realRange = (RMAX - RMIN) / (double) (width - 1);
    double imagRange = (IMAX - IMIN) / (double) (height - 1);
    if (DEBUG) {
        fprintf(stderr, "kernel: mand(d_output[%d], maxIter=%d, realRange=%lf, imagRange=%lf)...\n",
                nPixels, maxIter, realRange, imagRange);
    }
    mand<<<gridSize, blockSize>>>(d_output, maxIter, width, height, realRange, imagRange);
    /* Fix: kernel launches do not return errors directly; check explicitly. */
    cudaAssert(cudaGetLastError());
    /* cudaMemcpy is an implicit barrier so no explicit sync is needed. */
    if (DEBUG) fprintf(stderr, "cudaMemcpy...\n");
    cudaAssert(cudaMemcpy(output, d_output, nBytes, cudaMemcpyDeviceToHost));
    if (DEBUG) fprintf(stderr, "cudaFree...\n");
    cudaAssert(cudaFree(d_output));
    cudaAssert(cudaEventRecord(stop));
    if (DEBUG) fprintf(stderr, "cudaEventSynchronize...\n");
    cudaAssert(cudaEventSynchronize(stop));
    if (DEBUG) fprintf(stderr, "cudaEventElapsedTime...\n");
    cudaAssert(cudaEventElapsedTime(&time, start, stop));
    /* Fix: release the timing events. */
    cudaAssert(cudaEventDestroy(start));
    cudaAssert(cudaEventDestroy(stop));
    if (DEBUG) fprintf(stderr, "writeOutput...\n");
    writeOutput(OUT_FILE, output, width, height);
    free(output);
    printf("Elapsed time: %lf sec\n", time * 1E-3);
    return 0;
}
|
19,018 |
#include <cstdio>
#include "kernel.cuh"
// Naive dense matrix multiply: C = A * B for square len x len row-major
// matrices. One thread computes one output element; no tiling.
__global__ void matmul_basic(const float* A, const float* B, float* C, const int len) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= len || col >= len) {
        return;  // edge threads outside the matrix do nothing
    }
    float acc = 0;
    for (int k = 0; k < len; k++) {
        acc += A[row * len + k] * B[k * len + col];
    }
    C[row * len + col] = acc;
}
// Shared-memory tiled matrix multiply: C = A * B for square len x len
// row-major matrices, staged through len_tile x len_tile tiles.
// Launch contract: dynamic shared memory must be 2*len_tile*len_tile floats,
// and presumably blockDim.x == blockDim.y == len_tile (the tile indexing
// relies on it) — TODO confirm against the host launcher.
__global__ void matmul_tiled(const float* A, const float* B, float* C, const int len, const int len_tile) {
// Dynamic shared memory split into the A tile followed by the B tile.
extern __shared__ float smem[];
float *sA = &smem[0];
float *sB = &smem[len_tile*len_tile];
// (i, j): this thread's output row/column; (si, sj): position within the tile.
int i = blockIdx.y * blockDim.y + threadIdx.y;
int si = threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int sj = threadIdx.x;
// Ceiling division: number of tiles along the shared (k) dimension.
int num_tiles = (len+(len_tile-1))/len_tile;
float sum = 0;
for (int t=0; t<num_tiles; t++) {
// Stage one tile of A and one tile of B; out-of-range entries are zeroed so
// partial edge tiles contribute nothing to the dot product.
if (i<len && (sj+t*len_tile) < len)
sA[si*len_tile+sj] = A[(i)*len+(sj+t*len_tile)];
else
sA[si*len_tile+sj] = 0;
if (j<len && (si+t*len_tile) < len)
sB[si*len_tile+sj] = B[(si+t*len_tile)*len+(j)];
else
sB[si*len_tile+sj] = 0;
// Barrier: all tile loads must finish before any thread reads the tiles.
__syncthreads();
// Partial dot product over this tile's slice of the k dimension.
for (int k=0; k<len_tile; k++) {
sum += sA[(si*len_tile)+k]*sB[(k*len_tile)+sj];
}
// Barrier: all reads must finish before the next iteration overwrites tiles.
__syncthreads();
}
// Bounds-checked store of the accumulated element.
if (i<len && j<len) {
C[i*len+j] = sum;
}
}
// Out-of-place transpose of a square len x len row-major matrix:
// A_T[row][col] = A[col][row]. One thread per output element.
__global__ void transpose(const float* A, float* A_T, const int len) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < len && col < len) {
        A_T[row * len + col] = A[col * len + row];
    }
}
// Variant of matmul_tiled that loads the A tile in transposed orientation
// (sA[sj][si] layout via the itosj index), so the inner product walks both
// sA and sB down shared-memory columns. Same result as matmul_tiled:
// C = A * B for square len x len row-major matrices.
// Launch contract: dynamic shared memory must be 2*len_tile*len_tile floats,
// and presumably blockDim.x == blockDim.y == len_tile — TODO confirm.
__global__ void matmul_tiled_transposed(const float* A, const float* B, float* C, const int len, const int len_tile) {
extern __shared__ float smem[];
float *sA = &smem[0];
float *sB = &smem[len_tile*len_tile];
int i = blockIdx.y * blockDim.y + threadIdx.y;
int si = threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int sj = threadIdx.x;
int num_tiles = (len+(len_tile-1))/len_tile;
// Row index used when loading A transposed: the block's base row plus sj.
int itosj = ((i/len_tile)*len_tile) + sj;
float sum = 0;
for (int t=0; t<num_tiles; t++) {
// Stage the A tile transposed (thread (si,sj) loads A[row itosj, col si+t*len_tile])
// and the B tile directly; out-of-range entries are zeroed for edge tiles.
if (itosj<len && (si+t*len_tile) < len)
sA[si*len_tile+sj] = A[(si+t*len_tile)*len+(itosj)];
else
sA[si*len_tile+sj] = 0;
if (j<len && (si+t*len_tile) < len)
sB[si*len_tile+sj] = B[(si+t*len_tile)*len+(j)];
else
sB[si*len_tile+sj] = 0;
// Barrier: tile loads must complete before any reads.
__syncthreads();
// Inner product: note sA is read as sA[k][si] because it was stored transposed.
for (int k=0; k<len_tile; k++) {
sum += sA[(k*len_tile)+si]*sB[(k*len_tile)+sj];
}
// Barrier: reads must complete before the next iteration reloads the tiles.
__syncthreads();
}
if (i<len && j<len) {
C[i*len+j] = sum;
}
}
|
19,019 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
using namespace std;
/**
 * Benchmarks thrust::exclusive_scan over a device vector of n ones and
 * prints the last scan element (n - 1) and the elapsed time in ms.
 *
 * Fix: the original ran atoi on an empty string when no argument was given
 * (yielding n == 0) and then read res[n - 1] out of bounds; arguments are
 * now validated up front. Timing events are also destroyed.
 *
 * Usage: prog <n>
 */
int main(int argc, const char *argv[]) {
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <n>" << endl;
        return 1;
    }
    unsigned int n = (unsigned int) atoi(argv[1]);
    if (n == 0) {
        cout << "n must be a positive integer" << endl;
        return 1;
    }
    // Host input: n ones, copied to the device.
    thrust::host_vector<float> H(n);
    for (unsigned int i = 0; i < n; i++) {
        H[i] = 1;
    }
    thrust::device_vector<float> D = H;
    thrust::device_vector<float> res(n);
    // Time only the scan itself with CUDA events.
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    thrust::exclusive_scan(D.begin(), D.end(), res.begin());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // Get the elapsed time in milliseconds
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    // Exclusive scan of ones: last element equals n - 1.
    cout << res[n - 1] << endl;
    cout << ms << endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
19,020 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <algorithm>
#include <cstring> //memset
#define MAX_INT_BITS 32
#define checkCudaErrors(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Prints a host array's elements space-separated on one line (debug helper).
void printArray(unsigned int* const h_input, const size_t numElems)
{
    for (size_t idx = 0; idx < numElems; ++idx) {
        printf("%d ", h_input[idx]);
    }
    printf("\n");
}
// Device-side debug helper: prints a device array's elements tab-separated
// on one line (device printf is serialized and slow — debugging only).
__device__
void dev_printArray(unsigned int* const d_input, const size_t numElems)
{
    for (size_t idx = 0; idx < numElems; ++idx) {
        printf("%d \t", d_input[idx]);
    }
    printf("\n");
}
__device__ unsigned int d_count = 0;
__device__ unsigned int d_curBlock = 0;
// Resets the global scan bookkeeping before each scan launch:
// d_count (next block id handed out) and d_curBlock (blocks finished
// publishing their prefix sum). Must be launched <<<1, 1>>> before
// exclusive_scan, as host_exclusive_scan does.
__global__
void set_init_scan()
{
d_count = 0;
d_curBlock = 0;
}
// Single-pass exclusive scan (Brent-Kung up/down sweep per block, with
// adjacent-block chaining): each block grabs a dynamic block id from d_count,
// scans its chunk in shared memory, then spin-waits on d_curBlock until all
// lower-id blocks have published their running totals in d_prevSums.
// Launch contract: dynamic shared memory >= blockDim.x unsigned ints.
// NOTE(review): threads with i >= n return before the __syncthreads() calls
// in the sweep loops — divergent barriers are undefined behavior in CUDA and
// only happen to work on some hardware; the tail block should instead pad.
// NOTE(review): thread 0 writes d_prevSums[s_bid + 1], so the last block
// stores index gridDim.x — the caller must allocate gridDim.x + 1 entries.
__global__
void exclusive_scan(const unsigned int* const d_in,
unsigned int* const d_out,
unsigned int* const d_prevSums,
const int n)
{
extern __shared__ unsigned int s_blkIn[]; //blockSize
__shared__ unsigned int s_prevSum;
__shared__ unsigned int s_bid;
__shared__ unsigned int s_endBlock;
// get block idx (dynamic: launch order, not blockIdx.x, to avoid deadlock
// in the spin-wait below when blocks are scheduled out of order)
if (0 == threadIdx.x)
{
s_bid = atomicAdd(&d_count, 1);
s_endBlock = blockDim.x - 1;
}
__syncthreads();
int i = s_bid * blockDim.x + threadIdx.x;
// Last valid element marks where this block's total lives after the sweep.
if (i == n - 1)
s_endBlock = threadIdx.x;
if (i >= n)
return;
// load data, shifted right by one for an exclusive scan (identity at i == 0)
if (0 == i)
s_blkIn[0] = 0;
else
s_blkIn[threadIdx.x] = d_in[i - 1];
__syncthreads();
/// reduction (up-sweep): build partial sums at power-of-two strides
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int localThreadIdx = 2 * (threadIdx.x + 1) * stride - 1;
if (localThreadIdx < blockDim.x)
{
s_blkIn[localThreadIdx] += s_blkIn[localThreadIdx - stride];
}
__syncthreads();
}
//post reduction (down-sweep): distribute partial sums back down
for (int stride = blockDim.x / 4; stride > 0; stride /= 2)
{
int localThreadIdx = 2 * (threadIdx.x + 1) * stride - 1;
if (localThreadIdx + stride < blockDim.x)
{
s_blkIn[localThreadIdx + stride] += s_blkIn[localThreadIdx];
}
__syncthreads();
}
// Chain blocks: wait until every lower-id block has published, read our
// running prefix, publish ours (prefix + block total), then signal.
if (0 == threadIdx.x)
{
while(atomicAdd(&d_curBlock, 0) < s_bid);
s_prevSum = d_prevSums[s_bid];
d_prevSums[s_bid + 1] = s_prevSum + s_blkIn[s_endBlock];
// Fence so the d_prevSums store is visible before the counter increment.
__threadfence();
atomicAdd(&d_curBlock, 1);
}
__syncthreads();
// update output: local scan value plus the running prefix from prior blocks
d_out[i] = s_blkIn[threadIdx.x] + s_prevSum;
}
// Host wrapper: exclusive scan of numElems device unsigned ints.
// Returns a freshly cudaMalloc'd output buffer; the caller owns/frees it.
unsigned int* host_exclusive_scan(const unsigned int* const d_in, const size_t numElems, const dim3 blockSize)
{
const dim3 gridSize((numElems - 1) / blockSize.x + 1);
set_init_scan<<<1, 1>>>();
unsigned int* d_out;
checkCudaErrors(cudaMalloc(&d_out, sizeof(unsigned int) * numElems));
unsigned int* d_prevSums;
// BUG FIX: exclusive_scan stores d_prevSums[bid + 1], so the last block
// writes index gridSize.x. The original allocated only gridSize.x entries,
// an out-of-bounds device write; allocate (and zero) gridSize.x + 1.
checkCudaErrors(cudaMalloc(&d_prevSums, sizeof(unsigned int) * (gridSize.x + 1)));
checkCudaErrors(cudaMemset(d_prevSums, 0, sizeof(unsigned int) * (gridSize.x + 1)));
// Dynamic shared memory: one scan slot per thread. Statically declared
// __shared__ variables in the kernel are not carved from this allocation,
// so the original's extra padding bytes were unnecessary.
int sharedSize = blockSize.x * sizeof(unsigned int);
exclusive_scan<<<gridSize, blockSize, sharedSize>>>(d_in, d_out, d_prevSums, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_prevSums));
return d_out;
}
// Per-block histogram of the current radix digit.
// Output layout is bin-major: d_hist[bin * gridDim.x + block], so a single
// exclusive scan over the whole buffer later yields, for each (bin, block),
// the count of keys that must precede it. Dynamic smem: numBins counters.
__global__
void histogram(const unsigned int* const d_inputVals,
const size_t numElems,
const size_t numBins,
unsigned int* const d_hist,
const unsigned int mask,
const unsigned int digitOrder)
{
extern __shared__ unsigned int s_hist[]; // numBins shared counters
// cooperatively zero the shared histogram
for (unsigned int b = threadIdx.x; b < numBins; b += blockDim.x)
s_hist[b] = 0;
__syncthreads();
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < numElems)
{
const unsigned int digit = (d_inputVals[gid] & mask) >> digitOrder;
atomicAdd(&s_hist[digit], 1);
}
__syncthreads();
// publish this block's column into the bin-major global layout
for (unsigned int b = threadIdx.x; b < numBins; b += blockDim.x)
d_hist[b * gridDim.x + blockIdx.x] = s_hist[b];
}
// Host wrapper: build the per-block digit histogram for one radix pass.
// Returns a device buffer of numBins * gridSize.x counters (caller frees).
unsigned int* host_histogram(const unsigned int* const d_inputVals,
const size_t numElems,
const size_t numBins,
const unsigned int mask,
const unsigned int digitOrder,
const dim3 blockSize)
{
const dim3 gridSize((numElems - 1) / blockSize.x + 1);
const size_t histBytes = sizeof(unsigned int) * numBins * gridSize.x;
unsigned int *d_hist;
checkCudaErrors(cudaMalloc(&d_hist, histBytes));
checkCudaErrors(cudaMemset(d_hist, 0, histBytes));
const int sharedSize = numBins * sizeof(unsigned int); // shared counters
histogram<<<gridSize, blockSize, sharedSize>>>(d_inputVals, numElems, numBins,
d_hist, mask, digitOrder);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
return d_hist;
}
// Scatter keys to their sorted positions for the current digit.
// d_histScan[bin * gridDim.x + block] gives the global base offset for this
// (digit, block) pair; the in-block rank among equal digits keeps the pass
// stable. Dynamic smem: blockDim.x keys.
// NOTE: tail threads (gid >= numElems) exit before the barrier below.
__global__
void scatter(unsigned int* const d_inputVals,
unsigned int* const d_outputVals,
const unsigned int* const d_histScan,
const size_t numElems,
const size_t numBins,
const unsigned int mask,
const unsigned int digitOrder)
{
extern __shared__ unsigned int s_blkIn[]; // one key per thread
const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= numElems)
return;
// stage this block's keys
s_blkIn[threadIdx.x] = d_inputVals[gid];
__syncthreads();
const unsigned int myBin = (s_blkIn[threadIdx.x] & mask) >> digitOrder;
// keys with this digit in earlier blocks, plus all smaller digits overall
const unsigned int base = d_histScan[myBin * gridDim.x + blockIdx.x];
// rank within the block among keys sharing this digit (stability)
unsigned int rankInBlock = 0;
for (unsigned int p = 0; p < threadIdx.x; ++p)
{
if (((s_blkIn[p] & mask) >> digitOrder) == myBin)
++rankInBlock;
}
d_outputVals[base + rankInBlock] = s_blkIn[threadIdx.x];
}
// Host wrapper: scatter keys into sorted order for one radix pass.
void host_scatter(unsigned int* const d_inputVals,
unsigned int* const d_outputVals,
const size_t numElems,
const size_t numBins,
const unsigned int* const d_histScan,
const unsigned int mask,
const unsigned int digitOrder,
const dim3 blockSize)
{
const dim3 gridSize((numElems - 1) / blockSize.x + 1);
unsigned int sharedSize = blockSize.x * sizeof(unsigned int);
scatter<<<gridSize, blockSize, sharedSize>>> (d_inputVals, d_outputVals, d_histScan, numElems,numBins, mask, digitOrder);
// Consistency fix: the sibling wrappers (host_histogram, host_exclusive_scan)
// synchronize and surface launch/execution errors; this one silently
// dropped them, letting a failed scatter corrupt the sort unnoticed.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Number of significant bits in the largest key: MAX_INT_BITS minus the
// count of leading zero bits of max(h_input).
// BUG FIX: the original spun forever on an all-zero input (the probe mask
// shifts down to 0 and the loop condition never turns false) and shifted a
// signed 1 into the sign bit (UB); it also dereferenced max_element on an
// empty range. Returns 0 for empty or all-zero input.
unsigned int getMaxNumOfBits(unsigned int* h_input, const size_t numElems)
{
if (numElems == 0)
return 0;
unsigned int maxElem = *std::max_element(h_input, h_input + numElems);
unsigned int mask = 1u << 31;
unsigned int count = 0;
while (mask != 0 && !(mask & maxElem))
{
++count;
mask >>= 1;
}
return MAX_INT_BITS - count;
}
// LSD radix sort of numElems unsigned ints, numBits digit bits per pass.
// Each pass: per-block digit histogram -> exclusive scan of the bin-major
// histogram -> stable scatter; input/output buffers ping-pong between passes.
// On return d_outputVals holds the sorted keys (copied back if the final
// swap left the result in d_inputVals).
// h_inputVals is only used to bound the number of passes via the largest key.
void radix_sort(unsigned int* h_inputVals,
unsigned int* d_inputVals,
unsigned int* d_outputVals,
const size_t numElems,
const size_t numBits,
const dim3 blockSize)
{
const dim3 gridSize((numElems - 1) / blockSize.x + 1);
size_t numBins = 1 << numBits;
unsigned int* pInVals = d_inputVals;
unsigned int* pOutVals = d_outputVals;
unsigned int maxBits = getMaxNumOfBits(h_inputVals, numElems);
// round the pass range up to a digit boundary
// NOTE(review): with `i <= maxBits` below, an exact multiple still gets one
// extra (all-zero digit) pass; harmless for a stable sort, just wasted work.
if (maxBits % numBits)
maxBits += numBits;
// loop through digits, least-significant first
for (unsigned int i = 0; i <= maxBits; i += numBits)
{
unsigned int mask = (numBins - 1) << i;
// printf("mask: %d\n", mask);
//histogram: per-block counts of each digit value
unsigned int* d_hist = host_histogram(pInVals, numElems, numBins, mask, i, blockSize);
// unsigned int* h_hist = (unsigned int*) malloc (sizeof(unsigned int) * numBins * gridSize.x);
// checkCudaErrors(cudaMemcpy(h_hist, d_hist, sizeof(unsigned int) * numBins * gridSize.x, cudaMemcpyDeviceToHost));
// printArray(h_hist, numBins * gridSize.x);
// free(h_hist);
// exclusive scan hist: turns counts into global scatter base offsets
unsigned int* d_histScan = host_exclusive_scan(d_hist, numBins * gridSize.x, blockSize);
// unsigned int* h_histScan = (unsigned int*) malloc (sizeof(unsigned int) * numBins * gridSize.x);
// checkCudaErrors(cudaMemcpy(h_histScan, d_histScan, sizeof(unsigned int) * numBins * gridSize.x, cudaMemcpyDeviceToHost));
// printArray(h_histScan, numBins * gridSize.x);
// free(h_histScan);
//scatter keys stably by this digit, then swap roles of the two buffers
host_scatter(pInVals, pOutVals, numElems, numBins, d_histScan, mask, i, blockSize);
std::swap(pInVals, pOutVals);
// unsigned int* h_result = (unsigned int*) malloc (sizeof(unsigned int) * numElems);
// checkCudaErrors(cudaMemcpy(h_result, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost));
// printArray(h_result, numElems);
// free(h_result);
// printf("\n\n\n");
checkCudaErrors(cudaFree(d_hist));
checkCudaErrors(cudaFree(d_histScan));
d_hist = NULL;
d_histScan = NULL;
}
// after an even number of passes the sorted data sits in d_inputVals;
// copy so callers can always read d_outputVals
if (pInVals == d_outputVals)
{
checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice));
}
}
// Driver: sort 10M random unsigned ints (2 bits per radix pass) on the GPU
// and verify the result is non-decreasing on the host.
int main()
{
// srand(time(NULL));
const dim3 blockSize(64);
const size_t numElems = 10000000;
const unsigned int numBits = 2; // radix digit width -> 4 bins per pass
unsigned int* h_inputVals = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
// keys in [1, 1e9]; rand() is left unseeded, so runs are reproducible
for (int i = 0; i < numElems; ++i)
{
h_inputVals[i] = rand() % 1000000000 + 1;
}
// printArray(h_inputVals, numElems);
unsigned int* d_inputVals;
checkCudaErrors(cudaMalloc(&d_inputVals, sizeof(unsigned int) * numElems));
checkCudaErrors(cudaMemcpy(d_inputVals, h_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyHostToDevice));
unsigned int* d_outputVals;
checkCudaErrors(cudaMalloc(&d_outputVals, sizeof(unsigned int) * numElems));
radix_sort(h_inputVals, d_inputVals, d_outputVals, numElems, numBits, blockSize);
unsigned int* h_outputVals = (unsigned int*) malloc(sizeof(unsigned int) * numElems);
// blocking copy also synchronizes with the final sort pass
checkCudaErrors(cudaMemcpy(h_outputVals, d_outputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToHost));
// printArray(h_outputVals, numElems);
// host-side check: output must be sorted in non-decreasing order
bool check = true;
for (int i = 1; i < numElems; ++i)
{
if (h_outputVals[i] < h_outputVals[i - 1])
{
printf("\nfalse at index : %d\n", i);
check = false;
break;
}
}
if (check)
printf("\nTRUE\n");
else
printf("\nFALSE\n");
free(h_inputVals);
free(h_outputVals);
checkCudaErrors(cudaFree(d_inputVals));
checkCudaErrors(cudaFree(d_outputVals));
return 0;
}
|
19,021 | /*
#include "patchBasedObject.cuh"
template <typename T>
__host__ void PatchBasedObject<T>::generateSuperpixels(uint2 & pbbsize, uint2 & stride)
{
int spx_sz = 0; // initialize the superpixel size
float noLabels = 2; // control number of superpixels using-> (int)(noLabels * sqrt( [width * height] /2 )) ;
double compactness = 10; // It was used to control the superpixel shape [compactness factor], but now it is redundant as in SLICO it is defined automatically per each superpixel
Superpixels<T> superpixesl;
superpixesl.runStackSLIC(m_h_stack, m_h_spx_stack, spx_sz, noLabels, compactness);
pbbsize = make_uint2(3 * spx_sz, 3 * spx_sz); // In order to fix the patch size for all the superpixels, we define patch size 3 times the uniform superpixel size. The extra free space is used later for dilation
stride = make_uint2(0.25*spx_sz, 0.25*spx_sz); // define dilation iterations = 0.25 * superpixel_size
cout << "Patch size: " << pbbsize.x << "x" << pbbsize.y << endl;
cout << "Dilation iterations: " << stride.x << endl;
}
template<> class PatchBasedObject < float >;
template<> class PatchBasedObject < double >;
*/ |
19,022 | // a cuda app. we will convert this to opencl, and run it :-)
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda_runtime.h>
// Store `value` at data[idx]. Only lane 0 of each block writes; every block
// stores the same byte to the same slot, so the overlapping writes agree.
__global__ void setValue(char *data, int idx, char value) {
    if(threadIdx.x != 0) {
        return;
    }
    data[idx] = value;
}
// Round-trip smoke test: write single bytes on the GPU, copy back, assert.
// NOTE(review): CUDA API return codes are not checked anywhere here; on a
// machine without a usable device every assert would fire on garbage data.
int main(int argc, char *argv[]) {
    int N = 1024;
    char *gpuChars;
    cudaMalloc((void**)(&gpuChars), N * sizeof(char));
    // kernel writes 89 at index 2; blocking memcpy also synchronizes
    setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuChars, 2, (char)89);
    char hostChars[4];
    cudaMemcpy(hostChars, gpuChars, 4 * sizeof(char), cudaMemcpyDeviceToHost);
    cout << "hostChars[2] " << (int)hostChars[2] << endl;
    assert(hostChars[2] == 89);
    // overwrite on the device and verify the new value lands
    setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuChars, 2, (char)23);
    cudaMemcpy(hostChars, gpuChars, 4 * sizeof(char), cudaMemcpyDeviceToHost);
    cout << "hostChars[2] " << (int)hostChars[2] << endl;
    assert(hostChars[2] == 23);
    // host -> device -> host round trip: the clobbered host value (55)
    // must be replaced by the value previously uploaded (44)
    hostChars[2] = 44;
    cudaMemcpy(gpuChars, hostChars, 4 * sizeof(char), cudaMemcpyHostToDevice);
    hostChars[2] = 55;
    cudaMemcpy(hostChars, gpuChars, 4 * sizeof(char), cudaMemcpyDeviceToHost);
    cout << "hostChars[2] " << (int)hostChars[2] << endl;
    assert(hostChars[2] == 44);
    cudaFree(gpuChars);
    return 0;
}
|
19,023 | #include "includes.h"
// Per-pixel difference of two octave images (difference-of-Gaussians step):
// d_diffOctave = d_Octave1 - d_Octave0.
// pitch: row stride in elements; height: number of valid rows.
// NOTE(review): only y is bounds-checked. The launch must guarantee that
// every x produced by the grid is a valid column (x < pitch and within the
// image width) — confirm against the caller's grid configuration.
__global__ void differenceImg(float *d_Octave0,float *d_Octave1,float *d_diffOctave,int pitch,int height){
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y*blockDim.y+threadIdx.y;
int index = y * pitch + x;
if(y<height)
d_diffOctave[index] = (d_Octave1[index] - d_Octave0[index]);
}
19,024 | #include "includes.h"
#define BLOCK_SIZE 16
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
// STD includes
// CUDA runtime
// Utilities and system includes
static // Print device properties
// Scatter a single-channel plane into one channel of an interleaved image.
// image:   imageW x imageH pixels, numChannels bytes per pixel (interleaved)
// channel: imageW x imageH single-byte plane
// channelToMerge: destination channel index within each pixel
__global__ void writeChannelKernel( unsigned char* image, unsigned char* channel, int imageW, int imageH, int channelToMerge, int numChannels) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard the grid tail — the original had no bounds check, so any
    // launch whose grid over-covers the image wrote out of bounds.
    if (x >= imageW || y >= imageH) return;
    int posOut = y * (imageW*numChannels) + (x*numChannels) + channelToMerge;
    int posIn = y * imageW + x;
    image[posOut] = channel[posIn];
}
19,025 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// Flatten a 2D grid of 2D blocks into a unique global index and print the
// element of `data` that this thread owns.
__global__ void unique_grid_id_calculation_2d_2d(int* data)
{
    // position of this thread inside its block (row-major)
    int localTid = threadIdx.x + threadIdx.y * blockDim.x;
    int blockSize = blockDim.x * blockDim.y;
    // threads in full grid-rows above us, plus blocks to our left in this row
    int rowOfBlocks = blockIdx.y * (blockSize * gridDim.x);
    int blocksLeft = blockIdx.x * blockSize;
    int grid_id = localTid + rowOfBlocks + blocksLeft;
    printf("blockIdx.x: %d, blockIdx.y: %d, threadIdx.x: %d, grid ID: %d, - data : %d \n",
        blockIdx.x, blockIdx.y, localTid, grid_id, data[grid_id]);
}
/*
int main()
{
int array_size = 16;
int array_byte_size = sizeof(int) * array_size;
int h_data[] = { 23, 9, 4, 53, 65, 12, 1, 33, 87, 45, 23, 12, 342, 56, 44, 99 };
int* d_data;
cudaMalloc((void**)&d_data, array_byte_size);
cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
dim3 block(2, 2);
dim3 grid(2, 2);
unique_grid_id_calculation_2d_2d<<<grid, block>>>(d_data);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
*/ |
19,026 | #include "includes.h"
// Column-wise softmax of b (rows x cols, row-major) into softmaxP.
// One thread per column (bid = blockIdx.x) walks its column serially:
// subtract the column max for numerical stability, exponentiate, normalize.
__global__ void softmax_trivial(float* softmaxP, float* b, int rows, int cols){
int tid = threadIdx.x;
int bid = blockIdx.x;
// BUG FIX: the original seeded the running max with -1e8 (a double literal),
// which silently breaks whenever every logit in a column is below -1e8.
// Seed with the most negative finite float (-FLT_MAX) instead.
float _max = -3.402823466e+38f;
float sum = 0.0f;
if(tid * cols + bid < rows * cols){
for(int i = 0 ; i < rows ; i++) _max = max(_max, b[i * cols + bid]);
for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] = (b[i * cols + bid] - _max);
for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] = __expf(softmaxP[i * cols + bid]);
for(int i = 0 ; i < rows ; i++) sum += softmaxP[i * cols + bid];
for(int i = 0 ; i < rows ; i++) softmaxP[i * cols + bid] /= sum;
}
}
19,027 | #include "includes.h"
// Element-wise compare of two rows of xf (each N floats long):
// xb[idxb*N + i] = xf[(idxf-1)*N + i] < xf[(idxf-2)*N + i].
// Grid-stride loop, so any launch configuration covers all N elements.
__global__ void LessThan(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N)
{
const float *lhs = xf + (idxf - 1) * N; // newer row
const float *rhs = xf + (idxf - 2) * N; // row before it
bool *dst = xb + idxb * N;
const size_t stride = (size_t)blockDim.x * gridDim.x;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
{
dst[i] = lhs[i] < rhs[i];
}
}
19,028 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
// Branch-divergence experiment, not a real vector add: both arms of each
// conditional compute the identical sum, so the output is the same either
// way — the point is to compare compiler/hardware handling of the two
// divergence patterns.
// NOTE(review): every thread in the block writes the same out[i] for every i
// (a benign same-value race); `len` and `tid`-based indexing are deliberately
// unused for addressing.
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
int i=0;
int tid = threadIdx.x;
for(i=0; i<DATA_LENGTH; i++){
// first half of first warp and second half of 2nd warp
// (branch boundary does NOT align with a warp boundary -> divergence)
if(tid < 16 || tid > 47)
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
for(i=0; i<DATA_LENGTH; i++){
// only even threads not compactable
// (odd/even split diverges within every warp)
if(threadIdx.x%2 == 0 )
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
out[0] = in1[0] + in2[0];
}
// Driver: run the divergence-demo vecAdd on one 64-thread block and verify
// the result against a CPU sum.
// NOTE(review): CUDA_CALL currently expands to just its argument (the error
// check is commented out in the macro), so API failures go unreported.
int main(int argc, char **argv) {
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
struct timeval t;
// seed rand from wall-clock seconds for varied inputs per run
gettimeofday(&t, NULL);
srand(t.tv_sec);
inputLength = DATA_LENGTH;
hostInput1 = genInput(inputLength);
hostInput2 = genInput(inputLength);
hostOutput = ( float * )malloc(inputLength * sizeof(float));
//@@ Allocate GPU memory here
CUDA_CALL(cudaMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
//@@ Copy memory to the GPU here
CUDA_CALL(cudaMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
//@@ Initialize the grid and block dimensions here
// single block of 64 threads = 2 warps, matching the kernel's branch demo
dim3 numBlocks(1,1,1);
//dim3 numThreads(ThreadsPerBlock,1,1);
dim3 numThreads(64,1,1);
//@@ Launch the GPU Kernel here
vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
cudaDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
CUDA_CALL(cudaMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), cudaMemcpyDeviceToHost));
//@@ Free the GPU memory here
CUDA_CALL(cudaFree(deviceInput1));
CUDA_CALL(cudaFree(deviceInput2));
CUDA_CALL(cudaFree(deviceOutput));
// CPU reference check (prints on first mismatch)
verify(hostInput1, hostInput2, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
// Allocate and fill an array of l random positive ratios rand()/rand().
float * genInput(int l)
{
float *buf = (float*)malloc(l * sizeof(float));
for (int k = 0; k < l; ++k) {
// first draw becomes the numerator, second the denominator
buf[k] = rand();
buf[k] = buf[k] / rand();
}
return buf;
}
// Check c[i] == a[i] + b[i] elementwise by comparing both values formatted
// to 8 decimal places (tolerates last-bit float noise); reports and stops
// at the first mismatch.
void verify(float *a, float *b, float *c, int l)
{
char expected[50] = {0};
char actual[50] = {0};
for (int k = 0; k < l; ++k) {
float s = a[k] + b[k];
sprintf(expected, "%1.8f", s);
sprintf(actual, "%1.8f", c[k]);
if (strcmp(expected, actual)) {
printf("ERROR at index %d, Exp %1.8f Got %1.8f\n", k, s, c[k]);
break;
}
}
}
|
19,029 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// CUDA kernel. Cada thread ejecuta la operación sobrte un elemencto de c
// CUDA kernel: one thread per element, c[id] = a[id] + b[id].
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Global thread index over a 1D grid of 1D blocks
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    // Bounds guard: the last block may over-cover n
    if (id < n)
        c[id] = a[id] + b[id];
    // BUG FIX: the format string has five %d conversions but only four
    // arguments were supplied (undefined behavior / garbage printed for the
    // global ID); pass `id` as the missing fifth argument.
    printf("I am thread (%d, %d) in block (%d, %d). Global thread ID = %d\n",threadIdx.y, threadIdx.x, blockIdx.y, blockIdx.x, id);
}
// Driver: add sin^2(i) + cos^2(i) element-wise on the GPU; the mean of the
// result should be 1.0 within floating-point error.
// NOTE(review): no CUDA error checking anywhere, and the kernel printf emits
// one line per thread (100k lines) — debugging aid only.
__host__ int main( int argc, char* argv[] )
{
// Vector length
int n = 100000;
// Host (CPU) input vectors
double *h_a;
double *h_b;
// Host output vector
double *h_c;
// Device (GPU) input vectors
double *d_a;
double *d_b;
// Device output vector
double *d_c;
// Size, in bytes, of each vector
//size_t bytes = n*sizeof(double);
// Allocate memory for each host vector
h_a = (double*)malloc(n*sizeof(double));
h_b = (double*)malloc(n*sizeof(double));
h_c = (double*)malloc(n*sizeof(double));
// Allocate memory for each device vector
cudaMalloc(&d_a, n*sizeof(double));
cudaMalloc(&d_b, n*sizeof(double));
cudaMalloc(&d_c, n*sizeof(double));
int i;
// Initialize the host vectors: a[i] + b[i] == 1 by the Pythagorean identity
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy the host vectors to the device
cudaMemcpy( d_a, h_a, n*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, n*sizeof(double), cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads per block
blockSize = 1024;
// Number of blocks in the grid (ceiling division)
gridSize = (int)ceil((float)n/blockSize);
printf("%i\n", gridSize);
// Launch the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy the result vector back to the host (blocking -> also synchronizes)
cudaMemcpy( h_c, d_c, n*sizeof(double), cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final : %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
19,030 | /*
* CUDA kernel for convolution in 2D, corresponding to conv2 in Matlab
* Sofie Lovdal 5.6.2018
*/
// 2D convolution with zero padding, one output pixel per thread, followed by
// half-wave rectification (negative sums clamp to 0).
// NOTE(review): kernel_radius is derived from height_kernel only, so the
// code effectively assumes a square, odd-sized kernel even though
// width_kernel is used for kernel indexing — confirm callers pass square
// kernels.
__global__ void conv2(double * output, double * const input, unsigned int const numRows,
unsigned int const numCols, double * const kernel,
unsigned int const height_kernel, unsigned int const width_kernel)
{
/*global thread ID in x dimension - moving horizontally in the image*/
const int colIdx = blockIdx.x*blockDim.x + threadIdx.x;
/*global thread ID in y dimension - moving vertically in the image*/
const int rowIdx = blockIdx.y*blockDim.y + threadIdx.y;
int i, j, kernelIdx, imageIdx;
/*Effectivize: Load local image area into shared memory space*/
/*make sure we are within image*/
if(colIdx>=numCols || rowIdx >= numRows) return;
/*Linear index of pixel corresponding to current thread */
int linearIdx = rowIdx*numCols + colIdx;
int kernel_radius=height_kernel/2;
int imageRowIdx, imageColIdx;
/*Apply convolution to linarIdx (pixel that each thread should treat) */
double sum=0.0;
for (i = -kernel_radius; i <= kernel_radius; i++) {
for (j = -kernel_radius; j <= kernel_radius; j++) {
kernelIdx = width_kernel*(i+kernel_radius) + (j+kernel_radius);
imageRowIdx = rowIdx+i;
imageColIdx = colIdx+j;
imageIdx = imageRowIdx*numCols + imageColIdx;
/*zero padding at borders: top, bottom, left, right*/
if(imageRowIdx<0 || imageRowIdx >=numRows || imageColIdx <0 || imageColIdx >= numCols ) {
sum+=0.0;
} else {
sum=sum+input[imageIdx]*kernel[kernelIdx];
}
}
}
/* half-wave rectification: keep positive responses, clamp negatives to 0 */
output[linearIdx] = (sum>0.0 ? (sum) : 0.0); //here: half-wave rectification??
}
|
19,031 | /*
*
* Last name: Will
* First name: Peter
* Net ID: pcw276
*
*/
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define N_WALKERS 1000
#define MAX_THETA_SIZE 255
#define PARAMS 2
__constant__ float center_d[PARAMS];
// Function declarations
// NOTE(review): several of these prototypes no longer match the definitions
// below (init is defined with an extra n_walkers parameter; step_walkers and
// emcee_emcee have different parameter lists). In C++ each mismatched line
// declares a distinct, never-defined overload — harmless while unused, but
// misleading; keep them in sync with the definitions.
__global__ void init(unsigned int seed, curandState_t *state);
__global__ void init_walkers(float *walkers, int n_walkers, int n_theta, int r, curandState_t *state);
__device__ float G(float a, curandState_t state);
__device__ double Rosenbrock(float *point);
__device__ void step_walkers(float *s1_walkers, unsigned int s1_n, float *s2_walkers, unsigned int s2_n, float a, unsigned int k_dim, curandState_t *states);
__global__ void emcee_emcee(float *walkers, unsigned int n_walkers, unsigned int theta_dim, unsigned int steps, int a, curandState_t *states);
/********************************************/
// Seed one curand state per walker (sequence number = walker id).
__global__ void init(unsigned int seed, unsigned int n_walkers, curandState_t *state)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n_walkers)
        return;
    curand_init(seed, gid, 0, &state[gid]);
}
// Scatter walkers uniformly in a hypercube of half-width r around center_d.
// Storage is SoA: component i of walker w lives at walkers[w + i*n_walkers].
__global__ void init_walkers(float *walkers, int n_walkers, int n_theta, int r, curandState_t *state)
{
    const int w = threadIdx.x + blockIdx.x * blockDim.x;
    if (w >= n_walkers)
        return;
    for (int dim = 0; dim < n_theta; dim++)
    {
        // uniform draw in [-r, r) around the dimension's center
        walkers[w + dim * n_walkers] = center_d[dim] +
            (curand_uniform(&state[w]) - .5) * 2 * r;
    }
}
// Inverse-CDF draw of the stretch factor z for the Goodman-Weare move:
// z = (u(a-1)+1)^2 / a, i.e. z ~ g(z) proportional to 1/sqrt(z) on [1/a, a].
// NOTE(review): pow() here is the double-precision overload applied to a
// float expression; powf (or squaring explicitly) would avoid the round-trip
// through double.
__device__ float G(float a, float u)
{
return pow((u*(a-1)+1) / sqrtf(a), 2);
}
// Unnormalized target density exp(-R(x,y)/20), where R is the Rosenbrock
// function R(x,y) = 100 (y - x^2)^2 + (1 - x)^2. Expects point[0..1].
// (Despite the name, this returns the density, not R itself.)
__device__ double Rosenbrock(float *point)
{
return ((double) exp(-((100*pow(point[1] - pow(point[0],2), 2)) + pow(1 - point[0],2)) / 20));
}
// One affine-invariant stretch move for the walker at index (id + offset),
// using a random partner from the complementary sub-ensemble.
// walkers is SoA: component i of walker w at walkers[w + i*n_walkers].
// NOTE(review): parameter order is (..., theta_dim, a, ...) but the caller
// in emcee_emcee passes (..., a, theta_dim, ...); this only works because
// both values are 2 in this program — confirm before reusing.
__device__ void step_walkers(float *walkers, unsigned int n_walkers, unsigned int k, unsigned int offset, unsigned int theta_dim, float a, curandState_t *states)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
curandState_t localState = states[id];
float u1 = curand_uniform(&localState); // partner selection
float u2 = curand_uniform(&localState); // stretch-factor draw
float u3 = curand_uniform(&localState); // Metropolis accept/reject
int w1_idx = id + offset; // walker being moved
// random partner drawn from the other half-ensemble
int w2_idx = k+ceil((n_walkers - k)*u1)-1 - offset;
float w1[MAX_THETA_SIZE], x_prime[MAX_THETA_SIZE];
double q1, q2;
float z = G(a, u2);
for (int i = 0; i < theta_dim; i++)
{
w1[i] = walkers[w1_idx+(i*n_walkers)];
// proposal: x' = w2 + z * (w1 - w2)
x_prime[i] = walkers[w2_idx+(i*n_walkers)] + z*(
walkers[w1_idx+(i*n_walkers)] -
walkers[w2_idx+(i*n_walkers)]
);
}
q1 = Rosenbrock(w1);
q2 = Rosenbrock(x_prime);
// accept with probability min(1, z^(d-1) * q(x')/q(x))
if (u3 < (powf(z,theta_dim-1)*(q2/q1)))
{
for (int i =0; i < theta_dim; i++)
{
walkers[w1_idx+(i*n_walkers)] = x_prime[i];
}
}
// persist the advanced RNG state for the next sweep
states[id] = localState;
}
// One emcee sweep: move the first half-ensemble (k walkers) against the
// second, then the second half (k2 walkers) against the first.
__global__ void emcee_emcee(float *walkers, int k, int k2, unsigned int n_walkers, unsigned int theta_dim, int a, curandState_t *states)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < k)
{
// BUG FIX: arguments were passed as (..., a, theta_dim, ...) although
// step_walkers is declared (..., theta_dim, a, ...). The swap went
// unnoticed only because a == PARAMS == 2 in this program.
step_walkers(walkers, n_walkers, k, 0, theta_dim, a, states);
}
// NOTE(review): __syncthreads() is a block-level barrier. With more than one
// block the two half-sweeps are not globally ordered; a grid-wide sync
// (cooperative launch) or two separate kernel launches would be needed.
__syncthreads();
if (id < k2)
{
step_walkers(walkers, n_walkers, k2, k, theta_dim, a, states);
}
}
// Choose a CUDA block size: one warp (32 threads) per 4800 walkers,
// rounded up, clamped to at most 256 threads.
int get_block_size(int n_walkers)
{
    int warps = (int)ceil((double)n_walkers / 4800);
    int threads = warps * 32;
    return (threads > 256) ? 256 : threads;
}
// Host helper: per-dimension ensemble mean at one time step.
// walkers: DEVICE pointer in SoA layout (theta_dim contiguous runs of
//          n_walkers floats).
// means:   host buffer of theta_dim * (total steps) doubles; this call fills
//          entries [step*theta_dim, (step+1)*theta_dim).
void get_mean(float *walkers, double *means,
unsigned int n_walkers,
unsigned int theta_dim, int step)
{
float *start_ind, *stop_ind;
for(int i = 0; i < theta_dim; i++)
{
start_ind = walkers + i*n_walkers;
stop_ind = walkers + (i+1)*n_walkers;
// copy the device range into a device_vector and reduce on the GPU
thrust::device_vector<float> vec(
start_ind, stop_ind
);
means[i + theta_dim*step] = thrust::reduce(
vec.begin(), vec.end()
) / n_walkers;
}
}
// Driver: initialize N_WALKERS walkers around the origin, run STEPS emcee
// sweeps sampling the Rosenbrock density, and log per-step ensemble means
// plus the initial/final walker positions to text files.
int main(int argc, char *argv[]) {
curandState_t *states;
int seed = 10; // fixed RNG seed for reproducible chains
int blocksize = get_block_size(N_WALKERS);
int n_blocks = ceil((float) N_WALKERS / blocksize);
int a = 2; // stretch-move scale parameter
// split the ensemble into two halves for the alternating update
int k = floor((double) N_WALKERS / 2);
int k2 = N_WALKERS - k;
float *walkers_h, *walkers_d;
// calloc -> the sampling region is centered at the origin
float *center_h = (float*) calloc(PARAMS, sizeof(float));
int r = 2; // half-width of the initial uniform cube
walkers_h = (float*) malloc(PARAMS*N_WALKERS*sizeof(float));
cudaMemcpyToSymbol(center_d, center_h, PARAMS*sizeof(float));
cudaMalloc((void**) &states, N_WALKERS*sizeof(curandState_t));
init<<<n_blocks, blocksize>>>(seed, N_WALKERS, states);
cudaMalloc((void**) &walkers_d, PARAMS*N_WALKERS*sizeof(float));
init_walkers<<<n_blocks*2,blocksize>>>(walkers_d, N_WALKERS, PARAMS, r, states);
printf("USING %d blocks of %d threads\n",
n_blocks, blocksize);
// snapshot the initial ensemble (blocking copy also synchronizes)
cudaMemcpy(walkers_h, walkers_d, PARAMS*N_WALKERS*sizeof(float), cudaMemcpyDeviceToHost);
FILE *fp2 = fopen("out.txt", "w");
for(int i =0; i < N_WALKERS; i++) {
for(int j=0; j<PARAMS; j++){
fprintf(fp2, "%f\t", walkers_h[i +j*N_WALKERS]);
}
fprintf(fp2, "\n");
}
fclose(fp2);
int STEPS = 10000;
double *means;
means = (double *) malloc(PARAMS*STEPS*sizeof(double));
// main chain: one kernel sweep + on-GPU mean reduction per step
for(int t = 0; t<STEPS; t++)
{
emcee_emcee<<<n_blocks,blocksize>>>(walkers_d,
k, k2, N_WALKERS, PARAMS,
a, states);
get_mean(walkers_d, means, N_WALKERS, PARAMS, t);
}
FILE *fp4 = fopen("means.txt","w");
for(int j =0; j<STEPS; j++)
{
for(int i =0; i<PARAMS; i++)
{
fprintf(fp4, "%f \t", means[i +j*PARAMS]);
}
fprintf(fp4, "\n");
}
fclose(fp4);
// snapshot the final ensemble
cudaMemcpy(walkers_h, walkers_d, PARAMS*N_WALKERS*sizeof(float), cudaMemcpyDeviceToHost);
FILE *fp3 = fopen("out2.txt", "w");
for(int i =0; i < N_WALKERS; i++) {
for(int j=0; j<PARAMS; j++){
fprintf(fp3, "%f\t", walkers_h[i+j*N_WALKERS]);
}
fprintf(fp3, "\n");
}
fclose(fp3);
return 0;
}
|
19,032 |
// Babak Poursartip
// 09/14/2020
// Udemy Cuda
// unique index calculation
#include <cstdio>
// ===========================================
// 3d grid, 3d block: flatten (block rank in grid, thread rank in block)
// into one unique global thread id and print all coordinates.
__global__ void unique_gid_calculation_3d_3d(int *input) {
  int blockVolume = blockDim.x * blockDim.y * blockDim.z;
  // row-major rank of this thread inside its block
  int localRank = threadIdx.x
                + threadIdx.y * blockDim.x
                + threadIdx.z * blockDim.x * blockDim.y;
  // row-major rank of this block inside the grid
  int blockRank = blockIdx.x
                + blockIdx.y * gridDim.x
                + blockIdx.z * gridDim.x * gridDim.y;
  int tid = blockRank * blockVolume + localRank;
  printf("gridDim.x: %d, gridDim.y: %d, gridDim.z: %d, blockDim.x: %d, blockDim.y: %d, blockDim.z: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d, tid: %d \n",
         gridDim.x, gridDim.y, gridDim.z,
         blockDim.x, blockDim.y, blockDim.z,
         blockIdx.x, blockIdx.y, blockIdx.z,
         threadIdx.x, threadIdx.y, threadIdx.z,
         tid);
}
// ===========================================
// 2d grid, 2d block: flatten a 2D grid of 2D blocks into one global id.
__global__ void unique_gid_calculation_2d_2d(int *input) {
  // row-major rank of this thread inside its block
  int localRank = threadIdx.x + threadIdx.y * blockDim.x;
  int blockSize = blockDim.x * blockDim.y;
  // threads in complete grid rows above, plus blocks to the left in this row
  int rowOffset = blockIdx.y * (blockSize * gridDim.x);
  int colOffset = blockIdx.x * blockSize;
  int gid = localRank + colOffset + rowOffset;
  printf("gridDim.x: %d, gridDim.y: %d, gridDim.z: %d, blockDim.x: %d, blockDim.y: %d, blockDim.z: %d, blockIdx.x: %d, blockIdx.y: %d, blockIdx.z: %d, threadIdx.x: %d, threadIdx.y: %d, threadIdx.z: %d, gid: %d \n",
         gridDim.x, gridDim.y, gridDim.z,
         blockDim.x, blockDim.y, blockDim.z,
         blockIdx.x, blockIdx.y, blockIdx.z,
         threadIdx.x, threadIdx.y, threadIdx.z,
         gid);
}
// ===========================================
// Driver: upload a 6x8 index array and print each thread's computed global
// id from the device.
// NOTE(review): despite the names, grid(nCol,1,1)/block(nRow,1,1) form a 1D
// launch, so the 3d/2d id kernels degenerate to simple 1D indexing here.
int main() {
printf(" starts ...");
int nCol = 6;
int nRow = 8;
int array_size = nCol*nRow;
int array_byte_size = sizeof(int) * array_size;
// variable-length array (compiler extension in C++); fine for this demo size
int h_data[array_size];
for (int i = 0; i< array_size; ++i)
{
h_data[i] = i;
}
printf(" data on the host: \n");
// NOTE(review): (i%8)==0 prints a newline after the FIRST element of each
// row rather than after the last — cosmetic quirk, kept as-is
for (int i = 0; i < array_size; ++i)
{
printf(" %d", h_data[i]);
if((i%8)==0) printf("\n");
}
printf("\n\n");
int *d_data; // array on the device
cudaMalloc((void **)&d_data, array_byte_size);
cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
dim3 grid(nCol, 1, 1);
dim3 block(nRow, 1, 1);
printf(" data on the device: \n");
//unique_gid_calculation_2d_2d<<<grid, block>>>(d_data);
unique_gid_calculation_3d_3d<<<grid, block>>>(d_data);
// wait for device printf output before resetting the device
cudaDeviceSynchronize();
cudaDeviceReset();
printf(" finished.");
return 0;
}
|
19,033 | #include "includes.h"
// Dot product of each row of a column-major (rows x cols) matrix with vdata:
// results[tid] = sum_i ddata[i*rows + tid] * vdata[i], one thread per row.
// (A large block of commented-out chi-square scaffolding was removed.)
__global__ void kernel(unsigned int rows, unsigned int cols , float* ddata,float* vdata ,float *results){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// BUG FIX: the original read/wrote unconditionally, so any launch whose
// grid over-covers `rows` indexed ddata and results out of bounds. The
// column-major layout ddata[i*rows + tid] is only valid for tid < rows.
if (tid >= rows) return;
float dp = 0.0f;
for (int i = 0; i < cols; i++)
{
dp += ddata[i*rows+tid]*vdata[i];
}
results[tid] = dp;
}
19,034 | #include <cmath>
#include <cstdio>
#include <iostream>
#include "sobel.cuh"
using namespace std;
// 3x3 convolution straight from global memory (no shared-memory tiling).
// Out-of-image taps use avg_intensity (zero) padding. r, c: image rows/cols.
__global__ void conv_kernel_no_shmem(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) {
int tidx = threadIdx.x, tidy = threadIdx.y;
int bidx = blockIdx.x, bidy = blockIdx.y;
int bdy = blockDim.y, bdx = blockDim.x;
float avg_intensity = 0;
long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 -1
long y_idx = tidy + (long)bdy * (long)bidy;
// BUG FIX: the flat index was stored in an int, truncating for images with
// more than 2^31-1 pixels even though x_idx/y_idx are deliberately long.
long idx = y_idx*c+x_idx;
float temp = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
// in-bounds tap -> image value; out-of-bounds -> zero padding
if (x_idx-1+j < c && y_idx-1+i < r && x_idx-1+j >= 0 && y_idx-1+i >= 0)
temp += image[(y_idx-1+i)*c + x_idx-1+j] * mask[i*3+j];
else
temp += avg_intensity * mask[i*3+j];
}
}
// guard the grid tail before writing
if (y_idx < r && x_idx < c)
output[idx] = temp;
}
// Tiled 3x3 convolution using dynamic shared memory.
// Shared layout (floats): a (bdx+2)*(bdy+2) image tile with a 1-pixel halo,
// then the 3x3 mask, then a bdx*bdy output staging tile; the host must pass
// sharedMem >= ((bdx+2)*(bdy+2) + 9 + bdx*bdy) * sizeof(float).
// Out-of-image halo cells are filled with avg_intensity (zero padding).
__global__ void conv_kernel(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) {
int tidx = threadIdx.x, tidy = threadIdx.y;
int bidx = blockIdx.x, bidy = blockIdx.y;
int bdy = blockDim.y, bdx = blockDim.x;
float avg_intensity = 0;
extern __shared__ float arr[]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
float* img = &arr[0];
float* msk = &arr[(bdx + 2) * (bdy + 2)];
float* out = &arr[(bdx + 2) * (bdy + 2) + 3*3];
long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 -1
long y_idx = tidy + (long)bdy * (long)bidy;
// load the tile interior: each thread stages its own pixel
if (x_idx < c && y_idx < r)
img[(tidy+1)*(bdx+2) + tidx+1] = image[y_idx * c + x_idx];
else
img[(tidy+1)*(bdx+2) + tidx+1] = avg_intensity;
// stage the 3x3 mask
if (tidx < 3 && tidy < 3)
msk[tidy*3 + tidx] = mask[tidy*3 + tidx];
// halo corners (one thread each)
if (tidx == 0 && tidy == 0) { // leftmost top corner
if (x_idx >= 1 && y_idx >= 1)
img[tidy*(bdx+2) + tidx] = image[(y_idx-1) * c + x_idx-1];
else
img[tidy*(bdx+2) + tidx] = avg_intensity;
}
else if (tidx == 0 && tidy == bdy - 1) { // leftmost bottom corner
if (x_idx >= 1 && y_idx < r-1)
img[(tidy+2)*(bdx+2) + tidx] = image[(y_idx+1) * c + x_idx-1];
else
img[(tidy+2)*(bdx+2) + tidx] = avg_intensity;
}
else if (tidx == bdx - 1 && tidy == 0) { // rightmost top corner
if (x_idx < c -1 && y_idx >= 1)
img[tidy*(bdx+2) + tidx+2] = image[(y_idx-1) * c + x_idx+1];
else
img[tidy*(bdx+2) + tidx+2] = avg_intensity;
}
else if (tidx == bdx - 1 && tidy == bdy -1) { // rightmost bottom corner
if (x_idx < c -1 && y_idx < r-1)
img[(tidy+2)*(bdx+2) + tidx+2] = image[(y_idx+1) * c + x_idx+1];
else
img[(tidy+2)*(bdx+2) + tidx+2] = avg_intensity;
}
// halo edges (boundary threads load the adjacent out-of-tile pixel)
if (tidx == 0) { // leftmost col
if (x_idx >= 1)
img[(tidy+1)*(bdx+2) + tidx] = image[y_idx*c + x_idx-1];
else
img[(tidy+1)*(bdx+2) + tidx] = avg_intensity;
}
else if (tidx == bdx - 1) { // rightmost col
if (x_idx < c-1)
img[(tidy+1)*(bdx+2) + tidx+2] = image[y_idx*c + x_idx+1];
else
img[(tidy+1)*(bdx+2) + tidx+2] = avg_intensity;
}
if (tidy == 0) { // top row
if (y_idx >= 1)
img[tidy*(bdx+2) + tidx+1] = image[(y_idx-1)*c + x_idx];
else
img[tidy*(bdx+2) + tidx+1] = avg_intensity;
}
else if (tidy == bdy - 1) { // bottom row
if (y_idx < r-1)
img[(tidy+2)*(bdx+2) + tidx+1] = image[(y_idx+1)*c + x_idx];
else
img[(tidy+2)*(bdx+2) + tidx+1] = avg_intensity;
}
__syncthreads();
// convolve from shared memory into the staging tile (stride bdx)
out[tidy*bdx+tidx] = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
out[tidy*bdx+tidx] += img[(tidy+i)*(bdx+2) + (tidx+j)] * msk[i*3+j];
}
}
__syncthreads();
if (x_idx < c && y_idx < r)
// BUG FIX: the original read out[tidy*bdy+tidx], but the staging tile is
// written with row stride bdx above — wrong results for non-square blocks.
output[y_idx*c+x_idx] = out[tidy*bdx+tidx];
}
// Horizontal 3-tap convolution over an r x c row-major image.
// Launch: 2-D grid of (bdx x bdy) blocks covering the image; dynamic shared
// memory must hold bdy*(bdx+2) + 3 + bdx*bdy floats (see conv_opt's launch).
// Pixels outside the image contribute avg_intensity (0).
__global__ void conv_kernel_horiz(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) {
    int tidx = threadIdx.x, tidy = threadIdx.y;
    int bidx = blockIdx.x, bidy = blockIdx.y;
    int bdy = blockDim.y, bdx = blockDim.x;
    float avg_intensity = 0;
    extern __shared__ float arr[]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
    float* img = &arr[0];                   // bdy rows of (bdx+2): tile plus left/right halo
    float* msk = &arr[(bdx + 2) * bdy];     // the 3 mask taps
    float* out = &arr[(bdx + 2) * bdy + 3]; // bdx x bdy result tile
    long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 - 1
    long y_idx = tidy + (long)bdy * (long)bidy;
    // Load the tile interior; out-of-range pixels get the fill value.
    if (x_idx < c && y_idx < r)
        img[tidy*(bdx+2) + tidx+1] = image[y_idx * c + x_idx];
    else
        img[tidy*(bdx+2) + tidx+1] = avg_intensity;
    // Load the 3-tap mask into shared memory.
    if (tidx < 3 && tidy == 0)
        msk[tidx] = mask[tidx];
    if (tidx == 0) { // left halo column
        if (x_idx >= 1)
            img[tidy*(bdx+2) + tidx] = image[y_idx*c + x_idx-1];
        else
            img[tidy*(bdx+2) + tidx] = avg_intensity;
    }
    else if (tidx == bdx - 1) { // right halo column
        if (x_idx < c-1)
            img[tidy*(bdx+2) + tidx+2] = image[y_idx*c + x_idx+1];
        else
            img[tidy*(bdx+2) + tidx+2] = avg_intensity;
    }
    __syncthreads();
    out[tidy*bdx+tidx] = 0;
    for (int i = 0; i < 3; i++) {
        out[tidy*bdx+tidx] += img[tidy*(bdx+2) + (tidx+i)] * msk[i];
    }
    __syncthreads();
    if (x_idx < c && y_idx < r)
        // BUG FIX: the result tile is bdx wide (it is written as out[tidy*bdx+tidx]
        // above); the original read out[tidy*bdy+tidx], which picks the wrong
        // element whenever bdx != bdy.
        output[y_idx*c+x_idx] = out[tidy*bdx+tidx];
}
// Vertical 3-tap convolution over an r x c row-major image.
// Launch: 2-D grid of (bdx x bdy) blocks; dynamic shared memory must hold
// (bdy+2)*bdx + 3 + bdx*bdy floats. Out-of-image pixels read as avg_intensity (0).
__global__ void conv_kernel_vert(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) {
    int tidx = threadIdx.x, tidy = threadIdx.y;
    int bidx = blockIdx.x, bidy = blockIdx.y;
    int bdy = blockDim.y, bdx = blockDim.x;
    float avg_intensity = 0;
    extern __shared__ float arr[]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
    float* img = &arr[0];                   // (bdy+2) rows x bdx cols: tile plus top/bottom halo
    float* msk = &arr[(bdy + 2) * bdx];     // the 3 mask taps
    float* out = &arr[(bdy + 2) * bdx + 3]; // bdx x bdy result tile
    long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 - 1
    long y_idx = tidy + (long)bdy * (long)bidy;
    // Load the tile interior; out-of-range pixels get the fill value.
    if (x_idx < c && y_idx < r)
        img[(tidy+1)*bdx + tidx] = image[y_idx * c + x_idx];
    else
        img[(tidy+1)*bdx + tidx] = avg_intensity;
    // Load the 3-tap mask into shared memory.
    // BUG FIX: the original guard was (tidx == 0 && tidx < 3), so every thread
    // of column 0 executed msk[tidy] = mask[tidy] -- an out-of-bounds shared
    // write (and global read) whenever bdy > 3. The second test must be on tidy.
    if (tidx == 0 && tidy < 3)
        msk[tidy] = mask[tidy];
    if (tidy == 0) { // top halo row
        if (y_idx >= 1)
            img[tidy*bdx + tidx] = image[(y_idx-1)*c + x_idx];
        else
            img[tidy*bdx + tidx] = avg_intensity;
    }
    else if (tidy == bdy - 1) { // bottom halo row
        if (y_idx < r-1)
            img[(tidy+2)*bdx + tidx] = image[(y_idx+1)*c + x_idx];
        else
            img[(tidy+2)*bdx + tidx] = avg_intensity;
    }
    __syncthreads();
    out[tidy*bdx+tidx] = 0;
    for (int i = 0; i < 3; i++) {
        out[tidy*bdx+tidx] += img[(tidy+i)*bdx + tidx] * msk[i];
    }
    __syncthreads();
    if (x_idx < c && y_idx < r)
        // BUG FIX: the result tile is bdx wide; the original indexed with bdy.
        output[y_idx*c+x_idx] = out[tidy*bdx+tidx];
}
// Vertical 3-tap convolution with the shared tile stored column-major
// ((bdy+2) elements per column) so consecutive taps of one thread are
// contiguous in shared memory. Dynamic shared memory must hold
// (bdy+2)*bdx + 3 + bdx*bdy floats. Out-of-image pixels read as avg_intensity (0).
__global__ void conv_kernel_vert_opt(const float* image, const float* mask, float* output, unsigned int r, unsigned int c) {
    int tidx = threadIdx.x, tidy = threadIdx.y;
    int bidx = blockIdx.x, bidy = blockIdx.y;
    int bdy = blockDim.y, bdx = blockDim.x;
    float avg_intensity = 0;
    extern __shared__ float arr[]; // Can't use "volatile" to prevent shmem data from being directly loaded onto registers
    float* img = &arr[0];                   // bdx columns of (bdy+2): tile plus top/bottom halo
    float* msk = &arr[(bdy + 2) * bdx];     // the 3 mask taps
    float* out = &arr[(bdy + 2) * bdx + 3]; // result tile, also column-major
    long x_idx = tidx + (long)bdx * (long)bidx; // long since can be > 2^31 - 1
    long y_idx = tidy + (long)bdy * (long)bidy;
    // Load the tile interior; out-of-range pixels get the fill value.
    if (x_idx < c && y_idx < r)
        img[(tidy+1) + tidx*(bdy+2)] = image[y_idx * c + x_idx];
    else
        img[(tidy+1) + tidx*(bdy+2)] = avg_intensity;
    // Load the 3-tap mask into shared memory.
    // BUG FIX: the original guard was (tidx == 0 && tidx < 3); the second test
    // must be on tidy, otherwise msk[tidy]/mask[tidy] is accessed out of
    // bounds for every tidy >= 3 in column 0.
    if (tidx == 0 && tidy < 3)
        msk[tidy] = mask[tidy];
    if (tidy == 0) { // top halo element of this column
        if (y_idx >= 1)
            img[tidy + tidx*(bdy+2)] = image[(y_idx-1)*c + x_idx];
        else
            img[tidy + tidx*(bdy+2)] = avg_intensity;
    }
    else if (tidy == bdy - 1) { // bottom halo element of this column
        if (y_idx < r-1)
            img[(tidy+2) + tidx*(bdy+2)] = image[(y_idx+1)*c + x_idx];
        else
            img[(tidy+2) + tidx*(bdy+2)] = avg_intensity;
    }
    __syncthreads();
    out[tidy+tidx*bdy] = 0;
    for (int i = 0; i < 3; i++) {
        out[tidy+tidx*bdy] += img[(tidy+i) + tidx*(bdy+2)] * msk[i];
    }
    __syncthreads();
    if (x_idx < c && y_idx < r)
        output[y_idx*c+x_idx] = out[tidy+tidx*bdy];
}
// Host wrapper: launches the 3x3 conv_kernel over an r x c image with
// (bdx x bdy) blocks. Shared memory = haloed (bdx+2)x(bdy+2) tile + 3x3 mask
// + bdx*bdy result tile. Blocks until the kernel completes.
__host__ void conv(const float* image, const float* mask, float* output, unsigned int r, unsigned int c, unsigned int bdx, unsigned int bdy) {
    dim3 block(bdx, bdy);
    dim3 grid((c + block.x - 1) / block.x, (r + block.y - 1) / block.y);
    size_t shmem = sizeof(float) * (bdx + 2) * (bdy + 2) + 3 * 3 * sizeof(float) + sizeof(float) * bdx * bdy;
    conv_kernel<<<grid, block, shmem>>>(image, mask, output, r, c);
    // The original assigned `err` but never inspected it, so launch and
    // execution failures were silently dropped.
    cudaError_t err = cudaGetLastError(); // launch-configuration errors
    if (err != cudaSuccess)
        printf("conv launch error: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize(); // asynchronous execution errors
    if (err != cudaSuccess)
        printf("conv execution error: %s\n", cudaGetErrorString(err));
}
// Host wrapper for the separable convolution: horizontal pass with mask1 into
// a temporary image, then vertical pass with mask2 into output. Blocks until
// both kernels complete. Errors are reported instead of being silently dropped.
__host__ void conv_opt(const float* image, const float* mask1, float* mask2, float* output, unsigned int r, unsigned int c, unsigned int bdx, unsigned int bdy) {
    dim3 block(bdx, bdy);
    dim3 grid((c + block.x - 1) / block.x, (r + block.y - 1) / block.y);
    float *temp = NULL; // intermediate image between the two passes
    cudaError_t err = cudaMalloc((void **)&temp, r * c * sizeof(float));
    if (err != cudaSuccess) {
        printf("conv_opt cudaMalloc error: %s\n", cudaGetErrorString(err));
        return;
    }
    conv_kernel_horiz<<<grid, block, sizeof(float) * bdy * (bdx+2) + 3 * sizeof(float) + sizeof(float) * bdx * bdy>>>(image, mask1, temp, r, c);
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("conv_opt horizontal pass error: %s\n", cudaGetErrorString(err));
    conv_kernel_vert_opt<<<grid, block, sizeof(float) * (bdy+2) * bdx + 3 * sizeof(float) + sizeof(float) * bdx * bdy>>>(temp, mask2, output, r, c);
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("conv_opt vertical pass error: %s\n", cudaGetErrorString(err));
    err = cudaFree(temp);
    if (err != cudaSuccess)
        printf("conv_opt cudaFree error: %s\n", cudaGetErrorString(err));
}
// Element-wise gradient magnitude: out = sqrt(outx^2 + outy^2) over an
// r x c image. One thread per pixel; overshooting threads are masked out.
__global__ void magnitude(const float* outx, const float *outy, float* out, unsigned int r, unsigned int c) {
    // long indices: the flat offset y*c + x can exceed 2^31 - 1 for large images
    // (the original truncated the products back into int).
    long x_idx = threadIdx.x + (long)blockDim.x * (long)blockIdx.x;
    long y_idx = threadIdx.y + (long)blockDim.y * (long)blockIdx.y;
    long idx = y_idx*c + x_idx; // code motion: compute the flat index once
    if (x_idx < c && y_idx < r)
        // sqrtf keeps the computation in single precision; the original called
        // the double-precision sqrt on float operands.
        out[idx] = sqrtf(outx[idx]*outx[idx] + outy[idx]*outy[idx]);
}
|
19,035 | // includes, system
#include <stdio.h>
using namespace std;
#include <float.h>
#include <sys/stat.h>
#include <limits>
//#include "cuPrintf.cu"
// includes CUDA
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper functions for SDK examples
// Computes the word/shifted-time marginal P(w, td | d) for a temporal topic
// model. One block per document (blockIdx.x = d); threads cooperate over the
// ts dimension in chunks of block_width_x. For each (tr, w, ts-chunk) the
// kernel sums P(d)*P(z|d)*P(ts|z,d)*P(w|z)*P(tr|w,z) over topics z and
// accumulates it into g_Pwtad at the shifted index Td = tr + ts.
// Assumes blockDim.x == block_width_x (128) -- TODO confirm launch config.
// NOTE(review): the __shared__ scalars (tmp_pd, tmp_pwz, tmp_pzd, tmp_ptrwz)
// are written by every thread of the block without a leader guard; all
// threads store the same value, so the race appears benign -- confirm intent.
__global__ void calc_pwtad (double *g_Pwtad,double *g_Ptszd,double *g_Pzd,double *g_Pwz,double *g_Ptrwz,double *g_Pd, int g_nDocs,int g_nZ, int g_nTs,int g_nVoc, int g_nTr)
{
// nTr,Nvoc
//__shared__ double tmp_pwtad[100];
//y is nz x is nts
const int block_width_x=128;
__shared__ double tmp_pd;
__shared__ double tmp_pwtad[block_width_x];
__shared__ double tmp_pzd;
__shared__ double tmp_pwz;
__shared__ double tmp_ptrwz;
int pwtad_index;
int Td_index;
__shared__ double tmp_Ptszd[block_width_x];
tmp_pd=g_Pd[blockIdx.x];
/*
__shared__ double tmp_pwz[block_height_y];
__shared__ double tmp_pzd[block_height_y];
__shared__ double tmp_ptrwz[block_height_y];
*/
// Number of block_width_x-sized chunks needed to cover the ts axis.
int nTs_counter=ceil((float)g_nTs/(float)block_width_x);
int index_Ptszd;
for (int tr=0;tr<g_nTr;tr++)
{
for (int w=0;w<g_nVoc;w++)
{
for (int ts=0;ts<nTs_counter;ts++)
{
// Reset this chunk's per-ts accumulator before summing over topics.
tmp_pwtad[threadIdx.x]=0;
__syncthreads();
for (int z=0;z<g_nZ;z++)
{
tmp_pwz=g_Pwz[z*g_nVoc+w];
tmp_ptrwz=g_Ptrwz[(z*g_nVoc+w)*g_nTr+tr];
tmp_Ptszd[threadIdx.x]=0;
tmp_pzd=g_Pzd[blockIdx.x*g_nZ+z];
//tmp_pwz[threadIdx.y]=0;
// tmp_pzd[threadIdx.y]=0;
// tmp_ptrwz[threadIdx.y]=0;
__syncthreads();
index_Ptszd=(blockIdx.x*g_nZ+z)*g_nTs+block_width_x*ts+threadIdx.x;
/*
tmp_pwz[threadIdx.y]=g_Pwz[w*g_nz+z*blockDim.y+threadIdx.y];
tmp_pzd[threadIdx.y]=g_Pzd[blockIdx.x*g_nz+z*blockDim.y+threadIdx.y];
tmp_ptrwz[threadIdx.y]=g_Ptrwz[((z*blockDim.y+threadIdx.y)*g_nVoc+w)*g_nTr+tr];
*/
// Guard the tail chunk: only lanes mapping to a valid ts load/accumulate.
if ((block_width_x*ts+threadIdx.x)<g_nTs )
{
tmp_Ptszd[threadIdx.x]= g_Ptszd[index_Ptszd];
}
__syncthreads();
//tmp_pwtad[threadIdx.y][threadIdx.x]=tmp_pzd[threadIdx.y]*tmp_Ptszd[threadIdx.y][threadIdx.x]*tmp_pwz[threadIdx.y]*tmp_ptrwz[threadIdx.y];
//__syncthreads();
if ((block_width_x*ts+threadIdx.x)< g_nTs)
{
tmp_pwtad[threadIdx.x]+=tmp_pd*tmp_pzd*tmp_Ptszd[threadIdx.x]*tmp_pwz*tmp_ptrwz;
}
__syncthreads();
//if (threadIdx.x==0 && threadIdx.y==0)
//printf("tmp_pwtad[0][0]::%0.8f \n",tmp_pwtad[0][0]);
} // nz loop
// Shifted time index: relative time tr plus this lane's ts position.
Td_index=tr+block_width_x*ts+threadIdx.x;
//-((tr+block_width_x*ts+threadIdx.x)/(g_nTr+g_nTs-1));
if(Td_index< (g_nTs+g_nTr-1))
{
pwtad_index=(blockIdx.x*(g_nTs+g_nTr-1)+Td_index)*g_nVoc+w;
//g_Pwtad[(w*(g_nTr+g_nTs-1)+(tr+blockDim.x*ts+threadIdx.x))*g_nDocs+blockIdx.x]+=tmp_pwtad[0][threadIdx.x];
//g_Pwtad[(blockIdx.x*(g_nTr+g_nTs-1)+tr+blockDim.x*ts+threadIdx.x -((tr+blockDim.x*ts+threadIdx.x)/(g_nTr+g_nTs-2)))*g_nVoc+w]+=tmp_pwtad[0][threadIdx.x];
g_Pwtad[pwtad_index]+=tmp_pwtad[threadIdx.x];
}
__syncthreads();
} // nts counter loop
}// tr loop
}// w loop
}
// E-step update of the topic-start-time counts P(ts|z,d). One block per
// (z = blockIdx.x, d = blockIdx.y) pair; a 16x16 thread tile reduces the
// responsibility-weighted counts over vocabulary (x) and shift (y) via a
// shared-memory tree reduction, optionally applies the sparsity penalty,
// and also accumulates the per-(z,d) total into g_zd.
// Assumes blockDim == (block_width_x, block_height_y) = (16,16) -- TODO
// confirm launch configuration.
__global__ void calc_tszd(double *g_zd, double *g_tszd,double *g_Ptszd,double *g_Pzd,double *g_Pwz,double *g_Ptrwz,double *g_Pwtad,double *g_Doc,double *pd,double *pz, int g_nDocs,int g_nZ, int g_nTs,int g_nVoc, int g_nTr,float lambdaTsSparsity, bool z_prior , bool trainflag)
{
const int block_width_x=16;
const int block_height_y=16;
__shared__ double partial_tszd[block_height_y*block_width_x];
double tmp_tszd=0;
double tmp_zd=0;
int gdoc_index;
// Chunks needed to cover the vocabulary (x) and shift (y) axes.
int iterations_nvoc = ceil((float)g_nVoc/(float)block_width_x);
int iterations_ntr = ceil((float)g_nTr/(float)block_height_y);
__shared__ double decrease;
__shared__ double shared_ptszd;
__shared__ double shared_pzd;
__shared__ double shared_pd;
for (int ts=0;ts<g_nTs;ts++)
{
//tmp_sum=0;
tmp_tszd=0;
// NOTE(review): these shared scalars are written by every thread with the
// same value (benign same-value race) -- confirm intent.
shared_ptszd=g_Ptszd[(blockIdx.y*g_nZ+blockIdx.x)*g_nTs+ts];
shared_pzd=g_Pzd[(blockIdx.y)*g_nZ+blockIdx.x];
shared_pd=pd[blockIdx.y];
__syncthreads();
for (unsigned int nvoc_counter=0;nvoc_counter<iterations_nvoc;nvoc_counter++)
{
for (unsigned int ntr_counter=0;ntr_counter<iterations_ntr;ntr_counter++)
{
partial_tszd[threadIdx.y*block_width_x+threadIdx.x]=0;
__syncthreads();
// Each in-range lane computes one responsibility term (posterior of this
// (ts,z) pair for one (word, shift) observation, normalized by g_Pwtad).
if ((ntr_counter*blockDim.y+threadIdx.y)<g_nTr && (nvoc_counter*blockDim.x+threadIdx.x)<g_nVoc )
{ gdoc_index=(blockIdx.y*(g_nTs+g_nTr-1)+ts+ntr_counter*blockDim.y+threadIdx.y-(ts+ntr_counter*blockDim.y+threadIdx.y)/(g_nTs+g_nTr-1))*g_nVoc+nvoc_counter*blockDim.x+threadIdx.x;
partial_tszd[threadIdx.y*block_width_x+threadIdx.x]=(shared_pd*g_Doc[gdoc_index]*shared_pzd* shared_ptszd*g_Pwz[(blockIdx.x)*g_nVoc+nvoc_counter*blockDim.x+threadIdx.x]*g_Ptrwz[(blockIdx.x*g_nVoc+nvoc_counter*blockDim.x+threadIdx.x)*g_nTr+ntr_counter*blockDim.y+threadIdx.y])/(g_Pwtad[gdoc_index]+DBL_EPSILON);
}
__syncthreads();
// Tree reduction of the 16x16 partial terms down to partial_tszd[0].
for (unsigned int s=block_width_x*block_height_y/2;s>=1;s>>=1)
{//g_tszd[s]=s;
if (threadIdx.y*block_width_x+threadIdx.x<s)
{
partial_tszd[threadIdx.y*block_width_x+threadIdx.x]+=partial_tszd[threadIdx.y*block_width_x+threadIdx.x+s];
//shared_partial_tszd[0][][threadIdx.y]+=shared_partial_tszd[idx][threadIdx.z][threadIdx.y];
//sdata[threadIdx.x]+=sdata[threadIdx.x+s];
}
__syncthreads();
}
if (threadIdx.x==0 && threadIdx.y==0)
{
tmp_tszd+=partial_tszd[0];
}
__syncthreads();
} //ntr counter
} //nvoc counter
// Lane (0,0) finalizes this ts entry: sparsity shrinkage, clamp, store.
if (threadIdx.x==0 && threadIdx.y==0)
{
if (lambdaTsSparsity!=0){
decrease = lambdaTsSparsity*pd[blockIdx.y]/(g_nZ*g_nTs);
tmp_tszd-=decrease;
tmp_tszd=max(DBL_EPSILON,tmp_tszd);
}
g_tszd[(blockIdx.y*g_nZ+blockIdx.x)*g_nTs+ts]=tmp_tszd;
tmp_zd+=tmp_tszd;
}
__syncthreads();
} // nts loop
if (threadIdx.x==0 && threadIdx.y==0)
{
// Optional topic prior is mixed into the per-(z,d) normalizer at inference time.
if (z_prior && !trainflag)
tmp_zd+= pd[blockIdx.y]/(g_nZ*pz[blockIdx.x]);
g_zd[blockIdx.y*g_nZ+blockIdx.x]=tmp_zd;
}
}
// E-step update of the relative-time counts P(tr|w,z). One block per
// (w = blockIdx.x, z = blockIdx.y) pair; blockDim.x threads (assumed equal to
// block_width_x = 64 -- TODO confirm launch) reduce over the ts dimension,
// looping over documents d and shifts tr. Lane 0 mixes in the optional
// priors, writes g_trwz, and accumulates the per-(w,z) normalizer into g_wz.
// NOTE(review): the shared scalars below are written by every thread with
// the same value (benign same-value race) -- confirm intent.
__global__ void calc_trwz(double *g_trwz,double *g_wz,double *g_Ptszd,double *g_Pzd,double *g_Pwz,double *g_Ptrwz,double *g_Pwtad,double *g_Doc,double *pd,double *g_trp,double *priortrwz,double *priorwz,int g_nDocs,int g_nZ, int g_nTs,int g_nVoc, int g_nTr,int totWords,float tr_wt,bool usePriorPtrwz,bool tr_prior)
{
const int block_width_x=64;
int gdoc_index;
int iterations_nTs = ceil((float)g_nTs/(float)block_width_x);
__shared__ double partial_trwz[block_width_x];
int Td;
__shared__ double tmp_sum;
__shared__ double tmp_sum_outerloop;
__shared__ double partial_zd;
__shared__ double partial_pwz;
__shared__ double partial_ptrwz;
__shared__ double more;
__shared__ double tmp_pd;
__shared__ double tmp_sum_wz;
__shared__ double word_sum_count[block_width_x];
double sum_count;
tmp_sum_wz=0;
tmp_sum=0;
tmp_sum_outerloop=0;
sum_count=0;
partial_pwz=g_Pwz[blockIdx.y*g_nVoc+blockIdx.x];
__syncthreads();
for(int tr=0;tr<g_nTr;tr++)
{
partial_ptrwz=g_Ptrwz[(blockIdx.y*g_nVoc+blockIdx.x)*g_nTr+tr];
sum_count=0;
__syncthreads();
for (int d=0;d<g_nDocs;d++)
{ partial_zd=g_Pzd[d*g_nZ+blockIdx.y];
tmp_sum=0;
tmp_pd=pd[d];
__syncthreads();
for (unsigned int nts_counter=0;nts_counter<iterations_nTs;nts_counter++)
{
partial_trwz[threadIdx.x]=0;
word_sum_count[threadIdx.x]=0;
__syncthreads();
// Each in-range lane computes one responsibility term at shifted time
// Td = ts + tr, plus the raw word count used for the tr prior.
if (nts_counter*blockDim.x+threadIdx.x <g_nTs)
{ //gdoc_index=(blockIdx.y*(g_nTs+g_nTr-1)+ts+threadIdx.y-(ts+threadIdx.y)/(g_nTs+g_nTr-2))*g_nVoc+nvoc_counter*blockDim.x+threadIdx.x+ *g blockIdx.x;
Td=(nts_counter*block_width_x+threadIdx.x)+tr;
gdoc_index=(d*(g_nTs+g_nTr-1)+Td-(Td/(g_nTs+g_nTr-1)))*g_nVoc+ blockIdx.x;
word_sum_count[threadIdx.x]=g_Doc[gdoc_index];
partial_trwz[threadIdx.x]=(g_Doc[gdoc_index]*tmp_pd*partial_zd*g_Ptszd[(d*g_nZ+blockIdx.y)*g_nTs+nts_counter*block_width_x+threadIdx.x]*partial_pwz*partial_ptrwz)/(g_Pwtad[gdoc_index]+DBL_MIN);
// if ( blockIdx.x==0 && blockIdx.y==0 && blockIdx.z==0 && d==0 && nts_counter==0 )
// printf("tid x is %d and g_Doc[gdoc_index] is %0.8f and partial_trwz[threadIdx.x] is %0.8f \n",threadIdx.x,g_Doc[gdoc_index],partial_trwz[threadIdx.x]);
}
__syncthreads();
// Tree reduction of both accumulators down to element 0.
for (unsigned int s=block_width_x/2;s>=1;s>>=1)
{//g_tszd[s]=s;
if (threadIdx.x<s)
{
partial_trwz[threadIdx.x]+=partial_trwz[threadIdx.x+s];
word_sum_count[threadIdx.x]+=word_sum_count[threadIdx.x+s];
}
__syncthreads();
}
//__syncthreads();
if (threadIdx.x==0 )
{
tmp_sum += partial_trwz[0];
sum_count+=word_sum_count[0];
}
__syncthreads();
//g_tszd[(blockIdx.y*g_nZ+blockIdx.x)*g_nTs+ts]+=partial_tszd[0];
//printf("g_tszd value for index %d is %0.8f\n",(blockIdx.y*g_nZ+blockIdx.x)*g_nTs+ts,partial_tszd[0]);
//__syncthreads();
} // nts counter loop
if (threadIdx.x==0 )
{ tmp_sum_outerloop+=tmp_sum;
// if ( blockIdx.x==1 && blockIdx.y==0 && blockIdx.z==0 )
//printf(" d is %d and tmp_sum is %0.8f and tmp_sum_outerloop is %0.8f \n",d,tmp_sum,tmp_sum_outerloop);
}
__syncthreads();
} //nDocs for loop
// Lane 0 finalizes this tr entry: optional priors, store, and normalizer.
if (threadIdx.x==0 )
{
if (usePriorPtrwz)
{ //double more = priorPwz->mat[w][z][0] * priorPtrwz->mat[tr][w][z] * m_totWords;
tmp_sum_outerloop+=priortrwz[(blockIdx.y*g_nVoc+blockIdx.x)*g_nTr+tr]*priorwz[blockIdx.y*g_nVoc+blockIdx.x]*totWords;
}
else if (tr_prior){
//temp1.mat[tr][w][z] = sum_trwz + m_Options.tr_wt * (sum_count * trp->mat[tr][0][0]) / (m_nVoc * m_nAspects);
//sum_wz += temp1.mat[tr][w][z];
tmp_sum_outerloop+=tr_wt*sum_count*g_trp[tr]/(g_nVoc*g_nZ);// fill it
}
g_trwz[(blockIdx.y*g_nVoc+blockIdx.x)*g_nTr+tr]=tmp_sum_outerloop;
tmp_sum_wz+=tmp_sum_outerloop;
tmp_sum_outerloop=0;
tmp_sum=0;
}
__syncthreads();
}// ntr loop
if (threadIdx.x==0 )
{
g_wz[blockIdx.y*g_nVoc+blockIdx.x]=tmp_sum_wz;
}
__syncthreads();
}
|
19,036 | #include "includes.h"
// Scatters gradients from grouped points back onto the original points.
// Layouts (derived from the pointer arithmetic below):
//   idx:         (b, m, nsample)       grouped-point indices into [0, n)
//   grad_out:    (b, m, nsample, c)    incoming gradients
//   grad_points: (b, n, c)             accumulated output gradients
// One thread per batch element; each thread owns its batch slice, so the
// += accumulation needs no atomics.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
    int index = threadIdx.x;
    if (index >= b) return; // guard: launches with more threads than batches
    idx += m*nsample*index;
    grad_out += m*nsample*c*index;
    grad_points += n*c*index;
    for (int j=0;j<m;++j) {
        for (int k=0;k<nsample;++k) {
            int ii = idx[j*nsample+k];
            for (int l=0;l<c;++l) {
                grad_points[ii*c+l] += grad_out[j*nsample*c+k*c+l];
            }
        }
    }
}
19,037 | #include "includes.h"
// For each vertex i, counts common neighbours with every other vertex j by
// intersecting their (sorted) adjacency slices in the prefix-sum CSR arrays
// d_adjList/d_sizeAdj, and stores each non-zero count compactly starting at
// offset d_LCMSize[i-1] in d_lcmMatrix. One thread per vertex.
// NOTE(review): the __syncthreads() below sits inside `if (i < n_vertices)`;
// when n_vertices is not a multiple of blockDim.x the last block diverges at
// the barrier, which is undefined behaviour -- confirm the launch, or the
// barrier's necessity (each thread only touches its own output range).
// NOTE(review): the zero-init loop starts at d_LCMSize[i-1] but runs to
// d_sizeAdj[i]; mixing the two offset arrays looks suspicious -- verify.
__global__ void OPT_4(int *d_adjList, int *d_sizeAdj, int *d_lcmMatrix, int *d_LCMSize, int n_vertices)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i<n_vertices)
{
int indexUsed = 0, indexOffset = 0;
int iStart = 0, iEnd = 0;
int k = 0;
// Prefix-sum arrays: entry i-1 holds where vertex i's slice begins.
if(i > 0)
{
k = d_sizeAdj[i-1];
indexOffset = d_LCMSize[i-1];
}
iEnd = d_sizeAdj[i];
for(int j = indexOffset; j<iEnd; j++)
{
d_lcmMatrix[j] = 0;
}
__syncthreads();
for(int j = 0; j < n_vertices; j++) {
if(i==j)
continue;
iStart = k;
int jStart = 0, jEnd = 0;
if(j > 0)
jStart = d_sizeAdj[j-1];
jEnd = d_sizeAdj[j];
// Merge-style intersection of the two sorted adjacency slices.
int compVec = 0;
while (iStart < iEnd && jStart < jEnd)
{
if(d_adjList[iStart] < d_adjList[jStart])
iStart++;
else if (d_adjList[jStart] < d_adjList[iStart])
jStart++;
else // if arr1[i] == arr2[j]
{
jStart++;
iStart++;
compVec++;
}
}
// Append the non-zero intersection count to this vertex's compact row.
if (compVec > 0)
{
atomicAdd((int*)&d_lcmMatrix[indexUsed + indexOffset], compVec);
// d_lcmMatrix[indexUsed + indexOffset] = compVec;
indexUsed++;
}
// __syncthreads();
}
// __syncthreads();
// d_LCMSize[i] = indexUsed;
// __syncthreads();
}
}
19,038 |
#include<iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
// Folds the l images stored back-to-back (n_m pixels each, N = n_m * l total)
// into the first image: pixel tId accumulates the same pixel of every image.
__global__ void kernel( float* r_gpu, float* g_gpu, float* b_gpu, int N, int n_m) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId >= n_m) return; // only threads owning a pixel of the first image work
    for (int offset = n_m; offset < N; offset += n_m) {
        r_gpu[tId] += r_gpu[tId + offset];
        g_gpu[tId] += g_gpu[tId + offset];
        b_gpu[tId] += b_gpu[tId + offset];
    }
}
// Divides each of the first N pixels of the three channels by l, completing
// the average started by kernel(). One thread per pixel.
__global__ void kernel2( float* r_gpu, float* g_gpu, float* b_gpu, int N, int l) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId >= N) return;
    r_gpu[tId] /= l;
    g_gpu[tId] /= l;
    b_gpu[tId] /= l;
}
// Accumulates one RGB triple (*r, *g, *b) into the running totals
// pointed to by r1/g1/b1 (CPU reference for the GPU reduction).
void sumar(float* r1, float* g1, float* b1, float* r, float* g, float* b) {
    //cout << *r << "|" << 1-*r << endl;
    *r1 = *r1 + *r;
    *g1 = *g1 + *g;
    *b1 = *b1 + *b;
}
// Divides the accumulated RGB totals by the image count l, in place
// (CPU reference for the GPU averaging step).
void promedio(float* r, float* g, float* b, int l) {
    *r /= l;
    *g /= l;
    *b /= l;
}
// Reads l RGB images of size n x m from images6.txt, averages them on the
// GPU (kernel folds the sum into the first image, kernel2 divides by l),
// reports the GPU time and writes the averaged image to gpu_img_salida.txt.
int main(int argc, char const *argv[]) {
    FILE *pFile;
    int n, m, l;
    float *r, *g, *b;
    pFile = fopen ("images6.txt","r");
    if (pFile == NULL) { // robustness: fail cleanly instead of crashing in fscanf
        cout << "ERROR: could not open images6.txt" << endl;
        return 1;
    }
    fscanf(pFile, "%d %d %d", &l, &m, &n);
    r = new float[n*m*l];
    g = new float[n*m*l];
    b = new float[n*m*l];
    int block_size = 256;
    int grid_size = (int) ceil((float) n*m / block_size);
    float* r_gpu, *g_gpu, *b_gpu;
    cudaMalloc(&r_gpu, sizeof(float) * n * m * l);
    cudaMalloc(&g_gpu, sizeof(float) * n * m * l);
    cudaMalloc(&b_gpu, sizeof(float) * n * m * l);
    // Images are stored back-to-back: image j's channel plane at offset j*n*m.
    for (int j = 0; j < l; ++j){
        for (int i = 0; i < n*m; ++i) {
            fscanf (pFile, "%f", &r[i+(j*n*m)]);
        }
        for (int i = 0; i < n*m; ++i) {
            fscanf (pFile, "%f", &g[i+(j*n*m)]);
        }
        for (int i = 0; i < n*m; ++i) {
            fscanf (pFile, "%f", &b[i+(j*n*m)]);
        }
    }
    fclose (pFile);
    cudaMemcpy(r_gpu, r, sizeof(float) * n * m * l, cudaMemcpyHostToDevice);
    cudaMemcpy(g_gpu, g, sizeof(float) * n * m * l, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, sizeof(float) * n * m * l, cudaMemcpyHostToDevice);
    int tamanio = n * m * l;
    int nm = n * m;
    cudaEvent_t ct1, ct2;
    float dt;
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    kernel<<<grid_size, block_size>>>(r_gpu, g_gpu, b_gpu, tamanio, nm);
    kernel2<<<grid_size, block_size>>>(r_gpu, g_gpu, b_gpu, nm, l);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cout << "Tiempo GPU: " << dt << " [ms]" << endl;
    cudaEventDestroy(ct1); // release timing events (originally leaked)
    cudaEventDestroy(ct2);
    cudaMemcpy(r, r_gpu, sizeof(float) * n * m * l, cudaMemcpyDeviceToHost);
    cudaMemcpy(g, g_gpu, sizeof(float) * n * m * l, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, b_gpu, sizeof(float) * n * m * l, cudaMemcpyDeviceToHost);
    FILE * pSalida;
    pSalida = fopen ("gpu_img_salida.txt","w");
    fprintf(pSalida, "%d %d\n", m, n);
    for (int i = 0; i < n*m; ++i) {
        if(i == n*m - 1) {
            fprintf(pSalida, "%f", r[i]);
        } else {
            fprintf(pSalida, "%f ", r[i]);
        }
    }
    fprintf(pSalida, "\n");
    for (int i = 0; i < n*m; ++i) {
        if(i == n*m - 1) {
            fprintf(pSalida, "%f", g[i]);
        } else {
            fprintf(pSalida, "%f ", g[i]);
        }
    }
    fprintf(pSalida, "\n");
    for (int i = 0; i < n*m; ++i) {
        if(i == n*m - 1) {
            fprintf(pSalida, "%f", b[i]);
        } else {
            fprintf(pSalida, "%f ", b[i]);
        }
    }
    fclose(pSalida); // flush and close the output file (originally leaked)
    // BUG FIX: arrays allocated with new[] must be released with delete[];
    // the original used scalar delete, which is undefined behaviour.
    delete[] r;
    delete[] g;
    delete[] b;
    cudaFree(r_gpu);
    cudaFree(g_gpu);
    cudaFree(b_gpu);
    //cin.get();
    return 0;
}
19,039 | /*******************************************************************************
GPU OPTIMIZED MONTE CARLO (GOMC) 2.75
Copyright (C) 2022 GOMC Group
A copy of the MIT License can be found in License.txt
along with this program, also can be found at <https://opensource.org/licenses/MIT>.
********************************************************************************/
#include "CUDAMemoryManager.cuh"
#ifdef GOMC_CUDA
long long CUDAMemoryManager::totalAllocatedBytes = 0;
std::unordered_map<void *, std::pair<unsigned int, std::string> > CUDAMemoryManager::allocatedPointers;
// Allocates `size` bytes of device memory and records the pointer (with its
// size and variable name) for leak tracking. Returns the cudaMalloc status.
cudaError_t CUDAMemoryManager::mallocMemory(void **address, unsigned int size, std::string var_name)
{
  if (size == 0) {
    std::cout << "Warning! You are trying to allocate " << var_name << " with a size of zero bytes!\n";
  }
  cudaError_t ret = cudaMalloc(address, size);
  // BUG FIX: only track the pointer when the allocation succeeded; on failure
  // *address is not a valid device pointer and recording it corrupts both the
  // pointer map and the totalAllocatedBytes counter.
  if (ret == cudaSuccess) {
    allocatedPointers[*address] = std::make_pair(size, var_name);
    totalAllocatedBytes += size;
  }
  return ret;
}
// Frees a tracked device pointer, updating the bookkeeping; warns when the
// pointer is non-null but unknown (double free or never allocated).
cudaError_t CUDAMemoryManager::freeMemory(void *address, std::string var_name)
{
  auto entry = allocatedPointers.find(address);
  if (entry != allocatedPointers.end()) {
    totalAllocatedBytes -= entry->second.first;
    allocatedPointers.erase(entry);
  } else if (address != nullptr) {
    std::cout << "Warning! You are trying to free " << var_name << " but it has already been freed\n"
              << "\tor was never allocated!\n";
  }
  return cudaFree(address);
}
// Returns true when every tracked allocation was already freed; otherwise
// reports each leaked pointer, frees it on the caller's behalf, and returns false.
bool CUDAMemoryManager::isFreed()
{
  const bool allClean = allocatedPointers.empty();
  while (!allocatedPointers.empty()) {
    auto it = allocatedPointers.begin();
    std::cout << "You forgot to free memory " << it->second.second
              << " with " << it->second.first << " bytes allocated!\n";
    std::cout << "I am going to free it for you!\n";
    freeMemory(it->first, it->second.second);
  }
  return allClean;
}
#endif
|
19,040 |
// Follows parent links in the equivalence table until reaching an element
// that is its own parent (the root of its component), and returns it.
__device__ int findRoot(const int equivalenceMatrix[], int elementIndex){
    int node = elementIndex;
    while (equivalenceMatrix[node] != node) {
        node = equivalenceMatrix[node];
    }
    return node;
}
// Final pass of connected-component labelling: replaces each pixel's label
// with the root of its equivalence chain. The image is laid out column-major
// (flat index = col * height + row); one thread per pixel.
__global__ void finalUpdate(const int* input, int* output, const int height, const int width){
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Guard added: grids that are rounded up past the image size would
    // otherwise read and write out of bounds.
    if (row >= height || col >= width) return;
    int globalIndex = col * height + row;
    output[globalIndex] = findRoot(input, input[globalIndex]);
}
|
19,041 | #include <stdio.h>
// Prints a greeting from every GPU thread, tagged with its thread and block id.
__global__ void helloFromGPU() {
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    printf("Hello World from GPU! %d %d\n", tid, bid);
}
// Launches 1024 single-thread blocks of the hello kernel after greeting from
// the CPU.
int main(int argc, char**argv) {
    printf("Hello World from CPU!\n");
    int blocks = 1024;
    int threads = 1;
    helloFromGPU<<<blocks, threads>>>();
    // Kernel launches are asynchronous: synchronize so the device-side printf
    // output is guaranteed to be flushed before the context is torn down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
19,042 | // Copyright (c) 2018 John Biddiscombe
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "cuda_runtime.h"
// Classic SAXPY: y[i] <- a * x[i] + y[i] for the first n elements,
// one thread per element with a bounds guard for the grid tail.
__global__
void saxpy(int n, float a, float *x, float *y)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return;
    y[i] = a * x[i] + y[i];
}
|
19,043 | #include "Descriptor.cuh"
// Plain value constructor: copies the vertex-attribute layout parameters into
// the members via the initializer list; the body is intentionally empty.
// NOTE(review): the parameters mirror glVertexAttribPointer-style arguments
// (size/type/normalized/stride/pointer) -- confirm against the consumer.
Descriptor::Descriptor(int32_t size, int32_t type, bool normalized, int32_t stride, int32_t pointer) : size(size), type(type), normalized(normalized), stride(stride), pointer(pointer)
{
}
19,044 | /*
* hw04p02.cu
*
* Created on: Oct 04, 2015
* Author: Kazi
* Usage:
* It performs integer multiplication of a 16x32 matrix with a 32x1 vector
* on a GPU. Does not take any arguments. Just generates predefined matrices
* and reports the time taken to do the multiplication.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <iostream>
/*
* Given an array A of size (h x w) and a vector b of size (w), it takes the product
* Ab and writes it to a vector c of size (h).
*/
/*
 * Given an array A of size (h x w) and a vector b of size (w), it takes the
 * product Ab and writes it to a vector c of size (h). One thread per output
 * row; threads with index >= size do nothing.
 */
__global__ void multArray(int* A, int* b, int* c, int w, int size)
{
    const int ti = threadIdx.x; // output row handled by this thread
    if (ti >= size) return;
    int acc = 0; // dot product of row ti of A with b
    for (int k = 0; k < w; k++) {
        acc += A[ti*w + k] * b[k];
    }
    c[ti] = acc;
}
/*
* Entry point for the program. Currently specifies matrix and vector size.
* Allocates memory on the host and device and then creates matrices on the
* host. Copies them over to the device to multiply them. Copies the result
* back over to the host.
*/
/*
 * Entry point. Builds a 16x32 matrix A (A[i][j] = i+j) and a 32-vector b
 * (b[j] = j) on the host, multiplies them on the GPU, reports inclusive
 * (with transfers) and exclusive (kernel only) timings plus device
 * properties, and writes the result vector to problem2.out.
 */
int main(int argc, char* argv[])
{
    //Set the size of the arrays, threads, blocks
    int height = 16;
    int width = 32;
    int threads = height;
    int blocks = 1;
    //Allocate memory on the host
    int *hA = (int*)malloc(width*height*sizeof(int));
    int *hb = (int*)malloc(width*sizeof(int));
    int *hc = (int*)malloc(height*sizeof(int));
    //Allocate memory on the device
    int *dA, *db, *dc;
    cudaMalloc((void**) &dA, sizeof(int)*width*height);
    cudaMalloc((void**) &db, sizeof(int)*width);
    cudaMalloc((void**) &dc, sizeof(int)*height);
    //Generate the matrices on the host
    int i;
    int j;
    for(i=0; i<height; i++)
    {
        hc[i] = 0; //Let the storage array be all zeros
        for(j=0; j<width; j++)
        {
            hA[i*width + j] = i+j; //Set the matrix A
            if (i == 0) hb[j] = j; //Set the vector b
        }
    }
    //Start inclusive timing here
    cudaEvent_t startIn, stopIn;
    cudaEventCreate(&startIn);
    cudaEventCreate(&stopIn);
    cudaEventRecord(startIn, 0);
    //Copy hA, hb, hc onto dA, db, dc
    cudaMemcpy(dA, hA, sizeof(int)*width*height, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(int)*width, cudaMemcpyHostToDevice);
    // BUG FIX: dc/hc hold `height` ints; the original copied sizeof(int)*width
    // (32 ints into a 16-int device buffer), writing past the allocation.
    cudaMemcpy(dc, hc, sizeof(int)*height, cudaMemcpyHostToDevice);
    //Start exclusive timing here
    cudaEvent_t startEx, stopEx;
    cudaEventCreate(&startEx);
    cudaEventCreate(&stopEx);
    cudaEventRecord(startEx, 0);
    //Use kernel to multiply A and b
    multArray <<<blocks,threads>>> (dA, db, dc, width, width*height);
    //Stop exclusive timing here
    cudaEventRecord(stopEx, 0);
    cudaEventSynchronize(stopEx);
    float exTime;
    cudaEventElapsedTime(&exTime, startEx, stopEx);
    cudaEventDestroy(startEx);
    cudaEventDestroy(stopEx);
    //Copy dc back into hc
    cudaMemcpy(hc, dc, sizeof(int)*height, cudaMemcpyDeviceToHost);
    //Stop inclusive timing here
    cudaEventRecord(stopIn, 0);
    cudaEventSynchronize(stopIn);
    float inTime;
    cudaEventElapsedTime(&inTime, startIn, stopIn);
    cudaEventDestroy(startIn);
    cudaEventDestroy(stopIn);
    //For testing - to see what the result vector looks like
    for(j=0; j<height; j++)
    {
        std::cout << j << ": " << hc[j] << std::endl;
    }
    //Output timing
    printf("Inclusive time: %f ms. \n", inTime);
    printf("Exclusive time: %f ms. \n", exTime);
    //Get device properties
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    printf("Device name: %s \n", deviceProp.name);
    printf("Clock rate: %d \n", deviceProp.clockRate);
    printf("Multiprocessors: %d \n", deviceProp.multiProcessorCount);
    printf("L2 Cache: %d \n", deviceProp.l2CacheSize);
    printf("Max threads per MP: %d \n", deviceProp.maxThreadsPerMultiProcessor);
    printf("Warp size: %d \n", deviceProp.warpSize);
    printf("Max threads per block: %d \n", deviceProp.maxThreadsPerBlock);
    printf("Max registers per block: %d \n", deviceProp.regsPerBlock);
    printf("Max blocks per MP: 32 \n"); //From table
    printf("Max warps per MP: 64 \n"); //From table
    // FIX: sharedMemPerBlock is a size_t; %d is undefined behaviour here.
    printf("Shared memory per block (B): %zu \n", deviceProp.sharedMemPerBlock);
    printf("Compute capability: %d.%d.\n", deviceProp.major, deviceProp.minor);
    //Write to file
    FILE *fp;
    fp = fopen("./problem2.out","w");
    fprintf(fp, "Results of hw04p02.cu: \n");
    for (i=0; i<height; i++)
    {
        fprintf(fp, "%d\n", hc[i]);
    }
    fclose(fp);
    //Cleanup
    if(dA) cudaFree(dA);
    if(db) cudaFree(db);
    if(dc) cudaFree(dc);
    if(hA) free(hA);
    if(hb) free(hb);
    if(hc) free(hc);
    return 0;
}
|
19,045 | /**
* University of Pittsburgh
* Department of Computer Science
* CS1645: Introduction to HPC Systems
* Instructor: Xiaolong Cui
* This is a skeleton for implementing prefix sum using GPU, inspired
* by nvidia course of similar name.
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define N 512
/* Prototypes */
void random_floats(float* a, int n);
void serial_scan(float* out, float* in, int n);
/*
* You should implement the parallel scan function here!
*/
/*
 * Exclusive prefix sum (Hillis-Steele scan) over n elements.
 * Uses a double-buffered shared array: the two halves of size n are
 * ping-ponged via out_place/in_place each round, so the kernel requires
 * 2 * n * sizeof(float) bytes of dynamic shared memory and must be launched
 * as a single block of exactly n threads.
 */
__global__ void parallel_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp_buf[];
int thread_name = threadIdx.x;
int out_place = 0;
int in_place = 1;
// Seed the exclusive scan: element i starts as input[i-1], element 0 as 0.
temp_buf[out_place*n + thread_name] = (thread_name > 0) ? g_idata[thread_name - 1] : 0;
/* Make sure threads are synchronized on transfer*/
__syncthreads();
// Doubling stride: after round k, each element holds the sum of the 2^k
// elements ending at its position. Buffers swap every round.
for (int displacement = 1; displacement < n; displacement = displacement << 1) {
out_place = 1 - out_place;
in_place = 1 - out_place;
if (thread_name >= displacement) {
temp_buf[out_place*n + thread_name] += temp_buf[in_place*n + thread_name - displacement];
}
else {
temp_buf[out_place*n + thread_name] = temp_buf[in_place*n + thread_name];
}
__syncthreads();
}
g_odata[thread_name] = temp_buf[out_place*n + thread_name];
}
/*
* Fills an array a with n random floats.
*/
/*
 * Fills an array a with n random floats in [0, 63]: a random value in
 * [0, 63] divided by a random denominator in [1, 7] (0 mapped to 1).
 */
void random_floats(float* a, int n) {
    // Comment out this line if you want consistent "random".
    srand(time(NULL));
    for (int i = 0; i < n; ++i) {
        const float d = rand() % 8;          // drawn first, matching original call order
        const int numerator = rand() % 64;
        a[i] = numerator / (d > 0 ? d : 1);
    }
}
/*
* Simple Serial implementation of exclusive scan.
*/
/*
 * Simple serial implementation of exclusive scan: out[i] = sum of in[0..i-1].
 * Warns when the straight running total disagrees with the incrementally
 * built last element (a symptom of float accuracy loss).
 */
void serial_scan(float* out, float* in, int n) {
    float running_total = 0;
    out[0] = 0;
    for (int i = 1; i < n; i++) {
        const float x = in[i - 1];
        running_total += x;
        out[i] = out[i - 1] + x;
    }
    if (running_total != out[n - 1]) {
        printf("Warning: exceeding accuracy of float.\n");
    }
}
/*
* This is a simple function that confirms that the output of the scan
* function matches that of a golden image (array).
*/
/*
 * Compares test_out against the golden array within a 0.1 tolerance.
 * Prints the first mismatch (or all mismatches when show_all is set) and
 * returns true if any element differed.
 */
bool printError(float *gold_out, float *test_out, bool show_all) {
    const float epsilon = 0.1;
    bool error = false;
    bool reported = false; // suppress further prints after the first, unless show_all
    for (int i = 0; i < N; ++i) {
        const float diff = abs(gold_out[i] - test_out[i]);
        if (diff > epsilon && !reported) {
            printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff);
            reported = !show_all;
            error = true;
        }
    }
    return error;
}
// Runs the serial reference scan and the GPU scan over N random floats and
// compares the two results.
int main(void) {
    float *in, *out, *gold_out; // host buffers
    float *d_in, *d_out;        // device buffers
    int size = sizeof(float) * N;
    in = (float *)malloc(size);
    random_floats(in, N);
    out = (float *)malloc(size);
    gold_out = (float *)malloc(size);
    /* Allocate arrays on the device */
    cudaMalloc((void **)&d_in, size);
    cudaMalloc((void **)&d_out, size);
    /* Copy over the input array to the device */
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    // ***********
    // RUN SERIAL SCAN
    // ***********
    serial_scan(gold_out, in, N);
    // ***********
    // RUN PARALLEL SCAN
    // ***********
    // BUG FIX: parallel_scan double-buffers an extern __shared__ array of
    // 2*N floats, so the dynamic shared-memory size must be passed at launch;
    // the original passed none (0 bytes), making the kernel access invalid
    // shared memory.
    parallel_scan<<<1, N, 2 * N * sizeof(float)>>>(d_out, d_in, N);
    // BUG FIX: copy the device result into `out` before comparing; the
    // original compared the golden array against an uninitialized host buffer.
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    if (printError(gold_out, out, false)) {
        printf("ERROR: The parallel scan function failed to produce proper output.\n");
    }
    else {
        printf("CONGRATS: The parallel scan function produced proper output.\n");
    }
    // Release host and device memory (originally leaked).
    cudaFree(d_in);
    cudaFree(d_out);
    free(in);
    free(out);
    free(gold_out);
    return 0;
}
|
19,046 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cufft.h>
typedef float2 CComplex;
typedef double2 ZComplex;
// Forward declarations of the element-wise complex helpers; usable from both
// host and device code for any type with .x/.y members (float2, double2).
template<typename T>
static __device__ __host__ inline T operator+(const T a, const T b);
template<typename T>
static __device__ __host__ inline T operator*(const T a, const T b);
// Complex addition: component-wise sum of real (.x) and imaginary (.y) parts.
template<typename T> static __device__ __host__
inline T operator+(const T a, const T b)
{
    T result;
    result.x = a.x + b.x;
    result.y = a.y + b.y;
    return result;
}
// Complex multiplication: (a.x + i*a.y) * (b.x + i*b.y).
template<typename T> static __device__ __host__
inline T operator*(const T a, const T b)
{
    T result;
    result.x = a.x*b.x - a.y*b.y;
    result.y = a.x*b.y + a.y*b.x;
    return result;
}
/// @brief Computes: \f$ X \leftarrow B X \f$.
/// @param[in,out] x On input this is the spectra of the waveform.
/// On exit, this is the spectra multiplied with the
/// spectra of the filter - i.e., the convolution.
/// This is an array whose dimension is [nw].
/// @param[in] b The spectra of the filter coefficients.
/// This is an array whose dimension is [nw].
/// @param[in] nw The number of frequencies.
static __global__
void multiplySpectra(CComplex *x, const CComplex *b, const int nw)
{
// Grid-stride traversal: each thread multiplies every stride-th bin,
// so any launch configuration covers all nw frequencies.
const int stride = blockDim.x*gridDim.x;
int i = blockIdx.x*blockDim.x + threadIdx.x;
while (i < nw)
{
x[i] = x[i]*b[i];
i += stride;
}
}
// Opaque handle wrapper so C callers can own a cuFFT plan.
struct cufft32z_struct
{
cufftHandle mPlan;
};
extern "C"
void clear(struct cufft32z_struct *cuft);
extern "C"
void initialize(struct cufft32z_struct *cuft);
// Releases the cuFFT plan held by the struct. The cufftDestroy return
// status is not checked.
void clear(struct cufft32z_struct *cuft)
{
cufftDestroy(cuft->mPlan);
}
// Stub: plan creation is not implemented yet — the body is commented out,
// so calling clear() on an uninitialized struct passes an invalid handle.
void initialize(struct cufft32z_struct *cuft)
{
//checkCudaErrors(cufftMakePlan1d(plan_input, new_size, CUFFT_C2C, 1, worksize));
}
|
19,047 | #include "includes.h"
// Stores the sum a + b into *c. Intended for a single-thread launch; every
// launched thread writes the same result.
__global__ void suma(int a, int b, int *c){
c[0] = a + b;
}
19,048 | #include "includes.h"
// Element-wise ReLU: dst[i] = max(src[i], 0) for i in [0, n), using a
// grid-stride loop so any launch configuration covers the whole array.
__global__ void cu_relu(const float* src, float* dst, int n){
const int step = blockDim.x * gridDim.x;
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step) {
dst[i] = (src[i] > 0.0) ? src[i] : 0.0;
}
}
19,049 | #include <cstdio>
#include <cstdlib>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error string with file/line context and (by default)
// exits with the error code. FIX: `file` is now const char* — callers pass
// the __FILE__ string literal, which must not bind to a mutable char*.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define NUM_THREADS_PER_BLOCK 300
#define NUM_BLOCKS 20
#define PRINT_TIME 1
#define TEST_CASE 3
#define X_DIM 6
#define X_LENGTH 6000
#define START_ETA 0.1
#define ETA 1.0
#define DELTA .05
#define MAX_ITERS 40000
#define IMUL(a, b) __mul24(a, b)
const char* getfield(char* line, int num);
// Perceptron update step: each thread computes the weight contribution
// eta * x_i * y_i for its (misclassified) sample into shared memory, then a
// single thread per block reduces the block's contributions into
// W[block_id * x_dim .. +x_dim).
// Assumes gridDim.x * blockDim.x == x_length (no tail guard is present);
// with the file's defines, 20 blocks * 300 threads == 6000 samples.
__global__ void calculate_weights(float* X, char* Y, float* W, char* misclassified,int x_length, int x_dim, double eta){
__shared__ float block_weights[NUM_THREADS_PER_BLOCK][X_DIM];
int tx = threadIdx.x;
int tx_global = blockIdx.x*blockDim.x + threadIdx.x;
int block_id = blockIdx.x;
int i,j;
// Misclassified samples contribute eta*x*y; others contribute zero so the
// serial reduction below can sum unconditionally.
if(misclassified[tx_global] == 1){
for(j= 0; j < x_dim;j++){
block_weights[tx][j] = eta*X[tx_global*x_dim+j]*Y[tx_global];
}
}
else{
for(j=0; j < x_dim; j++){
block_weights[tx][j] = 0;
}
}
__syncthreads();
float sum;
// Serial per-block reduction done by one thread. NOTE(review): thread 1 is
// used as the reducer — any single thread works, but tx==0 is conventional.
if(tx==1){
for(j=0;j<x_dim;j++){
sum = 0;
for(i=0; i < NUM_THREADS_PER_BLOCK;i++){
sum = sum+ block_weights[i][j];
}
W[block_id*x_dim+j]= sum;
}
}
}
// Scores every sample against the current weight vector W (length x_dim)
// and records misclassification flags. A sample is misclassified when
// score * label <= 0. sum_missed/not_classified are per-sample 0/1 arrays
// the host later sums. Assumes gridDim.x * blockDim.x covers all samples.
__global__ void classify(float* X, char* Y, float* W, char* misclassified, int* not_classified, int* sum_missed, int x_dim){
float score;
int tx = threadIdx.x;
int tx_global = blockIdx.x*blockDim.x + threadIdx.x;
int j;
score = 0;
sum_missed[tx_global] = 0;
not_classified[tx_global] = 0;
// NOTE(review): each thread only touches its own tx_global slots, so this
// barrier is not required for correctness of the writes above.
__syncthreads();
// Dot product of the sample's features with the weight vector.
for(j=0;j < x_dim; j++){
score += X[tx_global*x_dim + j]*W[j];
}
misclassified[tx_global] = score*Y[tx_global] <= 0.0 ? 1:0;
if(misclassified[tx_global] == 1){
sum_missed[tx_global] = 1;
not_classified[tx_global] = 1;
}
}
// Driver: trains a perceptron on data.csv five times (averaging timing),
// alternating GPU weight-update (calculate_weights) and GPU classification
// (classify) until all samples are classified or MAX_ITERS is reached, then
// verifies the final weights on the host.
int main(int argc, char **argv){
// GPU Timing variables
cudaEvent_t start, stop;
float elapsed_gpu;
int test_case = TEST_CASE;
int h_x_length = X_LENGTH;
int h_x_dim = X_DIM;
int line_counter = 0;
int i;
int total_missed =0;
float verify = 0;
int not_classified = 1;
int iters = 0;
float eta = ETA;
float start_eta = START_ETA;
float delta = DELTA;
int sum_missed_iters[5][2];
// global variables on GPU
float* g_W;
float* g_X;
float* g_score;
char* g_Y;
int* g_not_classified;
char* g_misclassified;
int* g_sum_missed;
float* g_W_matrix;
//global arrays on host
float* h_W;
float* h_W_matrix;
float* h_X;
float* h_score;
char* h_misclassified;
char* h_Y;
int* h_not_classified;
int* h_sum_missed;
int missed = 0;
// Select GPU
// NOTE(review): device index 1 is hard-coded; fails on single-GPU hosts.
CUDA_SAFE_CALL(cudaSetDevice(1));
// Allocate GPU memory
size_t allocSize_X = h_x_dim * h_x_length * sizeof(float);
size_t allocSize_Y = h_x_length * sizeof(char);
size_t allocSize_W = h_x_dim * sizeof(float);
size_t allocSize_Score = h_x_length * sizeof(float);
size_t allocSize_sumMissed = sizeof(int)*h_x_length;
// NOTE(review): allocSize_notClassified is computed but never used below;
// g_not_classified is (correctly) sized with allocSize_sumMissed instead.
size_t allocSize_notClassified = sizeof(int) * NUM_BLOCKS;
size_t allocSize_W_mat = sizeof(float)*h_x_dim*NUM_BLOCKS;
CUDA_SAFE_CALL(cudaMalloc((void **)&g_W, allocSize_W))
CUDA_SAFE_CALL(cudaMalloc((void **)&g_X, allocSize_X));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_Y, allocSize_Y));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_score, allocSize_Score));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_misclassified, allocSize_Y));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_sum_missed, allocSize_sumMissed));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_not_classified, allocSize_sumMissed));
CUDA_SAFE_CALL(cudaMalloc((void **)&g_W_matrix, allocSize_W_mat));
// Allocate arrays on host memory
h_X = (float *) malloc(allocSize_X);
h_Y = (char *) malloc(allocSize_Y);
h_W = (float *) malloc(allocSize_W);
h_W_matrix = (float *) malloc(allocSize_W_mat);
h_misclassified = (char *) malloc(allocSize_Y);
h_score = (float *) malloc(allocSize_Score);
h_sum_missed = (int *) malloc(allocSize_sumMissed);
h_not_classified = (int *) malloc(allocSize_sumMissed);
// Read up to X_LENGTH rows; feature vector per row is
// [1, x, y, x*y, x^2, y^2] (bias plus quadratic expansion).
FILE* stream = fopen("data.csv", "r");
char line[1024];
while (fgets(line, 1024, stream) && line_counter < X_LENGTH)
{
char* tmp = strdup(line);
int idx = line_counter*h_x_dim;
h_X[idx] = 1.0;
h_X[idx + 1] = strtod(getfield(tmp, 1), NULL);
tmp = strdup(line);
h_X[idx + 2] = strtod(getfield(tmp, 2), NULL);
h_X[idx + 3] = h_X[idx + 1]*h_X[idx + 2]; // xy
h_X[idx + 4] = h_X[idx + 1]*h_X[idx + 1]; // x^2
h_X[idx + 5] = h_X[idx + 2]*h_X[idx + 2]; // y^2
// NOTE strtok clobbers tmp
free(tmp);
line_counter++;
}
printf("x length by line counter: %d \n", line_counter);
// Synthesize labels from one of three ground-truth decision boundaries.
for(i=0; i < h_x_length; ++i){
switch(test_case) {
case 1:
h_Y[i] = (0.2*(h_X[i*h_x_dim + 1] - 0.5)) +
(.6-h_X[i*h_x_dim + 2]) > 0 ? 1 : -1;
break;
case 2:
h_Y[i] = (h_X[i*h_x_dim + 1]-.5)*(h_X[i*h_x_dim + 1]-.5) +
(h_X[i*h_x_dim + 2]-.5)*(h_X[i*h_x_dim + 2]-.5) > 0.09 ? 1 : -1;
break;
case 3:
h_Y[i] = 4*(h_X[i*h_x_dim + 1]-.5)*(h_X[i*h_x_dim + 1]-.5) +
(.2-h_X[i*h_x_dim + 2]) > 0 ? 1 : -1;
break;
default:
h_Y[i] = 0;
}
}
float exec_times[5][2];
int k, j;
float acc;;
int index = 0;
float current_eta = eta;
// Five timed training runs; per-run stats are kept for the summary below.
for(k = 0; k < 5; k++){
for(i=0;i< h_x_length;i++){
h_misclassified[i] = 1;
h_not_classified[i] = 0;
h_sum_missed[i] = 0;
}
for(i=0; i < h_x_dim; i++){
h_W[i] = 0;
}
int h_W_size = NUM_BLOCKS* h_x_dim;
for(i = 0; i < h_W_size; i++){
h_W_matrix[i] = 0;
}
CUDA_SAFE_CALL(cudaMemcpy(g_X, h_X, allocSize_X, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_Y, h_Y, allocSize_Y, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_W, h_W, allocSize_W, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_misclassified, h_misclassified, allocSize_Y, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_W_matrix, h_W_matrix, allocSize_W_mat, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_not_classified, h_not_classified,allocSize_sumMissed, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(g_sum_missed, h_sum_missed, allocSize_sumMissed, cudaMemcpyHostToDevice));
iters = 0;
missed = 0;
not_classified = 1;
#ifdef PRINT_TIME
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
#endif
dim3 dimBlock(NUM_THREADS_PER_BLOCK, 1, 1);
dim3 dimGrid(NUM_BLOCKS, 1);
// Training loop: GPU weight deltas -> host reduction across blocks ->
// GPU reclassification, until no sample is misclassified.
while(not_classified && iters <= MAX_ITERS){
// Increment iters
iters++;
// Set condition to zero (to avoid infinite while loop) and set it to one if there's an element that is misclassified
not_classified = 0;
// One block with 500 threads (one thread working on each row of data in X)
calculate_weights<<<dimGrid, dimBlock>>>(g_X, g_Y,g_W_matrix,g_misclassified,h_x_length, h_x_dim, current_eta);
CUDA_SAFE_CALL(cudaPeekAtLastError());
cudaDeviceSynchronize();
// Copy weight vector to host
CUDA_SAFE_CALL(cudaMemcpy(h_W_matrix, g_W_matrix, allocSize_W_mat, cudaMemcpyDeviceToHost));
for(i=0;i<h_x_dim;i++){
acc =0;
for(j=0;j<NUM_BLOCKS;j++){
acc += h_W_matrix[j*h_x_dim + i];
}
h_W[i] += acc;
}
cudaDeviceSynchronize();
CUDA_SAFE_CALL(cudaMemcpy(g_W, h_W, allocSize_W, cudaMemcpyHostToDevice));
// Check classification success
cudaDeviceSynchronize();
classify<<<dimGrid, dimBlock>>>(g_X, g_Y, g_W, g_misclassified, g_not_classified, g_sum_missed,h_x_dim);
CUDA_SAFE_CALL(cudaPeekAtLastError());
cudaDeviceSynchronize();
// Copy arrays back to host
CUDA_SAFE_CALL(cudaMemcpy(h_not_classified, g_not_classified,allocSize_sumMissed, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(h_sum_missed, g_sum_missed,allocSize_sumMissed, cudaMemcpyDeviceToHost));
for(i=0;i<h_x_length;i++){
not_classified += h_not_classified[i];
}
}
#ifdef PRINT_TIME
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
exec_times[k][0] = elapsed_gpu;
printf("Exec time: %f \n", elapsed_gpu);
// NOTE(review): the matching cudaEventDestroy(stop) is missing, so the
// stop event leaks once per run.
cudaEventDestroy(start);
#endif
printf("\n");
for(i=0;i < h_x_length; ++i){
missed += h_sum_missed[i];
}
if(missed == 0){
printf("Perfectly separated data\n");
}
else{
printf("Finished MAX_ITERS (%d iters) and still %d misclassified \n", iters, missed);
}
sum_missed_iters[k][0] = missed;
sum_missed_iters[k][1] = iters;
total_missed =0;
// Host-side verification: rescore every sample with the final weights.
for(i = 0; i < h_x_length; i++){
verify = 0;
for(j=0;j < h_x_dim; j++){
verify += h_X[i*h_x_dim + j] * h_W[j];
}
verify = verify * h_Y[i];
if(verify < 0){
total_missed++;
}
}
exec_times[k][1] = total_missed;
}
printf("Verification Missed: Exec time (ms) Sum Missed: Iters: \n");
int avgIters = 0;
float avg_exec_time = 0;
total_missed = 0;
for(i=0;i<5; i++){
avgIters += sum_missed_iters[i][1];
avg_exec_time += exec_times[i][0];
total_missed += sum_missed_iters[i][0];
printf("\t%f\t\t\t%f\t\t%d\t\t\t%d\n", exec_times[i][1], exec_times[i][0], sum_missed_iters[i][0], sum_missed_iters[i][1]);
}
avgIters = avgIters/ 5;
avg_exec_time = avg_exec_time/ 5;
printf("Avg exec time: %f avg iters: %d sum missed (should be 0): %d \n", avg_exec_time, avgIters, total_missed);
// Free-up device and host memory
CUDA_SAFE_CALL(cudaFree(g_X));
CUDA_SAFE_CALL(cudaFree(g_Y));
CUDA_SAFE_CALL(cudaFree(g_W));
CUDA_SAFE_CALL(cudaFree(g_sum_missed));
CUDA_SAFE_CALL(cudaFree(g_not_classified));
CUDA_SAFE_CALL(cudaFree(g_score));
CUDA_SAFE_CALL(cudaFree(g_misclassified));
free(h_X);
free(h_not_classified);
free(h_sum_missed);
free(h_Y);
free(h_W);
free(h_misclassified);
free(h_score);
return 0;
}
/* Returns the num-th comma-separated field of `line` (1-based), or NULL when
 * there are fewer (non-empty) fields. NOTE: strtok modifies `line` in place,
 * so callers must pass a writable copy. */
const char* getfield(char* line, int num) {
const char* tok = strtok(line, ",");
while (tok && *tok) {
if (--num == 0)
return tok;
tok = strtok(NULL, ",\n");
}
return NULL;
}
|
19,050 | //source: https://github.com/lzhengchun/matrix-cuda/blob/master/matrix_cuda.cu
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#define TYPE float
#define TILE_DIM 32
/*
Returns the current wall-clock time in SECONDS (with microsecond
resolution). Note: the name suggests milliseconds, but the division by
1e6 converts microseconds to seconds — callers below print "sec".
*/
double getMilitime(){
struct timeval ret;
gettimeofday(&ret, NULL);
return ((ret.tv_sec ) * 1000000u + ret.tv_usec) / 1.e6;
}
/* Reference (host) matrix multiply: C = A (MxN) * B (NxK), all row-major.
 * BUG FIX: C is zeroed before accumulation — the caller passes a freshly
 * malloc'ed (uninitialized) buffer, and "+=" previously accumulated the
 * products onto garbage values. */
void MatrixMultiplicationCPU(TYPE*A, TYPE*B, TYPE*C, int M, int N, int K){
int i,j,count;
for(i=0;i<M*K;++i)
C[i] = 0;
for(i=0;i<M;++i)
for(j=0;j<K;++j)
for(count=0;count<N;++count){
C[i*K+j] += A[i*N+count] * B[count*K+j];
}
}
// Prints a formatted message to stderr and exits with status 1.
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#ifdef __CUDACC__
// Aborts with a diagnostic when a CUDA runtime call returns an error.
inline void checkCuda(cudaError_t e) {
if (e != cudaSuccess) {
// cudaGetErrorString() isn't always very helpful. Look up the error
// number in the cudaError enum in driver_types.h in the CUDA includes
// directory for a better explanation.
err("CUDA Error %d: %s\n", e, cudaGetErrorString(e));
}
}
// Checks (and clears) the most recent CUDA error, e.g. after a kernel launch.
inline void checkLastCudaError() {
checkCuda(cudaGetLastError());
}
#endif
/* Allocates host buffers: A (M*N), B (N*K), C (M*K) elements of TYPE.
 * Aborts via assert if any allocation fails.
 * BUG FIX: sizes previously used sizeof(TYPE*) (pointer size) instead of
 * sizeof(TYPE) (element size) — that only happened to work because a
 * pointer is at least as large as a float, and it over-allocated 2x on
 * 64-bit builds. */
void Allocate(TYPE**A, TYPE**B, TYPE**C, int M, int N, int K){
(*A) = (*B) = (*C) = NULL;
(*A) = (TYPE*)malloc(sizeof(TYPE) * M * N);
(*B) = (TYPE*)malloc(sizeof(TYPE) * N * K);
(*C) = (TYPE*)malloc(sizeof(TYPE) * M * K);
assert((*A) != NULL);
assert((*B) != NULL);
assert((*C) != NULL);
}
/* Initializes the inputs: every element of A (M*N) becomes 1.0 and every
 * element of B (N*K) becomes 2.0. K is accepted for signature symmetry. */
void Fill(TYPE*A, TYPE*B, int M, int N, int K){
for (int idx = 0; idx < M*N; ++idx)
A[idx] = 1.0;
for (int idx = 0; idx < N*K; ++idx)
B[idx] = 2.0;
}
/* Releases the three host buffers produced by Allocate(). The pointers are
 * not reset to NULL afterwards. */
void UnAllocate(TYPE**A, TYPE**B, TYPE**C){
free(*A);
free(*B);
free(*C);
}
/* Prints the M*N elements of a row-major matrix A.
 * NOTE(review): the newline is emitted every M elements, but a row of a
 * row-major MxN matrix has N columns — for non-square matrices the line
 * breaks land in the wrong place; confirm whether i%N was intended. */
void Print2DMatrix(TYPE*A, int M, int N) {
int i;
for(i = 0; i < M*N; ++i){
if((i%M)==0) printf("\n");
printf("%f ",A[i]);
}
printf("\n");
}
//source: https://stackoverflow.com/questions/18815489/cuda-tiled-matrix-matrix-multiplication-with-shared-memory-and-matrix-size-whic
// Tiled matrix multiply C = A*B using TILE_DIM x TILE_DIM shared-memory
// tiles. Expects blockDim == (TILE_DIM, TILE_DIM); out-of-range tile slots
// are zero-filled so partial edge tiles accumulate correctly.
__global__ void MatrixMultiplication(float* A, float* B, float* C, int ARows, int ACols, int BRows,
int BCols, int CRows, int CCols)
{
float CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
// March tiles along the shared dimension; ceil(ACols / TILE_DIM) steps.
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
// Barrier 1: the whole tile must be loaded before anyone multiplies.
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n)
CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
// Barrier 2: finish consuming the tile before the next load overwrites it.
__syncthreads();
}
// Guarded store; the index is equivalent to Row*CCols + Col.
if (Row < CRows && Col < CCols)
C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +
(blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
/* Driver: builds an MxN matrix of 1s and an NxK matrix of 2s, multiplies
 * them on the GPU with the tiled kernel, and verifies against the CPU
 * reference. Usage: prog M N K */
int main(int argc, char* argv[]){
if(argc<4){
printf("Input Error\n");
return 1;
}
int M = atoi(argv[1]);
int N = atoi(argv[2]);
int K = atoi(argv[3]);
TYPE*A,*B,*C, *A_Device, *B_Device, *C_Device;
Allocate(&A,&B,&C,M,N,K);
Fill(A,B,M,N,K);
//Allocate in Device
checkCuda(cudaMalloc(&A_Device, M*N*sizeof(TYPE)));
checkCuda(cudaMalloc(&B_Device, N*K*sizeof(TYPE)));
checkCuda(cudaMalloc(&C_Device, M*K*sizeof(TYPE)));
//Copy to Device (now checked: a silently failed copy corrupts the result)
checkCuda(cudaMemcpy(A_Device, A, M*N*sizeof(TYPE), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(B_Device, B, N*K*sizeof(TYPE), cudaMemcpyHostToDevice));
dim3 dimBlock(32, 32);
dim3 dimGrid;
dimGrid.x = (K + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (M + dimBlock.y - 1) / dimBlock.y;
printf("start timing\tm=%d,n=%d,k=%d\n",M,N,K);
double start_time = getMilitime();
MatrixMultiplication<<<dimGrid,dimBlock>>>(A_Device,B_Device,C_Device,M,N,N,K,M,K);
cudaDeviceSynchronize(); checkLastCudaError();
printf("elapsed time (Tiled CUDA MatMult): %f sec\n", getMilitime()-start_time);
//copy to Host
checkCuda(cudaMemcpy(C, C_Device, M*K*sizeof(TYPE), cudaMemcpyDeviceToHost));
//Free in Device
checkCuda(cudaFree(A_Device));
checkCuda(cudaFree(B_Device));
checkCuda(cudaFree(C_Device));
//verify results
// BUG FIX: allocate element-sized, zero-initialized storage. The old code
// used sizeof(TYPE*) and left the buffer uninitialized, which the "+="
// accumulation inside MatrixMultiplicationCPU then added onto.
TYPE*C_CPU = (TYPE*)calloc((size_t)M * K, sizeof(TYPE));
assert(C_CPU != NULL);
MatrixMultiplicationCPU(A,B,C_CPU,M,N,K);
// Exact float comparison is safe here: the 1.0/2.0 fill makes every
// product representable exactly.
int count;
for(count=0;count<M * K;count++){
if(C_CPU[count]!=C[count]) {printf("Not Equal, idx: %d!",count);break;}
}
free(C_CPU);
//free on Host
UnAllocate(&A,&B,&C);
return(0);
}
19,051 | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include <cuda_runtime_api.h>
#define restrict __restrict__
#define PADDINGCLASS -2
#define OUTPUT_FILE "ocuda"
#define INPUT_FILE "data"
#define KMAX 20
#define CLASSESMAX 100
void check_error(cudaError_t err, const char *msg);
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg);
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements);
void writeOutput(float* coords, int* classes, int spacedim, int numels);
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num);
__global__ void knnPunisher(float2* kOutput, int* d_classes, int numels, int newels, int k, int classes_num);
__device__ float distance(float* coords, float* coords2, int spacedim);
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid);
__device__ void swapfloat2(float2* d1, float2* d2);
__device__ int deviceFindMode(int* kclasses, int classes_num, int k);
__device__ float distanceShm(float* coords, int left, int spacedim);
//Declaration of shared-memory. It's going to contains partial minimum of distances
extern __shared__ int mPartial[];
// Driver: loads labelled and unlabelled points from <inputfile>, classifies
// the unlabelled ones on the GPU with k-NN (knn kernel), resolves
// inter-dependent classifications with knnPunisher, and writes the result.
int main(int argc, char *argv[])
{
int newels; //number of points we want classify
int k; //number of nearest points we use to classify
int numels; //total element already classified
int spacedim;
char filePath[255]; //path + filname of input file
int classes_num; //number of classes
float* h_coords; //coords of existing points with a class
int* h_classes; //array contains the class for each points
//*** Device-variables-declaration ***
float* d_coords;
// NOTE(review): d_determinate is allocated below but never passed to a
// kernel or copied — apparently dead.
int2* d_determinate;
int* d_classes;
float2* d_kOutput;
//*** end-device-declaration
//***cudaEvent-declaration***
cudaEvent_t before_allocation, before_input, before_upload, before_knn, before_download;
cudaEvent_t after_allocation, after_input, after_upload, after_knn, after_download;
//***end-cudaEvent-declaration***
//Requisiti: numels e newels devono essere maggiori di K
// (Requirement: numels and newels must both be greater than k.)
if (argc > 2)
{
strcpy(filePath, argv[1]);
k = atoi(argv[2]);
}
else
{
printf("how-to-use: knn <inputfile> <k> \n");
exit(1);
}
//***cuda-init-event***
check_error(cudaEventCreate(&before_allocation), "create before_allocation cudaEvent");
check_error(cudaEventCreate(&before_input), "create before_input cudaEvent");
check_error(cudaEventCreate(&before_upload), "create before_upload cudaEvent");
check_error(cudaEventCreate(&before_knn), "create before_knn cudaEvent");
check_error(cudaEventCreate(&before_download), "create before_download cudaEvent");
check_error(cudaEventCreate(&after_allocation), "create after_allocation cudaEvent");
check_error(cudaEventCreate(&after_input), "create after_input cudaEvent");
check_error(cudaEventCreate(&after_upload), "create after_upload cudaEvent");
check_error(cudaEventCreate(&after_knn), "create after_knn cudaEvent");
check_error(cudaEventCreate(&after_download), "create after_download cudaEvent")
;
//***end-cuda-init-event***
FILE *fp;
if((fp = fopen(filePath, "r")) == NULL)
{
printf("No such file\n");
exit(1);
}
// fileSize is measured but currently unused beyond this point.
fseek(fp, 0L, SEEK_END);
float fileSize = ftell(fp);
rewind(fp);
// Header line: numels, newels, classes_num, spacedim.
int count = fscanf(fp, "%d,%d,%d,%d\n", &numels, &newels, &classes_num, &spacedim);
int totalElements = numels + newels;
//*** allocation ***
cudaEventRecord(before_allocation);
h_coords = (float*) malloc(sizeof(float)*totalElements*spacedim);
h_classes = (int*) malloc(sizeof(int)*totalElements);
//*** device-allocation ***
check_error(cudaMalloc(&d_coords, totalElements*spacedim*sizeof(float)), "alloc d_coords_x");
check_error(cudaMalloc(&d_classes, totalElements*sizeof(int)), "alloc d_classes");
check_error(cudaMalloc(&d_determinate, newels*2*sizeof(int)), "alloc d_determinate");
// KMAX*2 floats per new element == KMAX float2 neighbour slots.
check_error(cudaMalloc(&d_kOutput, newels*KMAX*2*sizeof(float)), "alloc d_kOutput");
//*** end-device-allocation ***
cudaEventRecord(after_allocation);
///***input-from-file***
cudaEventRecord(before_input);
readInput(fp, h_coords, h_classes, spacedim, numels, totalElements);
cudaEventRecord(after_input);
fclose(fp);
///***end-input-from-file***
//***copy-arrays-on-device***
cudaEventRecord(before_upload);
check_error(cudaMemcpy(d_coords, h_coords, totalElements*spacedim*sizeof(float), cudaMemcpyHostToDevice), "copy d_coords");
check_error(cudaMemcpy(d_classes, h_classes, totalElements*sizeof(int), cudaMemcpyHostToDevice), "copy d_classes");
cudaEventRecord(after_upload);
//***end-copy-arrays-on-device***
const int blockSize = 512;
int numBlocks = (newels + blockSize - 1)/blockSize;
cudaEventRecord(before_knn);
knn<<<numBlocks, blockSize>>>(d_coords, d_kOutput, totalElements, numels, spacedim, k, d_classes, classes_num);
knnPunisher<<<numBlocks, blockSize, newels*sizeof(int)*2>>>(d_kOutput, d_classes, numels, newels, k, classes_num);
cudaEventRecord(after_knn);
// Blocking copy of only the newly classified tail of the classes array.
check_error(cudaMemcpy(h_classes+numels, d_classes+numels, newels*sizeof(int), cudaMemcpyDeviceToHost), "download classes");
check_error(cudaEventSynchronize(after_knn), "sync cudaEvents");
printStats(before_knn, after_knn, "knn");
// NOTE(review): host/device buffers and the cudaEvents are never released;
// acceptable at process exit but worth fixing for library use.
writeOutput(h_coords, h_classes, spacedim, totalElements);
return 0;
}
// Terminates the process (exit status = the CUDA error code) when err
// signals failure; msg names the failing operation in the diagnostic.
void check_error(cudaError_t err, const char *msg)
{
if (err == cudaSuccess)
return;
fprintf(stderr, "%s : error %d (%s)\n", msg, err, cudaGetErrorString(err));
exit(err);
}
// Scratch global reused by printStats for the elapsed-time query.
float runtime;
// Prints the elapsed time (ms) between two recorded cudaEvents; both events
// must have completed (e.g. via cudaEventSynchronize) before calling.
void printStats(cudaEvent_t before, cudaEvent_t after, const char *msg)
{
check_error(cudaEventElapsedTime(&runtime, before, after), msg);
printf("%s %gms\n", msg, runtime);
}
// One thread per *unclassified* point (global index gid in
// [numels, totalElements)). Builds the k nearest neighbours among the
// already-classified points plus the unclassified points that precede gid;
// if any neighbour is itself unclassified, the class is marked -1 for
// knnPunisher to resolve later. The neighbour list is also written to
// kOutput (KMAX float2 slots per new point: .x = squared distance,
// .y = neighbour index).
__global__ void knn(float* coords, float2* kOutput, int totalElements, int numels, int spacedim, int k, int* classes, int classes_num)
{
int gid = numels + threadIdx.x + blockIdx.x*blockDim.x; //id del punto da determinare
if (gid >= totalElements) return;
float* newPointCoords = coords+spacedim*gid;
float* pointCoords;
float2 kPoints[KMAX];
int i = 0, size = 0, count = 0;
float2 dist;
// Pass 1: candidates among the already-classified points.
for (i = 0; i < numels; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
insert(kPoints, dist, &size, k, gid);
}
// Pass 2: i continues from numels — candidates among the earlier
// unclassified points; count tracks how many made it into the top-k.
for (count=0; i < gid; i++)
{
pointCoords = coords+spacedim*i;
dist = make_float2(distance(newPointCoords, pointCoords, spacedim), i);
count += insert(kPoints, dist, &size, k, gid);
}
if (count > 0)
{
// Some neighbour is itself unclassified: defer to knnPunisher.
classes[gid] = -1;
}
else
{
int kclasses[KMAX];
for (int j = 0; j < k; j++)
kclasses[j] = classes[(int)(kPoints[j].y)];
classes[gid] = deviceFindMode(kclasses, classes_num, k);
}
//copia kPoints in kOutput (export neighbour list for the second kernel)
int newelId = gid-numels;
for (i = 0; i < k; i++)
kOutput[newelId*KMAX + i] = kPoints[i];
}
// Second pass: iteratively resolves the new points that knn left at -1
// because some of their neighbours were themselves unclassified. Each
// iteration mirrors the current classes into shared memory and classifies a
// point once all of its neighbour dependencies are determined.
// NOTE(review): the early `return` statements and the per-iteration
// `__syncthreads()` mean the barrier is executed under divergent control
// flow (threads that returned never reach it) — undefined behaviour per the
// CUDA model; restructure so all threads hit the barrier. Also, if
// dependencies form a cycle the while loop never terminates.
__global__ void knnPunisher(float2* kOutput, int* classes, int numels, int newels, int k, int classes_num)
{
//Declaration of shared-memory. It's going to contains partial minimum of distances
extern __shared__ int mPartial[];
int gid = threadIdx.x + blockIdx.x*blockDim.x;
if (gid >= newels) return;
//stiamo lancio un kernel per ogni newels
//prendiamo la classe del newels e la mettiamo nella shared memory
// (one thread per new element; its class snapshot lives in shared memory)
int i, id, lid, kclasses[KMAX], kPoints[KMAX], count = 1;
//Se sono qui la classe per il kPoint è da determinare
// (load this point's neighbour indices from the knn kernel's output)
for (i = 0; i < k; i++)
kPoints[i] = kOutput[gid*KMAX+i].y; //gid
while(count != 0)
{
// Refresh the shared snapshot of the new points' classes.
// NOTE(review): threads redundantly overwrite overlapping ranges here.
for (i = 0; i < gid; i++)
mPartial[i] = classes[i+numels];
mPartial[gid] = classes[gid+numels];
if (mPartial[gid] != -1) return;
__syncthreads();
//Le sue dipendenze, se già determinate stanno nella shared-memory
// (count the still-undetermined neighbour dependencies)
count = 0;
for (i = k-1; i >= 0; i--)
{
id = kPoints[i];
lid = id - numels;
if (id > numels && mPartial[lid] < 0)
{
//segno quelli indeterminati
count++;
break;
}
}
if (count == 0)
{
//posso determinare il punto
//le sue dipendenze si trovano in shared memory
// (all dependencies resolved: take the mode of neighbour classes)
for (i = 0; i < k; i++)
kclasses[i] = classes[kPoints[i]];
classes[gid+numels] = deviceFindMode(kclasses, classes_num, k);
}
}
}
// Returns the most frequent class among the k neighbour classes in
// kclasses. Ties are broken in favour of the class that appears *first* in
// kclasses (i.e. belongs to a nearer neighbour, since kclasses is in
// ascending-distance order). Assumes class ids are in [0, CLASSESMAX).
__device__ int deviceFindMode(int* kclasses, int classes_num, int k)
{
int classesCount[CLASSESMAX];
int i;
int temp=0;
// Histogram of the k neighbour classes.
for (i = 0; i < CLASSESMAX; i++)
classesCount[i] = 0;
for (i = 0; i < k; i++){
temp=kclasses[i];
classesCount[temp]+=1;
}
int max = 0;
int maxValue = classesCount[0];
for (i = 1; i < classes_num; i++)
{
int value = classesCount[i];
if (value > maxValue)
{
max = i;
maxValue = value;
}
else if (value != 0 && maxValue == value)
{
// Tie: scan kclasses in neighbour order; whichever of the two tied
// classes appears first (closest neighbour) wins.
int j = 0;
for (j = 0; j < k; j++)
{
if (kclasses[j] == i)
{
max = i;
break;
}
else if (kclasses[j] == max)
break;
}
}
}
return max;
}
//inserimento smart in kPoints
// Inserts newDist (.x = squared distance, .y = point index) into the
// ascending-by-distance array kPoints of current length *size (capacity k),
// keeping it sorted via a single insertion-sort pass from the tail.
// Returns 1 if the candidate entered the top-k, 0 if it was discarded.
// gid is unused here (kept for the call-site signature).
__device__ int insert(float2* kPoints, float2 newDist, int* size, int k, int gid)
{
int inserted = 0;
if (*size == 0)
{
//Caso base: inserimento su array vuoto (base case: empty array)
kPoints[0] = newDist;
*size = *size + 1;
return 1;
}
int i = 1;
float2* value = &newDist; //nuovo elemento (the element being sifted)
float2* tail = &(kPoints[*size-i]);
if (*size < k)
{
// Room left: append, then let the loop below sift it into place.
kPoints[*size] = newDist;
value = &(kPoints[*size]);
inserted = 1;
}
//partire della fine, swap se trovo elemento più grande - mi fermo se trovo elemento più piccolo
// (walk from the tail, swapping while the existing entry is farther)
while (i <= *size && (*tail).x > (*value).x)
{
swapfloat2(tail, value);
value = tail;
i++;
tail = &(kPoints[*size-i]);
inserted = 1;
}
if (inserted && *size < k) *size = *size + 1;
return inserted;
}
// Exchanges the contents of two float2 values (plain struct assignment —
// float2 is a trivially copyable CUDA vector type).
__device__ void swapfloat2(float2* d1, float2* d2)
{
float2 tmp = *d1;
*d1 = *d2;
*d2 = tmp;
}
// read input from file
// Reads the body of the input file: first numels lines of
// "coord,...,coord,class", then (totalElements - numels) lines of
// "coord,...,coord,-1" for the points to classify. The "-1" is matched as a
// literal by fscanf, so classes[] entries past numels are left untouched
// (the knn kernels overwrite them on the device side).
void readInput(FILE* file, float* coords, int* classes, int spacedim, int numels, int totalElements)
{
int i, j;
int count;
for(i=0; i<numels; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim +j]));
count = fscanf(file, "%d\n", &(classes[i]));
}
for(; i < totalElements; i++)
{
for (j = 0; j < spacedim; j++)
count = fscanf(file, "%f,", &(coords[i*spacedim+j]));
count = fscanf(file, "-1\n");
}
// fscanf return values are deliberately ignored; this keeps `count` "used"
// to silence unused-result warnings.
count++;
}
//Write Output on file
// Writes all numels points to OUTPUT_FILE, one line per point:
// "coord,...,coord,class".
void writeOutput(float* coords, int* classes, int spacedim, int numels)
{
FILE* fp = fopen(OUTPUT_FILE, "w");
for (int i = 0; i < numels; i++)
{
for (int j = 0; j < spacedim; j++)
fprintf(fp, "%lf,", coords[i*spacedim+j]);
fprintf(fp, "%d\n", classes[i]);
}
fclose(fp);
}
//multidimensional euclidian distance (without sqrt)
// Squared Euclidean distance between two spacedim-dimensional points.
// The sqrt is intentionally omitted — callers only rank distances.
__device__ float distance(float* coords, float* coords2, int spacedim)
{
float acc = 0;
for (int d = 0; d < spacedim; d++)
{
const float delta = coords[d] - coords2[d];
acc += delta * delta;
}
return acc;
}
|
19,052 | #include <stdio.h>
#include <stdlib.h>
// Computes d_out[i] = 2 * d_in[i] + 1 element-wise. Assumes the launch
// configuration exactly covers the array (no bounds guard).
__global__ void multAdd(float *d_in, float *d_out)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: the input was previously read into an `int`, truncating the
// fractional part of each float before the multiply-add.
float f = d_in[idx];
d_out[idx] = ((2*f) + 1);
}
// Driver: fills an array with 0..127, applies 2*x+1 on the GPU with a
// single 128-thread block (exactly one thread per element, matching the
// kernel's lack of a bounds guard), and prints the results 4 per line.
// CUDA API return codes are not checked here.
int main(int argc, char** argv)
{
int ARRAY_SIZE = 128;
int ARRAY_MEM = ARRAY_SIZE*sizeof(float);
float* h_in = (float*)malloc(ARRAY_MEM);
float* h_out = (float*)malloc(ARRAY_MEM);
float *d_out, *d_in;
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = i;
}
cudaMalloc(&d_in, ARRAY_MEM);
cudaMalloc(&d_out, ARRAY_MEM);
cudaMemcpy(d_in, h_in, ARRAY_MEM, cudaMemcpyHostToDevice);
multAdd<<<1, 128>>>(d_in, d_out);
// Blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(h_out, d_out, ARRAY_MEM, cudaMemcpyDeviceToHost);
for(int i = 0; i < ARRAY_SIZE; i += 4)
{
printf("%10f %10f %10f %10f\n", h_out[i], h_out[i+1], h_out[i+2], h_out[i+3]);
}
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
free(h_out);
return 0;
}
19,053 | #include "includes.h"
// Clamps 2*size window coordinate pairs into the frame and enforces a
// minimum window width: threads [0, size) handle the x pair, threads
// [size, 2*size) the y pair. When a clamped pair collapses below minWidth,
// it is re-centred around its midpoint. BLOCK_SIZE comes from includes.h.
// NOTE(review): the x pair is clamped with the `h` bound and the y pair
// with `w` — if h/w are height/width this looks swapped; confirm the
// intended axis convention before changing.
__global__ void dirtyFixWindowsVarScaleKernel( float *xMin, float *xMax, float *yMin, float *yMax, const int size, const float h, const float w, const float minWidth) {
int idx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
if (idx < 2*size) {
float paramMin, paramMax;
if (idx < size) {
paramMin = max(-h+1, min(h-1, xMin[idx]));
paramMax = max(-h+1, min(h-1, xMax[idx]));
// Window narrower than the minimum: widen symmetrically around the mean.
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
xMin[idx] = paramMin;
xMax[idx] = paramMax;
} else {
idx -= size;
paramMin = max(-w+1, min(w-1, yMin[idx]));
paramMax = max(-w+1, min(w-1, yMax[idx]));
if (paramMin + minWidth - 0.99 > paramMax) {
const float mean = 0.5 * (paramMin + paramMax);
paramMin = mean - 0.5 * (minWidth - 0.9);
paramMax = mean + 0.5 * (minWidth - 0.9);
}
yMin[idx] = paramMin;
yMax[idx] = paramMax;
}
}
}
19,054 |
// 256-bin byte histogram of d_text[0..textLength). Each block accumulates a
// private shared-memory histogram with atomics, then merges it into the
// global d_histgram (which the caller must have zero-initialized).
extern "C" __global__
void histgramMakerKernel_SharedMemAtomics(int *d_histgram,
const unsigned char *d_text, int textLength) {
__shared__ int localHist[256];
// Cooperatively zero the per-block histogram.
for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
localHist[bin] = 0;
__syncthreads();
// Grid-stride pass over the text; shared-memory atomics keep contention
// off global memory.
const int step = gridDim.x * blockDim.x;
for (int pos = blockDim.x * blockIdx.x + threadIdx.x; pos < textLength; pos += step) {
const int symbol = d_text[pos];
atomicAdd(&localHist[symbol], 1);
}
__syncthreads();
// Merge: one global atomic per bin per block.
for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
atomicAdd(&d_histgram[bin], localHist[bin]);
}
|
19,055 | #include <stdio.h>
#include <curand_kernel.h>
#include <curand.h>
// Adapted from
// https://stackoverflow.com/questions/26650391/generate-random-number-within-a-function-with-curand-without-preallocation
// Fills vals[0..n) with uniform doubles in (0, 1]. Each launched thread
// seeds its own curand state from clock()+tId and then writes the ENTIRE
// array, so this is only race-free when launched with a single thread
// (as main does with <<<1, 1>>>). Results are not reproducible across runs
// because the seed depends on clock().
__global__ void myfunc(double *vals, size_t n) {
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
curandState state;
curand_init((unsigned long long)clock() + tId, 0, 0, &state);
for (size_t i = 0; i < n; i++) {
vals[i] = curand_uniform_double(&state);
}
}
// Adapted from
// https://stackoverflow.com/questions/7989039/use-of-cudamalloc-why-the-double-pointer
// Driver: generates 20 uniform doubles on the device with a single-thread
// launch of myfunc and prints them. API return codes are not checked.
int main() {
size_t n = 20;
size_t num_bytes = n * sizeof(double);
double *vals_host = 0;
vals_host = (double*)malloc(num_bytes);
double *vals_device = 0;
cudaMalloc((void**)&vals_device, num_bytes);
myfunc<<<1, 1>>>(vals_device, n);
// Blocking copy also synchronizes with the kernel launch.
cudaMemcpy(vals_host, vals_device, num_bytes, cudaMemcpyDeviceToHost);
for (size_t i = 0; i < n; i++) {
// BUG FIX: %d with a size_t argument is undefined behaviour on LP64;
// %zu is the correct length modifier for size_t.
printf("%zu = %lf\n", i, vals_host[i]);
}
free(vals_host);
cudaFree(vals_device);
return 0;
}
|
19,056 | #include <iostream>
#include <assert.h>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 2 of 4: implement the kernel
// Writes each in-range element of the dimx x dimy matrix its own row-major
// flat index: a[row*dimx + col] = row*dimx + col. Guarded for partial
// blocks at the grid edges.
__global__ void kernel( int *a, int dimx, int dimy )
{
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col < dimx && row < dimy) {
const int flat = col + row * dimx;
a[flat] = flat;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver: fills a dimx x dimy device matrix with its own flat indices via
// the 2D kernel and verifies the copy on the host. Passing all four CLI
// args (dimx dimy blockx blocky) overrides the 4x4 / 2x2 defaults; with
// fewer than four args the defaults are used.
int main(int argc, char* argv[]) {
cudaSetDevice(MYDEVICE);
// Part 1 and 4 of 4: set the dimensions of the matrix
int const dimx = (argc > 4) ? std::atoi(argv[1]) : 4;
int const dimy = (argc > 4) ? std::atoi(argv[2]) : 4;
std::cout << "MATRIX " << dimx << "x" << dimy << std::endl;
// int dimx = 4;
// int dimy = 4;
// sizeof(dimx) == sizeof(int), matching the int element type.
int num_bytes = dimx*dimy*sizeof(dimx);
int *d_a=0, *h_a=0; // device and host pointers
// h_a = (int*)malloc(num_bytes);
// Pinned host allocation (enables faster transfers than pageable malloc).
cudaMallocHost( &h_a,num_bytes );
//allocate memory on the device
cudaMalloc( &d_a,num_bytes );
if( NULL==h_a || NULL==d_a ) {
std::cerr << "couldn't allocate memory" << std::endl;
return 1;
}
// Part 2 of 4: define grid and block size and launch the kernel
dim3 grid, block;
// block.x = 2;
// block.y = 2;
block.x = (argc > 4) ? std::atoi(argv[3]) : 2;
block.y = (argc > 4) ? std::atoi(argv[4]) : 2;
std::cout << "#THREADS " << block.x << "x" << block.y << std::endl;
// Ceiling division so partial blocks cover the matrix edges.
grid.x = (dimx + block.x - 1)/block.x;
grid.y = (dimy + block.y - 1)/block.y;
std::cout << "#BLOCKS " << grid.x << "x" << grid.y << std::endl;
kernel<<<grid, block>>>( d_a, dimx, dimy );
// block until the device has completed
cudaDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// device to host copy
cudaMemcpy( h_a, d_a, num_bytes, cudaMemcpyDeviceToHost );
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// verify the data returned to the host is correct
for(int row=0; row<dimy; row++)
{
for(int col=0; col<dimx; col++)
assert(h_a[row * dimx + col] == row * dimx + col);
}
// free host memory
// free( h_a );
cudaFreeHost(h_a);
// free device memory
cudaFree( d_a );
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
std::cout << "Correct!" << std::endl;
return 0;
}
// Reads (and clears) the most recent CUDA error; on failure prints msg with
// the CUDA error string and terminates the process with a nonzero status.
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess == err)
return;
std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err) << std::endl;
exit(-1);
}
|
19,057 | #include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
#define N 1024
#define TB 32
// Builds two N x N matrices on the host, multiplies them element-wise
// into c, and reports the elapsed CPU time.
int main(){
    clock_t tStart = clock();
    int *a , *b , *c;
    size_t size = N * N * sizeof(int) ;
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );
    // Fix: guard against allocation failure before touching the buffers.
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "couldn't allocate memory\n");
        free(a); free(b); free(c);
        return 1;
    }
    // a[i][j] = i + j
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            a[i * N + j] = i + j;
    // b is all ones
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            b[i * N + j] = 1;
    // c = a .* b (element-wise product)
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            c[i * N + j] = a[i * N + j] * b[i * N + j];
    printf("Time taken: %.2fms\n", 1000.0 * (double)(clock() - tStart)/CLOCKS_PER_SEC);
    // Fix: the original leaked a, b and c.
    free(a);
    free(b);
    free(c);
    return 0;
}
|
19,058 | #include "includes.h"
// Numerically stable logistic sigmoid: 1 / (1 + exp(-x)).
// Branches on the sign of x so expf is always called with a
// non-positive argument, avoiding overflow for large |x|.
__device__ inline float stableLogit(float x) {
  if(x >= 0) {
    float z = expf(-x);
    // Fix: use float literals (1.0f); the original's 1.0 promoted the
    // division to double precision inside a float device function.
    return 1.0f / (1.0f + z);
  } else {
    float z = expf(x);
    return z / (1.0f + z);
  }
}
// Backward pass of an LSTM cell state update.
// Rows are distributed across blocks (block-strided over `rows`) and
// columns across threads (thread-strided over `cols`). xW, sU and b are
// laid out as 4 concatenated gate sections of `cols` each; this kernel
// consumes the first three (forget, input, candidate).
// outCell / outXW / outSU / outB may be null to skip that gradient.
// outB is shared across rows, hence the atomicAdd accumulation.
// mask (may be null) zeroes the contribution of masked-out rows.
__global__ void gLSTMCellBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
// m is 1 when there is no mask, otherwise the row's mask value.
float m = !mask || mask[j];
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
// Recompute the forward gate activations for this element.
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
// Incoming gradient for this cell element (shadows the `adj` parameter).
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += (m * gf - m + 1) * adj;
// dc/d(b_f) = dc/d(xW_f) ...
float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
if(outXW)
rowOutXW[i] += dcdxf;
if(outSU)
rowOutSU[i] += dcdxf;
if(outB)
atomicAdd(outB + i, dcdxf);
// dc/d(b_i) ...
float dcdb_i = m * gc * gi * (1 - gi) * adj;
if(outXW)
rowOutXW[k] += dcdb_i;
if(outSU)
rowOutSU[k] += dcdb_i;
if(outB)
atomicAdd(outB + k, dcdb_i);
// dc/d(b_c) ...
float dcdxc = m * gi * (1 - gc * gc) * adj;
if(outXW)
rowOutXW[l] += dcdxc;
if(outSU)
rowOutSU[l] += dcdxc;
if(outB)
atomicAdd(outB + l, dcdxc);
}
}
}
}
}
19,059 | #include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernels can be used on large arrays, but they are slow.
Best advice: use __syncthreads() before reading data written under a different index.
*/
// Forward Hillis-Steele-style inclusive scan of d_in into d_out,
// operating directly on global memory.
// NOTE(review): __syncthreads() only synchronizes threads within ONE
// block, yet indices here span the whole array — with more than one
// block the result is nondeterministic, which explains the
// "different result every time" observation.
// NOTE(review): the early `return` means later __syncthreads() calls
// are reached by only part of the block (undefined behaviour); threads
// should stay in the loop and merely skip the out-of-range write.
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = d_out[idx];
__syncthreads();
d_out[idx + step] += in1;
}
}
// Backward-looking Hillis-Steele inclusive scan: each element pulls
// from idx - step instead of pushing to idx + step.
// NOTE(review): same defects as the forward variant — __syncthreads()
// cannot order threads across blocks, and the early `return` makes the
// barriers divergent (undefined behaviour).
// NOTE(review): no bounds guard on idx; the launch configuration must
// not exceed the array length or d_out[idx] = d_in[idx] reads/writes
// out of range.
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernels can only be used on small arrays (one block), but they are fast.
*/
/*
 * Hillis-Steele forward inclusive scan on a single block using shared
 * memory. Launch with one block and blockDim.x * sizeof(float) dynamic
 * shared memory; assumes array_size <= blockDim.x.
 * Fix: the original returned early from out-of-range threads, so the
 * __syncthreads() calls inside the loop were executed by only part of
 * the block (undefined behaviour). All threads now stay in the loop
 * with uniform barriers; out-of-range threads simply do no work.
 */
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
    extern __shared__ float sdata[];
    int idx = threadIdx.x;
    if (idx < array_size)
        sdata[idx] = d_in[idx];
    for (int step = 1; step < array_size; step *= 2) {
        __syncthreads();
        // Read before the barrier-separated write, as in the original.
        float in1 = (idx < array_size) ? sdata[idx] : 0.0f;
        __syncthreads();
        if (idx + step < array_size)
            sdata[idx + step] += in1;
    }
    if (idx < array_size)
        d_out[idx] = sdata[idx];
}
/*
 * Hillis-Steele backward-looking inclusive scan on a single block using
 * shared memory (each element accumulates sdata[idx - step]).
 * Launch with one block and blockDim.x * sizeof(float) dynamic shared
 * memory; assumes array_size <= blockDim.x.
 * Fix: the original's loop bound depended on idx and it returned early,
 * so __syncthreads() was divergent (undefined behaviour). The loop now
 * runs the same number of iterations in every thread, with per-thread
 * predicates guarding only the data accesses. Also removed the
 * duplicated initial load.
 */
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
    extern __shared__ float sdata[];
    int idx = threadIdx.x;
    if (idx < array_size)
        sdata[idx] = d_in[idx];
    for (int step = 1; step < array_size; step *= 2) {
        __syncthreads();
        float in1 = (idx >= step && idx < array_size) ? sdata[idx - step] : 0.0f;
        __syncthreads();
        if (idx >= step && idx < array_size)
            sdata[idx] += in1;
    }
    if (idx < array_size)
        d_out[idx] = sdata[idx];
}
/*
This kernel produces a correct result only when the array size is a power of 2.
*/
// Work-efficient Blelloch exclusive scan over a single block: an
// up-sweep (reduce) phase followed by a down-sweep phase.
// Preconditions: one block, n a power of two, n <= blockDim.x, and
// n * sizeof(float) dynamic shared memory.
// NOTE(review): the classic formulation has each thread load two
// elements; here each thread loads one, so only blockDim.x elements are
// staged — confirm n matches the launch configuration.
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[thid] = g_idata[thid];
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
// Swap-and-accumulate: pass the left partial sum down the tree.
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
// Driver: allocates a 1025-element ramp array, stages it on the GPU,
// and computes an inclusive scan. All GPU kernels are currently
// commented out; only the host-side Thrust scan actually runs, so the
// device buffers are allocated and filled but never consumed.
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
19,060 | /*
Simple Monte Carlo Pi Simulation using CUDA Primitives
*/
#include <curand.h>
#include <iostream>
#include <iomanip>
__device__ int total_device_points{};
// Counts, into the global total_device_points counter, how many of the
// supplied random points fall inside the circle of radius 0.5 centred
// on the origin (the [0,1) samples are first shifted by -0.5).
// One thread per point; the launch configuration must cover the arrays
// exactly, as no bounds check is performed.
__global__ void measure_points(const float* random_x,
                               const float* random_y)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const float x = random_x[i] - 0.5F;
  const float y = random_y[i] - 0.5F;
  // Fix: stay in single precision. pow(x, 2) promoted the math to
  // double inside a float kernel; x * x is exact for the square, and
  // the sqrt is unnecessary — compare against 0.5^2 = 0.25 directly
  // (sqrtf is monotonic, so the predicate is equivalent).
  const int n = (x * x + y * y) > 0.25F ? 0 : 1;
  atomicAdd(&total_device_points, n);
}
// Monte Carlo estimate of Pi: generate count uniform points in the unit
// square, count those inside the inscribed circle on the GPU, and
// report 4 * inside / total.
int main() {
  constexpr int width = 512;
  constexpr int height = 512;
  constexpr int count = width * height;
  constexpr int size = count * sizeof(float);
  curandGenerator_t random_generator;
  curandCreateGenerator(&random_generator, CURAND_RNG_PSEUDO_MTGP32);
  curandSetPseudoRandomGeneratorSeed(random_generator, time(nullptr));
  float *random_x, *random_y;
  cudaMalloc(&random_x, size);
  cudaMalloc(&random_y, size);
  curandGenerateUniform(random_generator, random_x, count);
  curandGenerateUniform(random_generator, random_y, count);
  measure_points <<<width, height>>> (random_x, random_y);
  int total_host_points;
  // Synchronous copy of the device counter; implicitly waits for the
  // kernel on the default stream.
  cudaMemcpyFromSymbol(&total_host_points, total_device_points, sizeof(int));
  const float estimated_pi = ((4.0F * static_cast<float>(total_host_points)) / static_cast<float>(count));
  std::cout << std::setprecision(std::numeric_limits<float>::digits10 + 1)
            << "Using the Monte Carlo Method Pi is estimated to be: "
            << estimated_pi
            << '\n';
  cudaFree(random_x);
  cudaFree(random_y);
  // Fix: the original leaked the cuRAND generator.
  curandDestroyGenerator(random_generator);
  return 0;
}
|
19,061 | #include "conv2d-transpose-input-grad.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Gradient-of-input op for a transposed 2D convolution.
// y: upstream op whose gradient flows in; kernel: the conv kernel op;
// strides: [stride_h, stride_w]; input_size: NHWC shape of the original
// input, which is also this op's output shape.
// NOTE(review): m_strides stores the caller's pointer rather than
// copying the two values — confirm the strides array outlives this op.
Conv2DTransposeInputGrad::Conv2DTransposeInputGrad(Op* y, Op* kernel,
const int strides[], const int input_size[])
: Op("conv2d_transpose_input_grad",
Shape({input_size[0], input_size[1],
input_size[2], input_size[3]}),
{y, kernel})
, m_strides(strides)
{
m_input_size[0] = input_size[0];
m_input_size[1] = input_size[1];
m_input_size[2] = input_size[2];
m_input_size[3] = input_size[3];
}
// Compiles this op into the runtime graph: looks up the compiled
// predecessors (y and kernel), allocates the output tensor, and emits
// the runtime node that computes the input gradient.
void Conv2DTransposeInputGrad::compile()
{
auto& g = Graph::instance();
// preds()[0] is y, preds()[1] is the kernel (see constructor order).
auto& cy = g.compiled(preds()[0]);
auto& ckernel = g.compiled(preds()[1]);
Shape out_shape({m_input_size[0], m_input_size[1], m_input_size[2], m_input_size[3]});
dbl_t* out_data = tensor_alloc(out_shape.total());
int y_size[4] = { cy.out_shape[0], cy.out_shape[1],
cy.out_shape[2], cy.out_shape[3]};
int kernel_size[4] = { ckernel.out_shape[0], ckernel.out_shape[1],
ckernel.out_shape[2], ckernel.out_shape[3]};
auto out_node = rt::Node::op_conv2d_transpose_input_grad(cy.out_data, ckernel.out_data,
m_strides, out_data, y_size,
kernel_size, m_input_size,
{cy.out_node, ckernel.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
19,062 | //******************************************************************************
//
// File: ModCubRoot.cu
//
// Version: 1.0
//******************************************************************************
// Number of threads per block.
#define NT 1024
// Overall counter variable in global memory.
__device__ unsigned long long int count;
__device__ unsigned long long int arraySize = 3;
/**
* Device kernel to compute modular cube root.
*
*
* @author Omkar Kakade
*/
/**
 * Device kernel to compute modular cube roots: brute-force search for
 * every m in [0, N) with m^3 ≡ c (mod N), grid-strided across threads.
 * The first `arraySize` (3) roots found are stored in final_M, in the
 * order the atomic counter `count` hands out slots.
 * Assumes blockDim.x == NT (the stride math uses the NT constant).
 * NOTE(review): (first_mod * i) and (second_mod * i) can overflow
 * 64 bits when N is large (roughly N >= 2^32) — confirm the expected
 * range of N before trusting results.
 *
 * @author Omkar Kakade
 */
extern "C" __global__ void computeModularCubeRoot
( unsigned long long int c,
unsigned long long int N,
unsigned long long int *final_M)
{
unsigned long long int thr, size, rank;
unsigned long long int local_c;
unsigned long long int local_m;
unsigned long long int increment;
// Determine number of threads and this thread's rank.
thr = threadIdx.x;
size = gridDim.x*NT;
rank = blockIdx.x*NT + thr;
// Initialize per-thread.
local_c = 0;
local_m = 0;
increment = 1;
unsigned long long int atom_result =0;
// Compute modular cube roots.
for (unsigned long long int i = rank; i < N; i += size)
{
// i^3 mod N computed as three successive modular multiplications.
unsigned long long int first_mod = (i)%N;
unsigned long long int second_mod = (first_mod * i)%N;
unsigned long long int third_mod = (second_mod * i)%N;
local_c = third_mod;
local_m = i;
if (local_c == c){
// Reserve a slot in final_M via the global atomic counter.
atom_result = atomicAdd(&count,increment);
if (atom_result < arraySize) {
final_M[atom_result]=local_m;
}
}
}
}
|
19,063 | //
// Created by Peter Rigole on 2019-03-08.
//
#include "Managed.cuh"
// Allocates this object in CUDA unified (managed) memory so it is
// reachable from both host and device code.
// NOTE(review): the cudaMallocManaged return code is not checked — on
// failure ptr is indeterminate and is returned anyway; consider
// checking the status and throwing std::bad_alloc.
__host__
void *Managed::operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
return ptr;
}
// Releases unified memory obtained by the matching operator new.
// cudaFree(nullptr) is a no-op, so deleting a null pointer is safe.
__host__
void Managed::operator delete(void *ptr) {
cudaFree(ptr);
}
|
19,064 | #include <iostream>
#include <random>
#include <iomanip>
#include <cuda.h>
#define MASK_DIM_SIZE 10 // Number of elements for one spacial dimension
#define TILE_SIZE 16
__constant__ float d_mask[MASK_DIM_SIZE * MASK_DIM_SIZE];
/*-------------------------------------------------------------------------------------------------
* FORWARD DECLARATION
*-----------------------------------------------------------------------------------------------*/
__global__ void naive_convolution_kernel(const float *d_in, float *d_out,
int M, int N, int mask_m, int mask_n);
__global__ void tiled_convolution_kernel(const float *d_in, float *d_out,
int M, int N, int mask_m, int mask_n);
__global__ void cache_convolution_kernel(const float *d_in, float *d_out,
int M, int N, int mask_m, int mask_n);
void cpu_convolution(const float *A, const float *mask, float *out,
int M, int N, int mask_m, int mask_n);
float convolution_naive(const float *in, float *out, int M, int N, int mask_m, int mask_n);
float convolution_tiled(const float *in, float *out, int M, int N, int mask_m, int mask_n);
float convolution_cache(const float *in, float *out, int M, int N, int mask_m, int mask_n);
float check_convolution(const float *A, const float *B, int M, int N);
/*-------------------------------------------------------------------------------------------------
* MAIN
*-----------------------------------------------------------------------------------------------*/
// Benchmarks three GPU 2D-convolution variants (naive, shared-memory
// tiled, cache-assisted) on a random 2000x2000 input against a CPU
// reference, reporting the L2 error and kernel time of each.
int main(int argc, char **argv)
{
const int M = 2000, N = 2000;
const int mask_m = 3, mask_n = 3;
float *A = new float[M * N];
float *cpu_out = new float[M * N];
float *naive_out = new float[M * N];
float *tiled_out = new float[M * N];
float *cache_out = new float[M * N];
float *mask = new float[mask_m * mask_n];
std::random_device rd;
std::mt19937 engine(rd());
std::uniform_real_distribution<float> rand0(0, 10.0);
for (int i = 0; i < M * N; i++)
A[i] = rand0(engine);
// 3x3 blur-like kernel, row major.
mask[0] = 1.0;
mask[1] = 2.0;
mask[2] = 1.0;
mask[3] = 2.0;
mask[4] = 4.0;
mask[5] = 2.0;
mask[6] = 1.0;
mask[7] = 2.0;
mask[8] = 1.0;
// Stage the mask in constant memory for all GPU kernels.
cudaMemcpyToSymbol(d_mask, mask, mask_m * mask_n * sizeof(float));
cpu_convolution(A, mask, cpu_out, M, N, mask_m, mask_n);
float naive_time = convolution_naive(A, naive_out, M, N, mask_m, mask_n);
float tiled_time = convolution_tiled(A, tiled_out, M, N, mask_m, mask_n);
float cache_time = convolution_cache(A, cache_out, M, N, mask_m, mask_n);
float naive_err = check_convolution(cpu_out, naive_out, M, N);
std::cout << "Naive convolution error = " << naive_err << ". Time = " << naive_time << "ms.\n";
float tiled_err = check_convolution(cpu_out, tiled_out, M, N);
std::cout << "tiled convolution error = " << tiled_err<< ". Time = " << tiled_time << "ms.\n";
float cache_err = check_convolution(cpu_out, cache_out, M, N);
std::cout << "cache convolution error = " << cache_err << ". Time = " << cache_time << "ms.\n";
delete[] A;
delete[] cpu_out;
delete[] naive_out;
delete[] tiled_out;
delete[] cache_out;
delete[] mask;
}
/*-------------------------------------------------------------------------------------------------
* KERNELS
*-----------------------------------------------------------------------------------------------*/
// Direct 2D convolution: each thread computes one output element by
// sliding the constant-memory mask over the input with zero padding at
// the borders. Launch with a 2D grid covering the M x N output.
__global__ void naive_convolution_kernel(const float *d_in, float *d_out,
                                         int M, int N, int mask_m, int mask_n)
{
    const int out_row = blockIdx.y * blockDim.y + threadIdx.y;
    const int out_col = blockIdx.x * blockDim.x + threadIdx.x;
    // Threads outside the matrix do nothing.
    if (out_row >= M || out_col >= N)
        return;
    const int base_row = out_row - (mask_m / 2);
    const int base_col = out_col - (mask_n / 2);
    float acc = 0.0;
    // Accumulate mask-weighted neighbours, skipping out-of-range cells
    // (equivalent to zero padding).
    for (int mr = 0; mr < mask_m; mr++) {
        const int in_row = base_row + mr;
        for (int mc = 0; mc < mask_n; mc++) {
            const int in_col = base_col + mc;
            if (in_row >= 0 && in_row < M && in_col >= 0 && in_col < N)
                acc += d_in[in_row * N + in_col] * d_mask[mr * mask_n + mc];
        }
    }
    d_out[out_row * N + out_col] = acc;
}
// Shared-memory tiled 2D convolution: each block stages its tile plus a
// halo of (mask/2) cells into shared memory, then convolves from there.
// Requires mask_m, mask_n <= MASK_DIM_SIZE and blockDim == TILE_SIZE^2.
__global__ void tiled_convolution_kernel(const float *d_in, float *d_out,
                                         int M, int N, int mask_m, int mask_n)
{
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  int m_half = mask_m / 2;
  int n_half = mask_n / 2;
  __shared__ float tile[TILE_SIZE + MASK_DIM_SIZE - 1][TILE_SIZE + MASK_DIM_SIZE - 1];
  // Global indices of this thread's halo partners in the four
  // neighbouring blocks.
  int left_idx = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
  int right_idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
  int top_idx = (blockIdx.y - 1) * blockDim.y + threadIdx.y;
  int bottom_idx = (blockIdx.y + 1) * blockDim.y + threadIdx.y;
  // Centre element. Fix: the original tested (row < M || col < N),
  // which is true for every thread in the grid and allowed
  // out-of-bounds reads on bottom/right partial tiles; both bounds
  // must hold.
  tile[threadIdx.y + m_half][threadIdx.x + n_half] =
      (row < M && col < N) ? d_in[row * N + col] : 0;
  // Left and right halos. Fix: also require row < M before reading.
  if (threadIdx.x >= blockDim.x - n_half)
    tile[threadIdx.y + m_half][threadIdx.x - blockDim.x + n_half] =
        (left_idx < 0 || row >= M) ? 0 : d_in[row * N + left_idx];
  if (threadIdx.x < n_half)
    tile[threadIdx.y + m_half][threadIdx.x + blockDim.x + n_half] =
        (right_idx >= N || row >= M) ? 0 : d_in[row * N + right_idx];
  // Top halo row plus its corners. Fix: guard the column index too.
  if (threadIdx.y >= blockDim.y - m_half) {
    tile[threadIdx.y - blockDim.y + m_half][threadIdx.x + n_half] =
        (top_idx < 0 || col >= N) ? 0 : d_in[top_idx * N + col];
    if (threadIdx.x >= blockDim.x - n_half)
      tile[threadIdx.y - blockDim.y + m_half][threadIdx.x - blockDim.x + n_half] =
          (top_idx < 0 || left_idx < 0) ? 0 : d_in[top_idx * N + left_idx];
    else if (threadIdx.x < n_half)
      tile[threadIdx.y - blockDim.y + m_half][threadIdx.x + blockDim.x + n_half] =
          (top_idx < 0 || right_idx >= N) ? 0 : d_in[top_idx * N + right_idx];
  }
  // Bottom halo row plus its corners. Fix: guard the column index too.
  if (threadIdx.y < m_half) {
    tile[threadIdx.y + blockDim.y + m_half][threadIdx.x + n_half] =
        (bottom_idx >= M || col >= N) ? 0 : d_in[bottom_idx * N + col];
    if (threadIdx.x >= blockDim.x - n_half)
      tile[threadIdx.y + blockDim.y + m_half][threadIdx.x - blockDim.x + n_half] =
          (bottom_idx >= M || left_idx < 0) ? 0 : d_in[bottom_idx * N + left_idx];
    else if (threadIdx.x < n_half)
      tile[threadIdx.y + blockDim.y + m_half][threadIdx.x + blockDim.x + n_half] =
          (bottom_idx >= M || right_idx >= N) ? 0 : d_in[bottom_idx * N + right_idx];
  }
  __syncthreads();
  if (row < M && col < N) {
    float sum = 0.0;
    // Perform convolution entirely from shared memory.
    for (int i = 0; i < mask_m; i++)
      for (int j = 0; j < mask_n; j++)
        sum += tile[threadIdx.y + i][threadIdx.x + j] * d_mask[i * mask_n + j];
    d_out[row * N + col] = sum;
  }
}
// Cache-assisted 2D convolution: stages only the block's own tile in
// shared memory (no halo); neighbour cells outside the tile are read
// straight from global memory and are expected to hit L2/L1 cache.
__global__ void cache_convolution_kernel(const float *d_in, float *d_out,
int M, int N, int mask_m, int mask_n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int m_half = mask_m / 2;
int n_half = mask_n / 2;
__shared__ float tile[TILE_SIZE][TILE_SIZE];
if (row < M && col < N)
tile[threadIdx.y][threadIdx.x] = d_in[row * N + col];
__syncthreads();
// Global-index bounds of this block's tile.
int row_tile_start = blockIdx.y * blockDim.y;
int row_tile_end = (blockIdx.y + 1) * blockDim.y;
int col_tile_start = blockIdx.x * blockDim.x;
int col_tile_end = (blockIdx.x + 1) * blockDim.x;
int m_start = row - m_half;
int n_start = col - n_half;
float sum = 0.0;
if (row < M && col < N) {
// Loop over mask
for (int i = 0; i < mask_m; i++) {
for (int j = 0; j < mask_n; j++) {
int m = m_start + i;
int n = n_start + j;
// Check if indices with mask applied are within the matrix
if (m >= 0 && m < M && n >= 0 && n < N) {
// Check if elements are in the shared-memory tile
if (m >= row_tile_start && m < row_tile_end &&
n >= col_tile_start && n < col_tile_end)
sum += tile[threadIdx.y + i - m_half][threadIdx.x + j - n_half] *
d_mask[i * mask_n + j];
else
sum += d_in[m * N + n] * d_mask[i * mask_n + j];
}
}
}
d_out[row * N + col] = sum;
}
}
/*-------------------------------------------------------------------------------------------------
* FUNCTIONS
*-----------------------------------------------------------------------------------------------*/
// Reference CPU implementation of 2D convolution with zero padding.
// A: M x N input (row major); mask: mask_m x mask_n kernel;
// out: M x N result, overwritten entirely.
void cpu_convolution(const float *A, const float *mask, float *out,
                     int M, int N, int mask_m, int mask_n)
{
    const int off_m = mask_m / 2;
    const int off_n = mask_n / 2;
    for (int r = 0; r < M; r++) {
        for (int c = 0; c < N; c++) {
            float acc = 0.0;
            // Accumulate mask-weighted neighbours; cells outside the
            // matrix contribute zero.
            for (int m = 0; m < mask_m; m++) {
                for (int n = 0; n < mask_n; n++) {
                    const int rr = r - off_m + m;
                    const int cc = c - off_n + n;
                    if (rr >= 0 && rr < M && cc >= 0 && cc < N)
                        acc += A[rr * N + cc] * mask[m * mask_n + n];
                }
            }
            out[r * N + c] = acc;
        }
    }
}
// Host wrapper: runs the naive convolution kernel on the M x N input
// `in`, writes the result to `out`, and returns the kernel time in ms.
// The mask must already be in d_mask constant memory.
float convolution_naive(const float *in, float *out, int M, int N, int mask_m, int mask_n)
{
    int size = M * N * sizeof(float);
    float *d_in, *d_out;
    cudaMalloc((void**) &d_in, size);
    cudaMalloc((void**) &d_out, size);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    int N_thd = TILE_SIZE;
    // Blocks needed along each axis (ceil division).
    int M_blk = (M + N_thd - 1) / N_thd;
    int N_blk = (N + N_thd - 1) / N_thd;
    // Fix: grid.x must cover the columns (N) because the kernel derives
    // the column from blockIdx.x, and grid.y the rows (M). The original
    // passed (M_blk, N_blk), which under-covers whenever M != N.
    dim3 grid_dim(N_blk, M_blk, 1);
    dim3 blk_dim(N_thd, N_thd, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    naive_convolution_kernel<<<grid_dim, blk_dim>>>(d_in, d_out, M, N, mask_m, mask_n);
    cudaEventRecord(stop);
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float time_ms;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Fix: release the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
    return time_ms;
}
// Host wrapper: runs the shared-memory tiled convolution kernel on the
// M x N input `in`, writes to `out`, and returns the kernel time in ms.
// The mask must already be in d_mask constant memory.
float convolution_tiled(const float *in, float *out, int M, int N, int mask_m, int mask_n)
{
    int size = M * N * sizeof(float);
    float *d_in, *d_out;
    cudaMalloc((void**) &d_in, size);
    cudaMalloc((void**) &d_out, size);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    int N_thd = TILE_SIZE;
    // Blocks needed along each axis (ceil division).
    int M_blk = (M + N_thd - 1) / N_thd;
    int N_blk = (N + N_thd - 1) / N_thd;
    // Fix: grid.x covers columns (N_blk), grid.y covers rows (M_blk);
    // the original swapped them, which under-covers whenever M != N.
    dim3 grid_dim(N_blk, M_blk, 1);
    dim3 blk_dim(N_thd, N_thd, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    tiled_convolution_kernel<<<grid_dim, blk_dim>>>(d_in, d_out, M, N, mask_m, mask_n);
    cudaEventRecord(stop);
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float time_ms;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Fix: release the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
    return time_ms;
}
// Host wrapper: runs the cache-assisted convolution kernel on the
// M x N input `in`, writes to `out`, and returns the kernel time in ms.
// The mask must already be in d_mask constant memory.
float convolution_cache(const float *in, float *out, int M, int N, int mask_m, int mask_n)
{
    int size = M * N * sizeof(float);
    float *d_in, *d_out;
    cudaMalloc((void**) &d_in, size);
    cudaMalloc((void**) &d_out, size);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    int N_thd = TILE_SIZE;
    // Blocks needed along each axis (ceil division).
    int M_blk = (M + N_thd - 1) / N_thd;
    int N_blk = (N + N_thd - 1) / N_thd;
    // Fix: grid.x covers columns (N_blk), grid.y covers rows (M_blk);
    // the original swapped them, which under-covers whenever M != N.
    dim3 grid_dim(N_blk, M_blk, 1);
    dim3 blk_dim(N_thd, N_thd, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cache_convolution_kernel<<<grid_dim, blk_dim>>>(d_in, d_out, M, N, mask_m, mask_n);
    cudaEventRecord(stop);
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float time_ms;
    cudaEventElapsedTime(&time_ms, start, stop);
    // Fix: release the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
    return time_ms;
}
// L2 distance between two M x N matrices (row major).
// Fix: the original summed only the first N elements — a single row —
// even though it receives both dimensions; compare all M * N entries.
float check_convolution(const float *A, const float *B, int M, int N)
{
    float sum = 0.0;
    for (int i = 0; i < M * N; i++)
        sum += (A[i] - B[i]) * (A[i] - B[i]);
    return sqrt(sum);
}
|
19,065 | #include <array>
#include <cassert>
#include <chrono>
#include <iostream>
#include <math.h>
#include <string>
using namespace std;
float lb = 0;
float ub = 2;
const int nx = 41;
const int ny = 41;
int nt = 500;
int nit = 50;
int c = 1;
float dx = ub / float(nx - 1);
float dy = ub / float(ny - 1);
float rho = 1;
float nu = 0.1;
float dt = 0.001;
// Pretty-prints a dimx x dimy row-major matrix in a numpy-like bracket
// layout, preceded by `msg` and framed by separator lines.
void npprint(float *u, int dimx = ny, int dimy = nx, string msg = "OUT: ") {
  printf("%s\n", msg.c_str());
  printf("x-------------------------------x\n");
  printf("[\n");
  for (int row = 0; row < dimx; row++) {
    printf("[");
    for (int col = 0; col < dimy; col++)
      printf("%3.4f, ", u[row * dimy + col]);
    printf("],\n");
  }
  printf("]\n");
  printf("x-------------------------------x\n");
}
// Fills x[0..num-1] with `num` evenly spaced values from lb to ub,
// endpoints included (numpy.linspace equivalent).
void linspace(float *x, int lb, int ub, int num) {
  const int span = ub - lb;
  for (int i = 0; i < num; ++i)
    x[i] = i * span / float(num - 1) + lb;
}
// Copies the entire dimx*dimy array `lhs` into `rhs`, sequentially.
// NOTE(review): every thread that calls this copies the WHOLE array,
// so concurrent callers do massively redundant work and race with any
// thread still writing `lhs` — correctness relies on all writers having
// finished and all copies writing identical values.
__device__ void copy(float *lhs, float *rhs, int dimx, int dimy) {
for (int k = 0; k < dimy * dimx; k++)
rhs[k] = lhs[k];
}
// Sets every element of the dimx*dimy array x to fillnum.
void fill(float *x, float fillnum, int dimx, int dimy) {
  const int total = dimy * dimx;
  for (int i = 0; i < total; ++i)
    x[i] = fillnum;
}
// Builds numpy-style meshgrid arrays: X repeats the x coordinates along
// each row, Y repeats the y coordinates down each column.
// NOTE(review): both loop bounds are taken from the global ny; this is
// only correct because nx == ny (41) in this program. If nx != ny, the
// x[] read would run out of range — confirm before reusing with
// non-square grids.
void meshgrid(float *x, float *y, float *X, float *Y) {
int dimx = ny;
int dimy = ny;
for (int k = 0; k < dimy; k++)
for (int i = 0; i < dimx; i++) {
X[k * dimx + i] = x[i];
Y[k * dimx + i] = y[k];
}
}
// Computes the source term b of the pressure Poisson equation at the
// interior nodes from the current velocity fields u and v.
// Launched as build_up_b<<<nx, ny>>>: blockIdx.x indexes the row,
// threadIdx.x the column; boundary nodes are left untouched.
__global__ void build_up_b(float *b, float rho, float dt, float *u, float *v,
                           float dx, float dy, int nx, int ny) {
  /*
  def build_up_b(b, rho, dt, u, v, dx, dy):
  return b
  */
  int xlim = ny;
  int ylim = nx;
  int idx = blockIdx.x;
  int idy = threadIdx.x;
  // Fix: the original wrote `assert(nx = blockDim.x)` — an assignment,
  // which always passes and silently overwrites nx. Compare instead.
  assert(nx == blockDim.x);
  if (idx >= xlim - 2)
    return;
  if (idy >= ylim - 2)
    return;
  b[(idx + 1) * nx + idy + 1] =
      (rho *
       (1 / dt *
            ((u[(idx + 1) * nx + idy + 2] - u[(idx + 1) * nx + idy]) /
                 (2 * dx) +
             (v[(idx + 2) * nx + idy + 1] - v[(idx)*nx + idy + 1]) / (2 * dy)) -
        pow(((u[(idx + 1) * nx + idy + 2] - u[(idx + 1) * nx + idy]) /
                 (2 * dx)),
            2) -
        2 * ((u[(idx + 2) * nx + idy + 1] - u[(idx)*nx + idy + 1]) / (2 * dy) *
             (v[(idx + 1) * nx + idy + 2] - v[(idx + 1) * nx + idy]) /
                 (2 * dx)) -
        pow((v[(idx + 2) * nx + idy + 1] - v[(idx)*nx + idy + 1]) / (2 * dy),
            2)));
}
// Applies the pressure boundary conditions after each Jacobi sweep.
// Launched as pmargin<<<ny, nx>>>: blockIdx.x is the row, threadIdx.x
// the column; every thread redundantly rewrites its row/column edges.
// NOTE(review): __syncthreads() only orders threads within one block;
// the row-edge writes in other blocks are not ordered with respect to
// the column-edge writes here — confirm this is tolerable for the
// corner cells.
__global__ void pmargin(float *p, int nx, int ny) {
int xlim = ny;
int ylim = nx;
int idx = blockIdx.x;
int idy = threadIdx.x;
p[0 * ylim + idy] = p[1 * ylim + idy]; // dp/dy = 0 at y = 0
p[(xlim - 1) * ylim + idy] = 0; // p = 0 at y = 2
__syncthreads();
p[idx * ylim + ylim - 1] = p[idx * ylim + ylim - 2]; // dp/dx = 0 at x =2
p[idx * ylim + 0] = p[idx * ylim + 1]; // dp/dx = 0 at x =0
__syncthreads();
}
// One Jacobi iteration of the pressure Poisson solve: snapshots p into
// pn, then updates each interior node from its pn neighbours and the
// source term b. Launched as pupdate<<<ny, nx>>>.
// NOTE(review): copy() is invoked by every interior thread and copies
// the whole array, and both it and the __syncthreads() sit inside a
// divergent branch (not all threads enter) — a non-uniform barrier is
// undefined behaviour. It appears to work here only because nx == ny
// and most threads take the branch; flagging rather than fixing to
// avoid altering the numerical sequencing.
__global__ void pupdate(float *p, float *pn, float dx, float dy, float *b,
int nx, int ny, int nit) {
int xlim = ny;
int ylim = nx;
int idx = blockIdx.x;
int idy = threadIdx.x;
if (idx < xlim - 2 && idy < ylim - 2) {
copy(p, pn, ny, nx);
__syncthreads();
p[(idx + 1) * nx + idy + 1] =
(((pn[(idx + 1) * nx + idy + 2] + pn[(idx + 1) * nx + idy]) *
pow(dy, 2) +
(pn[(idx + 2) * nx + idy + 1] + pn[(idx)*nx + idy + 1]) *
pow(dx, 2)) /
(2 * (pow(dx, 2) + pow(dy, 2))) -
pow(dx, 2) * pow(dy, 2) / (2 * (pow(dx, 2) + pow(dy, 2))) *
b[(idx + 1) * nx + idy + 1]);
}
__syncthreads();
}
// Iteratively solves the pressure Poisson equation: nit Jacobi sweeps,
// each followed by re-application of the pressure boundary conditions.
// Host-side driver; synchronizes after every kernel so each sweep sees
// the previous one's results.
void pressure_poisson(float *p, float *pn, float dx, float dy, float *b, int nx,
int ny, int nit) {
/*
def pressure_poisson(p, dx, dy, b):
*/
for (int q = 0; q < nit; q++) {
pupdate<<<ny, nx>>>(p, pn, dx, dy, b, nx, ny, nit);
cudaDeviceSynchronize();
pmargin<<<ny, nx>>>(p, nx, ny);
cudaDeviceSynchronize();
}
}
// One explicit time step of the cavity-flow momentum equations:
// snapshots u, v into un, vn, then updates the interior nodes of u and
// v from the snapshots and the pressure field.
// Launched as cupdate<<<nx, ny>>>: blockIdx.x is the row, threadIdx.x
// the column (uses the file-level nx/ny globals).
__global__ void cupdate(int nt, float *u, float *v, float *un, float *vn,
                        float dt, float dx, float dy, float *p, float rho,
                        float nu) {
  int idx = blockIdx.x;
  int idy = threadIdx.x;
  copy(u, un, ny, nx);
  copy(v, vn, ny, nx);
  __syncthreads();
  // Fix: the original updated (idx+1, idy+1) and read row idx+2 for
  // EVERY thread, so the last rows/columns indexed past the end of the
  // nx*ny arrays (out-of-bounds reads and writes). Restrict the update
  // to interior nodes; the barriers stay uniform because the predicate
  // only guards the data accesses.
  bool interior = (idx < ny - 2) && (idy < nx - 2);
  if (interior)
    u[(idx + 1) * nx + idy + 1] =
        (un[(idx + 1) * nx + idy + 1] -
         un[(idx + 1) * nx + idy + 1] * dt / dx *
             (un[(idx + 1) * nx + idy + 1] - un[(idx + 1) * nx + idy]) -
         vn[(idx + 1) * nx + idy + 1] * dt / dy *
             (un[(idx + 1) * nx + idy + 1] - un[(idx)*nx + idy + 1]) -
         dt / (2 * rho * dx) *
             (p[(idx + 1) * nx + idy + 2] - p[(idx + 1) * nx + idy]) +
         nu * (dt / pow(dx, 2) *
                   (un[(idx + 1) * nx + idy + 2] -
                    2 * un[(idx + 1) * nx + idy + 1] + un[(idx + 1) * nx + idy]) +
               dt / pow(dy, 2) *
                   (un[(idx + 2) * nx + idy + 1] -
                    2 * un[(idx + 1) * nx + idy + 1] + un[(idx)*nx + idy + 1])));
  __syncthreads();
  if (interior)
    v[(idx + 1) * nx + idy + 1] =
        (vn[(idx + 1) * nx + idy + 1] -
         un[(idx + 1) * nx + idy + 1] * dt / dx *
             (vn[(idx + 1) * nx + idy + 1] - vn[(idx + 1) * nx + idy]) -
         vn[(idx + 1) * nx + idy + 1] * dt / dy *
             (vn[(idx + 1) * nx + idy + 1] - vn[(idx)*nx + idy + 1]) -
         dt / (2 * rho * dy) *
             (p[(idx + 2) * nx + idy + 1] - p[(idx)*nx + idy + 1]) +
         nu * (dt / pow(dx, 2) *
                   (vn[(idx + 1) * nx + idy + 2] -
                    2 * vn[(idx + 1) * nx + idy + 1] + vn[(idx + 1) * nx + idy]) +
               dt / pow(dy, 2) *
                   (vn[(idx + 2) * nx + idy + 1] -
                    2 * vn[(idx + 1) * nx + idy + 1] + vn[(idx)*nx + idy + 1])));
  __syncthreads();
}
// Applies the lid-driven-cavity velocity boundary conditions: no-slip
// on three walls and a moving lid (u = 10) on the top row.
// Launched as cmargin<<<nx, ny>>>; every thread redundantly rewrites
// its row and column edges.
__global__ void cmargin(float *u, float *v, int nx, int ny) {
int xlim = ny;
int ylim = nx;
int idx = blockIdx.x;
int idy = threadIdx.x;
u[0 * ylim + idy] = 0;
u[(xlim - 1) * ylim + idy] = 10;
v[0 * ylim + idy] = 0;
v[(xlim - 1) * ylim + idy] = 0;
__syncthreads();
u[idx * ylim + 0] = 0;
v[idx * ylim + 0] = 0;
u[idx * ylim + ylim - 1] = 0;
v[idx * ylim + ylim - 1] = 0;
__syncthreads();
}
// Time-steps the cavity flow nt times. Each step builds the Poisson
// source term b, solves for pressure, advances the velocities, then
// re-applies the velocity boundary conditions. The scratch buffer b is
// allocated in unified memory so fill() can run on the host.
void cavity_flow(int nt, float *u, float *v, float *un, float *vn, float dt,
float dx, float dy, float *p, float *pn, float rho, float nu) {
/*
def cavity_flow(nt, u, v, dt, dx, dy, p, rho, nu):
*/
float *b;
cudaMallocManaged(&b, nx * ny * sizeof(float));
fill(b, 0, nx, ny);
for (int n = 0; n < nt; n++) {
build_up_b<<<nx, ny>>>(b, rho, dt, u, v, dx, dy, nx, ny);
cudaDeviceSynchronize();
pressure_poisson(p, pn, dx, dy, b, nx, ny, nit);
cudaDeviceSynchronize();
cupdate<<<nx, ny>>>(nt, u, v, un, vn, dt, dx, dy, p, rho, nu);
cudaDeviceSynchronize();
cmargin<<<nx, ny>>>(u, v, nx, ny);
cudaDeviceSynchronize();
}
cudaFree(b);
}
// Driver: sets up the nx x ny grid, zero-initializes the velocity and
// pressure fields in unified memory, runs the cavity-flow simulation
// for nt steps, and prints the resulting U, V and P fields with timing.
int main() {
float x[nx];
float y[ny];
float X[ny * nx];
float Y[ny * nx];
linspace(x, lb, ub, nx);
linspace(y, lb, ub, ny);
// X/Y are built for parity with the numpy original; they are not used
// further below.
meshgrid(x, y, X, Y);
float *u, *un;
float *v, *vn;
float *p, *pn;
cudaMallocManaged(&p, nx * ny * sizeof(float));
cudaMallocManaged(&pn, nx * ny * sizeof(float));
cudaMallocManaged(&u, nx * ny * sizeof(float));
cudaMallocManaged(&v, nx * ny * sizeof(float));
cudaMallocManaged(&un, nx * ny * sizeof(float));
cudaMallocManaged(&vn, nx * ny * sizeof(float));
fill(u, 0, nx, ny);
fill(v, 0, nx, ny);
fill(p, 0, nx, ny);
fill(pn, 0, nx, ny);
fill(vn, 0, nx, ny);
fill(un, 0, nx, ny);
auto start = std::chrono::high_resolution_clock::now();
cavity_flow(nt, u, v, un, vn, dt, dx, dy, p, pn, rho, nu);
cudaDeviceSynchronize();
auto finish = std::chrono::high_resolution_clock::now();
npprint(u, ny, nx, "U");
npprint(v, ny, nx, "V");
npprint(p, ny, nx, "P");
std::chrono::duration<double> elapsed = finish - start;
printf("GPU Elapsed time: %3.3f s\n", elapsed.count());
cudaFree(p);
cudaFree(pn);
cudaFree(vn);
cudaFree(un);
cudaFree(u);
cudaFree(v);
}
19,066 | #include <cuda.h>
#include <stdio.h>
#define N 32
// Prints an N x N matrix, one row per line, each value padded with
// single spaces.
void printMatrix (unsigned* matrix) {
  for (unsigned row = 0; row < N; row++) {
    for (unsigned col = 0; col < N; col++)
      printf(" %u ", matrix[row * N + col]);
    printf("\n");
  }
}
// Fills `matrix` with an N x N diagonal matrix whose (i, i) entry is
// i + 1 and whose off-diagonal entries are zero.
void createMatrix(unsigned* matrix) {
  for (unsigned row = 0; row < N; row++)
    for (unsigned col = 0; col < N; col++)
      matrix[row * N + col] = (row == col) ? row + 1 : 0;
}
// Computes result = matrix * matrix for a matrixSize x matrixSize
// matrix; one thread per output ROW (launched as <<<1, N>>>).
// Fix: the original accumulated with `+=` directly into `result`,
// which is never zeroed (cudaMalloc leaves memory uninitialized), so
// the output contained garbage offsets. Accumulate into a local and
// store once instead.
__global__ void square (unsigned* matrix, unsigned* result, unsigned matrixSize) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned jj = 0; jj < matrixSize; jj++) {
        unsigned acc = 0;
        for (unsigned kk = 0; kk < matrixSize; kk++) {
            acc += matrix[id * matrixSize + kk] * matrix[kk * matrixSize + jj];
        }
        result[id * matrixSize + jj] = acc;
    }
}
// Host wrapper: copies the N x N matrix to the device, squares it with
// one thread per row, and copies the result back.
__host__ void call_sqr (unsigned* h_in_matrix, unsigned* h_out_matrix) {
    unsigned *d_in_matrix, *d_out_matrix;
    cudaMalloc((void **) &d_in_matrix, N * N * sizeof(unsigned));
    cudaMalloc((void **) &d_out_matrix, N * N * sizeof(unsigned));
    // Fix: zero the output buffer — cudaMalloc does not initialize
    // memory, and the kernel historically accumulated with += into it.
    cudaMemset(d_out_matrix, 0, N * N * sizeof(unsigned));
    cudaMemcpy(d_in_matrix, h_in_matrix, N * N * sizeof(unsigned), cudaMemcpyHostToDevice);
    square<<<1, N>>>(d_in_matrix, d_out_matrix, N);
    cudaMemcpy(h_out_matrix, d_out_matrix, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    cudaFree(d_in_matrix);
    cudaFree(d_out_matrix);
}
// Builds a diagonal test matrix, squares it on the GPU, and prints the
// result (diagonal entries should become (i+1)^2).
int main() {
unsigned* matrix;
unsigned* result;
matrix = (unsigned*)malloc(N * N * sizeof(unsigned));
result = (unsigned*)malloc(N * N * sizeof(unsigned));
createMatrix(matrix);
call_sqr (matrix, result);
printMatrix(result);
free(matrix);
free(result);
return 0;
}
|
19,067 | #include "includes.h"
// Fix: the original read `#define TILE_WIDTH = 16;`, which expands to
// the tokens `= 16;` and would break any use of the macro. (It is
// currently unused by the kernel below.)
#define TILE_WIDTH 16
// Computes C = A * B where A is numARows x numAColumns and B is
// numBRows x numBColumns (row major); one thread per output element.
// Requires numAColumns == numBRows and a 2D launch covering C.
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
  //@@Y-axis matrix dimension
  int row = blockIdx.y*blockDim.y + threadIdx.y;
  //@@X-axis matrix Dimension
  int columns = blockIdx.x*blockDim.x + threadIdx.x;
  //@@Initilaizing final value to add in the output matrix
  float pValue = 0;
  //@@checking for boundary condition
  if (row < numARows && columns < numBColumns) {
    //@@adding values from 0 to matrix A width and from 0 to matrix B height
    for (int k = 0; k < numAColumns; k++) {
      //@@Summation of the rows from matrix A and columns from matrix B to pValue
      pValue += A[row*numAColumns + k] * B[k*numBColumns + columns];
    }
    //@@add the final value to the output matrix
    C[row*numCColumns + columns] = pValue;
  }
}
19,068 | #include "includes.h"
// Accumulates the negated, size-averaged binary cross-entropy of
// predictions vs. targets into *cost (one thread per element).
// The 1e-15 epsilon guards logf(0); *cost must be zeroed beforehand.
__global__ void binaryCrossEntropyCost(float* cost, float* predictions, float* target, int size) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    // Fix: use float literals throughout; the original's 1.0e-15 and
    // -1.0 promoted the arithmetic to double inside a float kernel.
    float partial_cost = target[index] * logf(1.0e-15f + predictions[index])
        + (1.0f - target[index]) * logf(1.0e-15f + (1.0f - predictions[index]));
    atomicAdd(cost, -partial_cost / size);
  }
}
19,069 | #include<stdio.h>
#include <time.h>
// Boolean flags used as int so they can round-trip through cudaMemcpy.
#define TRUE 1
#define FALSE 0
// NOTE(review): macro arguments are not parenthesized -- only pass simple
// expressions (the file only uses MIN(512,N), which is safe).
#define MIN(a,b) (a < b?a:b )
// Number of elements in the searched array (also sizes the kernel launch).
static const int N = 150;
// One thread per element: if this thread's element equals *valor, set the
// shared result flag and burn cycles, demonstrating that per-hit extra work
// is paid in parallel rather than once per scanned element as on the CPU.
// Launch config must give exactly one thread per element (host guarantees
// N % threads_block == 0, so no bounds check is needed).
__global__ void cerca_array_device(int *array,int *valor,int *res)
{
    int b = 1;  // fix: was read uninitialized below (undefined behaviour)
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(*res == FALSE && *valor == array[id]){
        *res = TRUE;  // benign race: every matching thread stores the same value
        // Busy work to make the per-hit cost measurable.
        for(int i = 0; i <= 1000000; i++)
            b = (b*70)/3;
    }
}
// Sequential reference search over the first N elements of `array`.
// Pays the same busy-work penalty after every non-matching element, so its
// cost grows with the number of elements scanned (the point of the demo).
__host__ bool cerca_array_host(int *array, int valor)
{
    int b = 1;  // fix: was read uninitialized below (undefined behaviour)
    for(int i = 0 ; i < N ; ++i){
        if(array[i] == valor){
            return true;
        }
        // Busy work mirroring the device kernel (inner `i` shadows outer `i`).
        for(int i = 0; i <= 1000000; i++)
            b = (b*70)/3;
    }
    return false;
}
int main()
{
// Demo driver: fill an array with random values, read a number from the
// user, then time a sequential CPU search against a one-thread-per-element
// GPU search and report whether each found the number.
srand(time(NULL));
int a[N],valor;
// Random values in roughly [0, 300].
for(int i=0;i<N;i++)
a[i] = (int)rand()/(int)(RAND_MAX/300.0);
for(int i=0;i<N;i++)
printf("valor: %d \t", a[i]);
printf("\nNombre a cercar: ");
scanf("%d",&valor);
// Run the search on the CPU and time it.
int res;
clock_t t_host = clock();
res = cerca_array_host(a,valor);
t_host = clock() - t_host;
double time_taken_host = ((double)t_host)/CLOCKS_PER_SEC;
printf("CPU: %f segons \n", time_taken_host);
if(res == TRUE)
printf("host: We found the number\n");
else
printf("host: We don't found the number :(\n");
// Device copies of the array, the searched value and the result flag.
// NOTE(review): return codes of cudaMalloc/cudaMemcpy are not checked.
int *dev_array , *dev_value , *dev_res;
cudaMalloc((void**)&dev_array, N*sizeof(int) );
cudaMalloc((void**)&dev_value, sizeof(int) );
cudaMalloc((void**)&dev_res, sizeof(int) );
res = FALSE;
cudaMemcpy(dev_array, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_value, &valor, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_res, &res, sizeof(int), cudaMemcpyHostToDevice);
// Pick the largest block size (<=512) that divides N exactly, so every
// thread maps to a valid element and the kernel needs no bounds check.
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
// Time the GPU search; the blocking cudaMemcpy below also serves as the
// synchronization point for the asynchronous kernel launch.
clock_t t_device = clock();
cerca_array_device<<<blocks,threads_block>>>(dev_array,dev_value,dev_res);
cudaMemcpy(&res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);// copy result flag back to host
t_device = clock() - t_device;
double time_taken_device = ((double)t_device)/CLOCKS_PER_SEC;
printf("GPU %f segons \n", time_taken_device);
cudaFree(dev_array);// free device memory
cudaFree(dev_value);
cudaFree(dev_res);
// Print whether the number was found on the device.
if(res == TRUE)
printf("device: We found the number\n");
else
printf("device: We don't found the number :(\n");
return 0;
}
|
19,070 | #include<stdio.h>
#include<cuda.h>
// Element-wise square: d_out[i] = d_in[i] * d_in[i].
// Fix: use the full global index. The original indexed with threadIdx.x
// only, so under the host's <<<ARRAY_SIZE, 1>>> launch (one thread per
// block) every block computed element 0 and the rest stayed uninitialised.
// The launch must supply exactly one thread per element (no size parameter
// is available for a bounds check).
__global__ void sq(float *d_out, float* d_in)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f;
}
int main(int argc, char** argv)
{
	// Squares 64 floats on the GPU and prints input:output pairs.
	const int ARRAY_SIZE = 64;
	const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

	// Host buffers: inputs 0..63 and space for the results.
	float h_in[ARRAY_SIZE];
	float h_out[ARRAY_SIZE];
	for (int i = 0; i < ARRAY_SIZE; ++i)
		h_in[i] = float(i);

	// Device buffers.
	float *d_in = 0;
	float *d_out = 0;
	cudaMalloc((void**)&d_in, ARRAY_BYTES);
	cudaMalloc((void**)&d_out, ARRAY_BYTES);

	// Upload, launch one thread per element (same shape as the original),
	// then synchronize and surface any launch/execution error.
	cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
	sq<<<ARRAY_SIZE, 1>>>(d_out, d_in);
	cudaDeviceSynchronize();
	cudaError_t error = cudaGetLastError();
	if (error != cudaSuccess)
	{
		fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
		exit(-1);
	}

	// Download results and print them four per line.
	cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
	for (int i = 0; i < ARRAY_SIZE; ++i)
	{
		printf("%f:%f \t", h_in[i], h_out[i]);
		if (i % 4 == 0) printf("\n");
	}
	printf("\n");

	cudaFree(d_in);
	cudaFree(d_out);
}//end of main
|
19,071 | #ifdef __NVCC__
// __device__ volatile int PQ[MAX_NODE];
//K in parallel
// One of K parallel priority queues per thread: pop the min-cost node from
// queue `id`, re-heapify that queue's segment, remove the node from the open
// list, and append it to the shared `expandNodes` buffer for the expansion
// phase. Each queue owns a contiguous segment of ceil(N/K) slots in PQ.
template <class U>
__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap: move the last element to the root and sift it down
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
// only the left child exists
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// both children exist: descend into the smaller violating child
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next: atomicAdd hands out unique slots across the K threads
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
// Expansion phase of the parallel A*: each thread relaxes all outgoing edges
// of one node taken from `expandNodes`. Cost/parent updates are serialized
// per child through a spin lock in `lock[]`; children whose cost improves
// and that are not on the open list are flagged in `nVFlag` for later
// insertion into the queues. When `flagDiff` is set, the same relaxation is
// repeated over the incremental (diff) CSR arrays.
// NOTE(review): __syncthreads() sits inside a divergent spin-lock loop; this
// relies on all threads of a block iterating the loop together and should be
// revisited (the same pattern recurs in the other kernels of this file).
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge, T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,int* nVFlag,
int N,int E, int K,int dest,
int flagDiff,int dE,
int* diff_off,int* diff_edge,unsigned int* diff_weight ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand: CSR out-edge range [off[node], off[node+1]) (last node ends at E)
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges are marked by a negative endpoint
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section: relax edge node->child (g-cost + heuristic form)
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand: same relaxation over the incremental edge arrays
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
// Re-establish the min-heap property of each of the K queue segments after
// the costs Cx of queued nodes may have changed elsewhere (one thread per
// queue): scan every slot, and where a parent exceeds a child, sift the
// smaller child up toward the segment front.
// NOTE(review): child indices 2*i+1 / 2*i+2 are computed from the absolute
// array index i rather than the segment-relative index i-front, so for
// id > 0 the implied tree shape differs from the one used by extractMin and
// insertPQ -- confirm this is intended.
template <class U>
__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
// node i has both children inside the segment
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
// sift-up from the violating child toward the segment front
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
// node i has only a left child inside the segment
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
// One thread per vertex: compact the 0/1 flag array `nextFlag` into the
// dense worklist `nextV`, handing out unique slots (and counting entries)
// through atomicAdd on *nvSize. Output order is nondeterministic.
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if(tid >= N)
        return;
    if(nextFlag[tid] == 1){
        int slot = atomicAdd(nvSize, 1);
        nextV[slot] = tid;
    }
}
//for K in parallel
// Insert freshly discovered nodes nextV[0..*nVsize) into the K queues:
// thread id takes elements id, id+K, id+2K, ..., pushes each onto its own
// heap segment (standard sift-up on Cx) and records the owning queue id in
// openList. Nodes already on the open list are skipped.
template <class U>
__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
// append at the end of this queue's segment
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
// sift the new element up until the heap property holds
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
// Termination test, one thread per queue: if the minimum of any non-empty
// queue could still improve the path to `dest`, clear *flagEnd (atomicAnd
// with 0) so the outer search loop continues.
template <class U>
__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if(tid >= K || PQ_size[tid] == 0)
        return;
    int head = tid * ( (N+K-1)/K );   // start of this queue's segment
    int best = PQ[head];              // heap root = queue minimum
    // At least one candidate remains cheaper than the current dest cost.
    if( Cx[best] < Cx[dest] )
        atomicAnd(flagEnd, 0);
}
// For every target vertex of a deleted edge (one vertex per thread): reset
// its parent and cost, then recompute the best cost over all remaining
// in-edges (base + diff reverse CSR arrays), rejecting candidates whose
// ancestor chain passes through the vertex itself (would form a cycle).
// The vertex is flagged in addFlag for re-insertion either way.
// No locking: each thread owns exactly one vertex's Cx/parent entries.
// NOTE(review): `flag_cycle` is declared int but used as a bool, and the
// first ancestor walk stops at `ancestor > 0` while the second stops at
// `ancestor != -1` -- confirm which root sentinel is intended.
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// write in parent read always from old_parent
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
int cost = INT_MAX;
int opt_parent = -1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor>0){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
// repeat over the incremental (diff) reverse edges
start = rev_diff_offset[node];
end = dE;
if(node!=N-1)
end = rev_diff_offset[node+1];
while(start< end){
int p = rev_diff_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_diff_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
//write here: commit the best surviving in-edge, if any
if(cost!=INT_MAX){
Cx[node]=cost;
parent[node]=opt_parent;
}
}
}
//add inserted edges to propogate
// For every vertex (one per thread), relax its newly inserted (diff)
// out-edges so cost improvements from edge insertions propagate one step.
// A per-child spin lock serializes the read-modify-write of Cx/parent; the
// ancestor walk over parent_old rejects updates that would create a cycle
// through `child`. Improved children are flagged in addFlag.
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section: cycle check then relaxation
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
// Continue propagating insertion updates from the worklist `nodes`: relax
// the base CSR out-edges and then the diff out-edges of each listed node,
// flagging improved children in addFlag. Locking mirrors propogateAdd
// (per-child spin lock; atomicExch in the base pass, atomicCAS in the diff
// pass -- both implement the same acquire/release).
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
// second pass: incremental (diff) out-edges
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
// Continue propagating deletion updates from the worklist `nodes`. For each
// out-edge (base CSR pass first, then diff CSR pass): either relax the child
// normally, or -- when the child's current parent is `node` and the stored
// cost is no longer justified -- rebuild the child's cost from scratch by
// scanning its reverse (base + diff) in-edges, with a cycle check on every
// candidate parent. Rebuilt/improved children are flagged in addFlag.
// NOTE(review): one ancestor walk (reverse base edges in the diff pass)
// omits the early `break` after flag_cycle is set (harmless but slower),
// and the walks inconsistently stop at `> 0` vs `!= -1` -- confirm the
// intended root sentinel.
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
// second pass: incremental (diff) out-edges, same structure as above
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p ==child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child)
flag_cycle = true;
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//do in 1 thread
// Single-threaded helper (launch with one thread): push `dest` onto queue 0
// with a standard binary-heap sift-up on Cx, unless it is already on the
// open list.
template <class U>
__global__ void insertDest(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
// append at the end of queue 0's segment
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
// sift the new element up until the heap property holds
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
// Copy Cx[dest] into *val so the host can fetch it with one cudaMemcpy.
// Only thread 0 writes; extra launched threads do nothing.
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if(tid == 0)
        *val = Cx[dest];
}
#endif |
19,072 | #include <iostream>
#include <fstream>
#include <algorithm>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
//#define WRITE_TO_FILE
// Thread-block shape for the 2-D kernels: NX*NY = 1024 threads per block.
#define NX 4
#define NY 256
using namespace std;
// Callback types: f(x,y) for initial condition/velocity, f(x,y,t) for the source.
typedef double(*func2)(double,double);
typedef double(*func3)(double,double,double);
//Обработчик ошибок
// Error handler: on any non-success status, print the CUDA error message
// with the call site's file and line, then terminate the process.
static void HandleError(cudaError_t err,
                        const char *file,
                        int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
// Wraps a CUDA call with file/line error reporting via HandleError.
#define HANDLE_ERROR( error ) (HandleError( error, __FILE__, __LINE__ ))
// Initial velocity (PSI) and source term (F): identically zero here.
#define PSI(x,y) 0.0
#define F(x,y,t) 0.0
// Initial displacement; __sinf is a device intrinsic, so PHI may only be
// used inside kernels. NOTE(review): the expansion is not parenthesized.
#define PHI(x,y) __sinf(M_PI*x)*__sinf(M_PI*y)
// Compute interior points of time layer 1 from the initial condition: a
// second-order Taylor start using the PHI (displacement), PSI (velocity) and
// F (source) macros. One thread per interior grid point; the +1 offsets skip
// the fixed boundary row/column, and out-of-range threads write nothing.
__global__ void first_layer_kernel(double *U,double *Uprev,double tau,double a, int N1n,int N2n, double h1, double h2/*,func2 phi,func2 psi,func3 f*/)
{
int i=threadIdx.x+blockIdx.x*blockDim.x+1;
int j=threadIdx.y+blockIdx.y*blockDim.y+1;
if((i < N1n-1)&&(j<N2n-1))
U[i*N2n+j]=Uprev[i*N2n+j]+tau*PSI(i*h1,j*h2)+
tau*tau*0.5*F(i*h1,j*h2,0.0)+
a*a*tau*tau*0.5*((PHI((i+1)*h1,j*h2)-2.0*PHI(i*h1,j*h2)+PHI((i-1)*h1,j*h2))/(h1*h1)+(PHI(i*h1,(j+1)*h2)-2.0*PHI(i*h1,j*h2)+PHI(i*h1,(j-1)*h2))/(h2*h2));
}
// One explicit leap-frog step of the 2-D wave equation on interior points:
// Unext = 2*U - Uprev + (a*tau)^2 * discrete_laplacian(U) + F.
// NOTE(review): F is added without the tau*tau factor used in the
// first-layer formula; harmless here since F expands to 0.0 -- confirm.
__global__ void main_kernel(double *U,double *Uprev,double *Unext,double tau,double a,double t, int N1n,int N2n, double h1, double h2/*,func2 phi,func2 psi,func3 f*/)
{
int i=threadIdx.x+blockIdx.x*blockDim.x+1;
int j=threadIdx.y+blockIdx.y*blockDim.y+1;
if((i < N1n-1)&&(j<N2n-1))
Unext[i*N2n+j]=2.0*U[i*N2n+j]-Uprev[i*N2n+j]+a*a*tau*tau*((U[(i+1)*N2n+j]-2.0*U[i*N2n+j]+U[(i-1)*N2n+j])/(h1*h1)+(U[i*N2n+(j+1)]-2.0*U[i*N2n+j]+U[i*N2n+(j-1)])/(h2*h2))+F(i*h1,j*h2,t);
}
// GPU driver for the explicit 2-D wave solver on [0,L1]x[0,L2] up to time T.
// phi/psi/f are used only for the layer-0 host initialization (the kernels
// use the PHI/PSI/F macros instead). Returns the GPU time in seconds,
// measured with CUDA events around the whole time-stepping phase.
double solveGPU(double a,double L1,double L2,double T,double tau,int N1,int N2,func2 phi,func2 psi,func3 f)
{
double *Unext,*U,*Uprev,*Uloc;
double h1=L1/N1,h2=L2/N2;
int N1n=N1+1,N2n=N2+1;
double t=tau;
float gputime=0.0;
size_t size=N1n*N2n*sizeof(double);
// One thread per interior point: ceil-divide the (N1-1)x(N2-1) interior.
dim3 threads(NX,NY,1),blocks((N1-1)%NX==0?(N1-1)/NX:(N1-1)/NX+1,(N2-1)%NY==0?(N2-1)/NY:(N2-1)/NY+1,1);
Uloc=new double[N1n*N2n];
HANDLE_ERROR( cudaMalloc(&U,size) );
HANDLE_ERROR( cudaMalloc(&Unext,size) );
HANDLE_ERROR( cudaMalloc(&Uprev,size) );
#ifdef WRITE_TO_FILE
ofstream ofile("../membr2dexpl/datagpu.dat");
ofile.precision(16);
#endif
// Time layer 0: evaluate the initial condition on the host.
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
Uloc[i*N2n+j]=phi(i*h1,j*h2);
#ifdef WRITE_TO_FILE
ofile<<Uloc[i*N2n+j]<<' ';
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
ofile<<endl;
#endif
// Upload layer 0 to all three buffers (boundary values never change).
HANDLE_ERROR( cudaMemcpy(Uprev,Uloc,size,cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(U,Uprev,size,cudaMemcpyDeviceToDevice) );
HANDLE_ERROR( cudaMemcpy(Unext,Uprev,size,cudaMemcpyDeviceToDevice) );
// First time layer (Taylor start), timed together with the main loop.
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start) );
first_layer_kernel<<<blocks,threads>>>(U,Uprev,tau,a,N1n,N2n,h1,h2/*,phi,psi,f*/);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
#ifdef WRITE_TO_FILE
HANDLE_ERROR( cudaMemcpy(Uloc,U,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<Uloc[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
// Main time-stepping loop: one kernel launch per layer, rotate buffers.
while(t<T-0.5*tau)
{
main_kernel<<<blocks,threads>>>(U,Uprev,Unext,tau,a,t,N1n,N2n,h1,h2/*,phi,psi,f*/);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
#ifdef WRITE_TO_FILE
HANDLE_ERROR( cudaMemcpy(Uloc,Unext,size,cudaMemcpyDeviceToHost) );
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<Uloc[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
t+=tau;
swap(U,Unext);
swap(Uprev,Unext);
}
// NOTE(review): after the final swap pair the newest layer lives in U;
// copying Unext fetches an older layer. Uloc is unused afterwards, so this
// only matters if the result is ever consumed -- confirm intent.
HANDLE_ERROR( cudaMemcpy(Uloc,Unext,size,cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(stop) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&gputime,start,stop) );
HANDLE_ERROR( cudaFree(U) );
HANDLE_ERROR( cudaFree(Unext) );
HANDLE_ERROR( cudaFree(Uprev) );
HANDLE_ERROR( cudaEventDestroy(start) );
HANDLE_ERROR( cudaEventDestroy(stop) );
delete[] Uloc;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
// cudaEventElapsedTime reports milliseconds; convert to seconds.
return (double)gputime/1000.0;
}
// CPU reference implementation of the same explicit 2-D wave solver.
// Returns wall-clock seconds (clock()) spent on the first layer plus the
// main loop; the layer-0 initialization is excluded from the timing.
double solveCPU(double a,double L1,double L2,double T,double tau,int N1,int N2,func2 phi,func2 psi,func3 f)
{
double *Unext,*U,*Uprev;
double h1=L1/N1,h2=L2/N2;
int N1n=N1+1,N2n=N2+1;
double t=tau;
double cputime=0.0;
U=new double[N1n*N2n];
Unext=new double[N1n*N2n];
Uprev=new double[N1n*N2n];
#ifdef WRITE_TO_FILE
ofstream ofile("../membr2dexpl/datacpu.dat");
ofile.precision(16);
#endif
// Time layer 0: evaluate the initial condition.
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
Uprev[i*N2n+j]=phi(i*h1,j*h2);
#ifdef WRITE_TO_FILE
ofile<<Uprev[i*N2n+j]<<' ';
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
ofile<<endl;
#endif
// First time layer (Taylor start); boundary points are held fixed.
cputime-=(double)clock();
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
{
if((i==0)||(j==0)||(i==N1)||(j==N2))
{
U[i*N2n+j]=Uprev[i*N2n+j];
Unext[i*N2n+j]=Uprev[i*N2n+j];
}
else
{
U[i*N2n+j]=Uprev[i*N2n+j]+tau*psi(i*h1,j*h2)+
tau*tau*0.5*f(i*h1,j*h2,0.0)+
a*a*tau*tau*0.5*((phi((i+1)*h1,j*h2)-2.0*phi(i*h1,j*h2)+phi((i-1)*h1,j*h2))/(h1*h1)+(phi(i*h1,(j+1)*h2)-2.0*phi(i*h1,j*h2)+phi(i*h1,(j-1)*h2))/(h2*h2));
}
#ifdef WRITE_TO_FILE
ofile<<U[i*N2n+j]<<' ';
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
#endif
}
#ifdef WRITE_TO_FILE
ofile<<endl;
ofile<<endl;
#endif
// Main time-stepping loop: leap-frog update of interior points, then
// rotate the three layer buffers.
while(t<T-0.5*tau)
{
for(int i=1;i<N1n-1;i++)
for(int j=1;j<N2n-1;j++)
Unext[i*N2n+j]=2.0*U[i*N2n+j]-Uprev[i*N2n+j]+a*a*tau*tau*((U[(i+1)*N2n+j]-2.0*U[i*N2n+j]+U[(i-1)*N2n+j])/(h1*h1)+(U[i*N2n+(j+1)]-2.0*U[i*N2n+j]+U[i*N2n+(j-1)])/(h2*h2))+f(i*h1,j*h2,t);
#ifdef WRITE_TO_FILE
for(int i=0;i<N1n;i++)
{
for(int j=0;j<N2n;j++)
ofile<<Unext[i*N2n+j]<<' ';
ofile<<endl;
}
ofile<<endl;
ofile<<endl;
#endif
t+=tau;
swap(U,Unext);
swap(Uprev,Unext);
}
cputime+=(double)clock();
cputime/=(double)CLOCKS_PER_SEC;
delete[] U;
delete[] Unext;
delete[] Uprev;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
return cputime;
}
// Zero source term f(x, y, t) == 0 for the homogeneous wave equation.
__host__ __device__ double zero3(double a,double b,double c)
{
    (void)a; (void)b; (void)c;  // parameters intentionally unused
    return 0.0;
}
// Zero initial velocity psi(x, y) == 0 (membrane released from rest).
__host__ __device__ double zero2(double a,double b)
{
    (void)a; (void)b;  // parameters intentionally unused
    return 0.0;
}
// Initial displacement: product of sines, zero on the unit-square boundary.
__host__ __device__ double init(double x, double y)
{
    double sx = sin(M_PI*x);
    double sy = sin(M_PI*y);
    return sx * sy;
}
// Initial displacement: Gaussian bump centered at (0.5, 0.5), width sigma.
__host__ __device__ double init2(double x, double y)
{
    double sigma=0.1;
    double dx = x - 0.5;
    double dy = y - 0.5;
    // Same evaluation order as the original (/2.0/sigma/sigma) to keep
    // bit-identical floating-point results.
    return exp(-(dx*dx + dy*dy)/2.0/sigma/sigma);
}
// Initial displacement: unit disc indicator of radius 0.25 around (0.5, 0.5).
__host__ __device__ double init3(double x, double y)
{
    double dx = x - 0.5;
    double dy = y - 0.5;
    if (dx*dx + dy*dy < 0.25*0.25)
        return 1.0;
    return 0.0;
}
int main(int argc, char *argv[])
{
// Benchmark the explicit 2-D wave solver: run the CPU and GPU versions on
// the same 1000x1000 grid (T=0.1, tau=1e-4) and report times and speedup.
double cputime,gputime;
cputime=solveCPU(1.0,1.0,1.0,0.1,0.0001,1000,1000,init,zero2,zero3);
//cputime=solveCPU(1.0,1.0,1.0,10,0.01,25,25,init2,zero2,zero3);
cout<<"CPU time: "<<cputime<<endl;
gputime=solveGPU(1.0,1.0,1.0,0.1,0.0001,1000,1000,init,zero2,zero3);
//gputime=solveGPU(1.0,1.0,1.0,10,0.01,25,25,init,zero2,zero3);
cout<<"GPU time: "<<gputime<<endl;
cout<<"Ratio: "<<cputime/gputime<<endl;
return 0;
}
|
19,073 | // #include <bits/stdc++.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <climits>
#include <thrust/swap.h>
#include <thrust/extrema.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
typedef long long ll;
// CUDA_ERROR: check a runtime call; on failure print file/line/message and
// *return 1 from the enclosing function*, so it is only usable where
// `return 1` is valid (main here). NUM_BLOCKS/BLOCK_SIZE below define the
// launch shape; BLOCK_SIZE (1024) is also the bitonic-sort bucket size and
// the shared-memory array length in the kernels.
#define CUDA_ERROR(err) { \
if (err != cudaSuccess) { \
fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
return(1); \
} \
} \
#define NUM_BLOCKS 10
#define BLOCK_SIZE 1024
// Device helper for the merge passes: for each BLOCK_SIZE-element window
// starting at `start` (stepping by `step` while < `stop`), load the window
// into shared memory with the upper half mirrored -- so two sorted halves
// form a bitonic sequence -- run a full bitonic merge, and write back.
// `i` is the caller's thread index (one element per thread).
// NOTE(review): since i < BLOCK_SIZE, the test (i & BLOCK_SIZE) is always 0,
// so every merge runs in ascending order -- confirm that is the intent.
__device__ void swap_step(int* nums, int* tmp, int size, int start, int stop, int step, int i) {
// Using shared memory to store blocks and sort them
__shared__ int sh_array[BLOCK_SIZE];
// Step for bitonic merge inside merging
for (int shift = start; shift < stop; shift += step) {
// New start pointer
tmp = nums + shift;
// Right side is loaded mirrored to form a bitonic sequence
if (i >= BLOCK_SIZE / 2)
sh_array[i] = tmp[BLOCK_SIZE * 3 / 2 - 1 - i];
else
sh_array[i] = tmp[i];
__syncthreads();
// From half
for (int j = BLOCK_SIZE / 2; j > 0; j /= 2) {
unsigned int XOR = i ^ j;
// The threads with the lowest ids sort the array
if (XOR > i) {
if ((i & BLOCK_SIZE) != 0) {
// Step descending, swap(i, XOR)
if (sh_array[i] < sh_array[XOR])
thrust::swap(sh_array[i], sh_array[XOR]);
} else {
// Step ascending, swap(i, XOR)
if (sh_array[i] > sh_array[XOR])
thrust::swap(sh_array[i], sh_array[XOR]);
}
}
__syncthreads();
}
// Back from shared to temporary
tmp[i] = sh_array[i];
}
}
// One odd/even merge pass over adjacent sorted buckets: even passes pair
// buckets aligned to multiples of BLOCK_SIZE, odd passes pair buckets
// shifted by half a bucket, so sorted buckets progressively merge across
// the whole array. Blocks grid-stride over the bucket windows.
// `flag` is currently unused.
__global__ void kernel_bitonic_merge_step(int* nums, int size, bool is_odd, bool flag) {
// Temporary array for splitting into blocks
int* tmp = nums;
// Every thread gets exactly one value in the unsorted array
unsigned int i = threadIdx.x;
int id_block = blockIdx.x;
int offset = gridDim.x;
// For odd step: windows shifted by half a bucket; last half-bucket excluded
if(is_odd) {
swap_step(nums, tmp, size, (BLOCK_SIZE / 2) + id_block * BLOCK_SIZE, size - BLOCK_SIZE, offset * BLOCK_SIZE, i);
} else { // For even step
swap_step(nums, tmp, size, id_block * BLOCK_SIZE, size, offset * BLOCK_SIZE, i);
}
}
// Block-local bitonic sort pass for stage k: each thread block grid-strides
// over BLOCK_SIZE-element buckets, loads one bucket into shared memory,
// runs the full inner merge sequence (j = k/2 .. 1) and writes back.
// NOTE(review): the parameter `j` is immediately overwritten by the inner
// loop, so the host-side loop over j relaunches redundant (though still
// correct) work -- confirm whether a single launch per k was intended.
__global__ void bitonic_sort_step(int *nums, int j, int k, int size) {
// Using shared memory to store blocks and sort them
__shared__ int sh_array[BLOCK_SIZE];
// Temporary array for splitting into blocks
int* tmp = nums;
// Every thread gets exactly one value in the unsorted array
unsigned int i = threadIdx.x;
int id_block = blockIdx.x;
int offset = gridDim.x;
// Step for bitonic sort
for (int shift = id_block * BLOCK_SIZE; shift < size; shift += offset * BLOCK_SIZE) {
// New start pointer
tmp = nums + shift;
// Store in shared memory
sh_array[i] = tmp[i];
__syncthreads();
// From half
for (j = k / 2; j > 0; j /= 2) {
unsigned int XOR = i ^ j;
// The threads with the lowest ids sort the array
if (XOR > i) {
if ((i & k) != 0) {
// Step descending, swap(i, XOR)
if (sh_array[i] < sh_array[XOR])
thrust::swap(sh_array[i], sh_array[XOR]);
} else {
// Step ascending, swap(i, XOR)
if (sh_array[i] > sh_array[XOR])
thrust::swap(sh_array[i], sh_array[XOR]);
}
}
__syncthreads();
}
// Back from shared to temporary
tmp[i] = sh_array[i];
}
}
int main(int argc, char *argv[]) {
// Reads a binary int count + int array from stdin, sorts it on the GPU
// (per-bucket bitonic sort, then odd-even bucket merges), and writes the
// sorted binary array to stdout; stderr carries debug echoes.
ios_base::sync_with_stdio(false);
cin.tie(nullptr);
cout.tie(nullptr);
int size, upd_size;
// Allocating + inputting
// scanf("%d", &size);
// NOTE(review): fread return values are not checked.
fread(&size, sizeof(int), 1, stdin);
fprintf(stderr, "%d\n", size);
// Round up to a multiple of BLOCK_SIZE; padding slots get INT_MAX so they
// sort to the end and are dropped on output.
upd_size = ceil((double)size / BLOCK_SIZE) * BLOCK_SIZE;
int* data = (int*)malloc(sizeof(int) * upd_size);
int* dev_data;
CUDA_ERROR(cudaMalloc((void**)&dev_data, sizeof(int) * upd_size));
// for (int i = 0; i < size; ++i) {
// fread(&size, sizeof(int), 1, stdin);
// scanf("%d", &data[i]);
// // fprintf(stderr, "%d ", data[i]);
// }
// fprintf(stderr, "\n");
fread(data, sizeof(int), size, stdin);
for (int i = size; i < upd_size; ++i) {
data[i] = INT_MAX;
}
// Copy to device
CUDA_ERROR(cudaMemcpy(dev_data, data, sizeof(int) * upd_size, cudaMemcpyHostToDevice));
////////////////////////////////////////////////////////////////////////////////////////
// Pre sort of all blocks by bitonic sort
// Main step: stages k = 2,4,...,BLOCK_SIZE sort each bucket independently.
for (int k = 2; k <= upd_size; k *= 2) {
if (k > BLOCK_SIZE)
break;
// Merge and split step
for (int j = k / 2; j > 0; j /= 2) {
bitonic_sort_step<<<NUM_BLOCKS, BLOCK_SIZE>>>(dev_data, j, k, upd_size);
CUDA_ERROR(cudaGetLastError());
}
}
////////////////////////////////////////////////////////////////////////////////////////
/*
Implementation of odd-even sort
Sort of buckets with bitonic merge inside
| 1 3 5 7 | 2 4 6 8 | -> | 1 2 3 4 5 6 7 8| (size == 8)
Including 2 steps merge + splitting
*/
// Alternating even/odd merge passes; 2*(buckets) passes guarantee the
// odd-even transposition sort over buckets converges.
for (int i = 0; i < 2 * (upd_size / BLOCK_SIZE); ++i) {
kernel_bitonic_merge_step<<<NUM_BLOCKS, BLOCK_SIZE>>>(dev_data, upd_size, (bool)(i % 2), true);
CUDA_ERROR(cudaGetLastError());
}
CUDA_ERROR(cudaMemcpy(data, dev_data, sizeof(int) * upd_size, cudaMemcpyDeviceToHost))
CUDA_ERROR(cudaFree(dev_data));
// Echo the sorted values (without padding) to stderr, then emit binary.
for (int i = 0; i < size; ++i) {
fprintf(stderr, "%d ", data[i]);
}
fprintf(stderr, "\n");
fwrite(data, sizeof(int), size, stdout);
free(data);
return 0;
}
|
19,074 | /*
CUDA kernels and functions
Kurt Kaminski 2016
*/
#ifndef __FLUID_KERNELS__
#define __FLUID_KERNELS__
#include <cuda_runtime.h>
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Get 1d index from 2d coords
//__device__ int
//IX(int x, int y)
//{
// return x + (y * blockDim.x * gridDim.x);
//}
//
//__device__ int
//getX(int w)
//{
// int x = threadIdx.x + (blockIdx.x * blockDim.x);
// //if (x >= w) x = 0; if (x < 0) x = w-1;
// if (x >= w) return 0;
// else return x;
//}
//
//__device__ int
//getY(int h)
//{
// int y = threadIdx.y + (blockIdx.y * blockDim.y);
// //if (y >= h) y = 0; if (y < 0) y = h-1;
// if (y >= h) return 0;
// else return y;
//}
//
//__global__ void
//ClearArray(float *field, float value, int w, int h)
//{
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// field[id] = value;
//}
//
//__global__ void
//ClearArray(int *field, float value, int w, int h)
//{
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// field[id] = value;
//}
//
//__device__ int
//clamp(int i)
//{
// if (i < 0)
// i = 0;
// if (i > 255)
// i = 255;
//
// return i;
//}
//
//// Functions for converting to/from a int (4 bytes, 1 byte per RGBA, which are in the range 0-255)
//// to 4 floats in the range 0.0-1.0
//// Note how the data is stored in BGRA format due to how its stored on the GPU.
//__device__ int
//rgbaToInt(float r, float g, float b, float a)
//{
// return
// (clamp((int)(a * 255.0f)) << 24) |
// (clamp((int)(r * 255.0f)) << 16) |
// (clamp((int)(g * 255.0f)) << 8) |
// (clamp((int)(b * 255.0f)) << 0);
//}
//
//__device__
//void intToRgba(int pixel, float &r, float &g, float &b, float &a)
//{
// b = float(pixel&0xff) / 255.0f;
// g = float((pixel>>8)&0xff) / 255.0f;
// r = float((pixel>>16)&0xff) / 255.0f;
// a = float((pixel>>24)&0xff) / 255.0f;
//}
//
//
//__global__ void
//sampleKernel( int* src, int inw, int inh, int *dest, int w, int h )
//{
// int tx = threadIdx.x;
// int ty = threadIdx.y;
// int bw = blockDim.x;
// int bh = blockDim.y;
// int x = blockIdx.x*bw + tx;
// int y = blockIdx.y*bh + ty;
//
// // If the resolution isn't a multiple of the grid/thread size, or the resolutions don't match
// // we need to make sure we arn't reading or writting beyond the bounds of the data
// if (x >= inw || y >= inh)
// {
// if (x < w && y < h)
// dest[y * w + x] = rgbaToInt(0.0f, 0.0f, 0.0f, 1.0);
// return;
// }
// else if (x >= w || y >= h)
// {
// return;
// }
// else
// {
// int pixel = src[y * inw + x];
// float r,g,b,a;
// intToRgba(pixel, r, g, b, a);
//
// // Simple monochrome operation
// float v = r*0.3f + g*0.6f + b*0.1f;
// dest[y * w + x] = rgbaToInt(v, v, v, a);
// }
//}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Clamp an integer into the displayable byte range [0, 255].
__device__ int
clamp(int i)
{
    return (i < 0) ? 0 : ((i > 255) ? 255 : i);
}
// Clamp a float into the closed interval [min, max].
__device__ float
clamp(float i, float min, float max)
{
    if (i < min) return min;
    if (i > max) return max;
    return i;
}
// Linearly remap valueIn from [baseMin, baseMax] onto [limitMin, limitMax].
// No clamping is performed, so inputs outside the base range extrapolate.
__device__ float
fitRange(float valueIn, float baseMin, float baseMax, float limitMin, float limitMax)
{
    float outSpan = limitMax - limitMin;
    return (outSpan * (valueIn - baseMin) / (baseMax - baseMin)) + limitMin;
}
// Flatten 2d grid coordinates into a 1d index. The row pitch is the full
// launch width (blockDim.x * gridDim.x), so this assumes the grid covers
// the whole field width.
__device__ int
IX(int x, int y)
{
    int pitch = blockDim.x * gridDim.x;
    return y * pitch + x;
}
// Global x coordinate of this thread. Threads past the field width are
// folded back to column 0 (matches the original wrap-to-zero behavior).
__device__ int
getX(int w)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    return (x >= w) ? 0 : x;
}
// Global y coordinate of this thread. Threads past the field height are
// folded back to row 0 (matches the original wrap-to-zero behavior).
__device__ int
getY(int h)
{
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    return (y >= h) ? 0 : y;
}
// True when (x, y) sits at least two cells inside every container edge AND
// the obstacle field is empty there (first channel of _boundary < 1).
// The obstacle lookup is only performed when the coordinates are in range.
__device__ bool
checkBounds(float *_boundary, int x, int y, int w, int h)
{
    bool inside = (x > 1) && (x < w - 2) && (y > 1) && (y < h - 2);
    return inside && (_boundary[4 * IX(x, y) + 0] < 1);
}
// True when (x, y) sits at least two cells inside every container edge.
__device__ bool
checkBounds(int x, int y, int w, int h)
{
    return (x > 1) && (x < w - 2) && (y > 1) && (y < h - 2);
}
// Pack four floats in the range 0.0-1.0 into one int, one byte per channel.
// Bytes are laid out BGRA (alpha in the top byte) to match how the data is
// stored on the GPU; each channel is clamped to 0-255 before packing.
__device__ int
rgbaToInt(float r, float g, float b, float a)
{
    int ia = clamp((int)(a * 255.0f));
    int ir = clamp((int)(r * 255.0f));
    int ig = clamp((int)(g * 255.0f));
    int ib = clamp((int)(b * 255.0f));
    return (ia << 24) | (ir << 16) | (ig << 8) | ib;
}
// Unpack a BGRA-packed int (see rgbaToInt) into four floats in 0.0-1.0.
__device__ void
intToRgba(int pixel, float &r, float &g, float &b, float &a)
{
    b = float( pixel        & 0xff) / 255.0f;
    g = float((pixel >> 8)  & 0xff) / 255.0f;
    r = float((pixel >> 16) & 0xff) / 255.0f;
    a = float((pixel >> 24) & 0xff) / 255.0f;
}
// Write one pixel into a 4-floats-per-pixel buffer, stored BGRA.
__device__ void
rgbaToColor(float *dest, int id, float r, float g, float b, float a)
{
    float *px = dest + 4 * id;
    px[0] = b;
    px[1] = g;
    px[2] = r;
    px[3] = a;
}
// Set boundary conditions
// Writes boundary values for one cell (x, y) of `field`: edge cells and
// cells flagged as obstacles copy the negated value of their inward
// neighbor, and the four container corners average their two neighbors.
// `b` selects which component is being constrained (1 = u, 2 = v per the
// commented-out original below).
// NOTE(review): in each active `if` below the two branches of the ternary
// are identical (both negate), so `b` currently has no effect on the sign.
// The commented-out lines above them show the classic Stam form where the
// non-matching component is copied WITHOUT negation — confirm whether the
// always-negate behavior is intentional.
__device__ void
set_bnd( int b, int x, int y, float *field, float *boundary, int w, int h) {
int sz = w*h;
int id = IX(x,y);
// Obstacle test: first channel of the RGBA boundary field > 0 means solid.
bool outOfBnd = boundary[4*id+0] > 0.0 ? true : false;
//if (boundary[4*id+0] > 0.0) outOfBnd = true;
//if (x==0) field[id] = b==1 ? -1*field[IX(1,y)] : field[IX(1,y)];
//if (x==w-1) field[id] = b==1 ? -1*field[IX(w-2,y)] : field[IX(w-2,y)];
//if (y==0) field[id] = b==2 ? -1*field[IX(x,1)] : field[IX(x,1)];
//if (y==h-1) field[id] = b==2 ? -1*field[IX(x,h-2)] : field[IX(x,h-2)];
if (x==0 || outOfBnd) field[id] = b==1 ? -1*field[IX(1,y)] : -1 * field[IX(1,y)];
if (x==w-1 || outOfBnd) field[id] = b==1 ? -1*field[IX(w-2,y)] : -1 * field[IX(w-2,y)];
if (y==0 || outOfBnd) field[id] = b==2 ? -1*field[IX(x,1)] : -1 * field[IX(x,1)];
if (y==h-1 || outOfBnd) field[id] = b==2 ? -1*field[IX(x,h-2)] : -1 * field[IX(x,h-2)];
//if (outOfBnd){
// field[id] = -1*field[id];
// field[IX(x+1,y)] = -1*field[IX(x+1,y)];
// field[IX(x-1,y)] = -1*field[IX(x-1,y)];
// field[IX(x,y+1)] = -1*field[IX(x,y+1)];
// field[IX(x,y-1)] = -1*field[IX(x,y-1)];
//}
// Corner cells: average the two adjacent edge cells.
if (id == 0) field[id] = 0.5*(field[IX(1,0)]+field[IX(0,1)]); // southwest
if (id == sz-w) field[id] = 0.5*(field[IX(1,h-1)]+field[IX(0, h-2)]); // northwest
if (id == w-1) field[id] = 0.5*(field[IX(w-2,0)]+field[IX(w-1,1)]); // southeast
if (id == sz-1) field[id] = 0.5*(field[IX(w-2,h-1)]+field[IX(w-1,h-2)]); // northeast
}
// Fill a hard-coded rectangle (given in normalized 0..1 coordinates) of the
// field with `value`; cells outside the rectangle are untouched.
__global__ void
DrawSquare( float *field, float value, int w, int h ) {
    int x = getX(w);
    int y = getY(h);
    float posX = (float)x / w;
    float posY = (float)y / h;
    bool inRect = posX > .45 && posX < .92 && posY > .495 && posY < .51;
    if (inRect)
        field[IX(x, y)] = value;
}
//__global__ void
//DrawBnd( int *boundary, int w, int h ) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// float posX = (float)x/w;
// float posY = (float)y/h;
// if ( posX < .82 && posX > .70 && posY < .33 && posY > .21 ) {
// boundary[id] = 1;
// }
// else boundary[id] = 0;
//}
// Kernel wrapper: apply set_bnd() at this thread's grid cell.
__global__ void
SetBoundary( int b, float *field, float *boundary, int w, int h ) {
    int x = getX(w);
    int y = getY(h);
    set_bnd(b, x, y, field, boundary, w, h);
}
// Intended to accumulate the sum of a field into _sum.
// NOTE(review): `_sum` is passed BY VALUE, so the accumulation below is
// discarded when the kernel returns — this kernel currently has no
// observable effect. Even if `_sum` were a device pointer, the unguarded
// `+=` from every thread would be a data race; a proper reduction
// (atomicAdd per block, cub::DeviceReduce, or thrust::reduce) is needed.
__global__ void
getSum( float *_data, float _sum, int w, int h ) {
int x = getX(w);
int y = getY(h);
_sum += _data[IX(x,y)];
}
// Set every cell of a float field to a constant value.
__global__ void
ClearArray(float *field, float value, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    field[cell] = value;
}
// Set every cell of an int field to a constant value.
// Note `value` is a float and is truncated by the implicit int conversion.
__global__ void
ClearArray(int *field, float value, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    field[cell] = value;
}
// Fill the field with a horizontal ramp: each cell gets x * value.
__global__ void
MapArray(float *field, float value, int w, int h)
{
    int x = getX(w);
    int y = getY(h);
    field[IX(x, y)] = float(x) * value;
}
// Deposit `value * dt` into the 9x9 box of cells centered on the UI cursor
// cell (x_coord, y_coord); everything else is left alone.
// (These overloads could be unified with a template.)
__global__ void
AddFromUI ( float *field, float value, float dt, int x_coord, int y_coord, int w, int h ) {
    int x = getX(w);
    int y = getY(h);
    bool nearCursor = (x > x_coord - 5) && (x < x_coord + 5) &&
                      (y > y_coord - 5) && (y < y_coord + 5);
    if (!nearCursor)
        return;
    field[IX(x, y)] += value * dt;
}
// Accumulate one channel (index 0..3) of a 4-floats-per-pixel UI field
// into `field`, scaled by dt.
__global__ void
AddFromUI ( float *field, float *valueUI, int index, float dt, int w, int h ) {
    int cell = IX(getX(w), getY(h));
    field[cell] += valueUI[4 * cell + index] * dt;
}
// Add obstacle-carried velocity into the fluid velocity field:
// the obstacle's red channel drives u, its green channel drives v.
__global__ void
AddObstacleVelocity ( float *u, float *v, float *obstacle, float dt, int w, int h ) {
    int cell = IX(getX(w), getY(h));
    u[cell] += obstacle[4 * cell + 2] * dt;  // red channel
    v[cell] += obstacle[4 * cell + 1] * dt;  // green channel
}
// Overwrite A and B wherever the UI field is painted: a positive red
// channel sets A, a positive green channel sets B; zero cells are skipped.
__global__ void
SetFromUI ( float *A, float *B, float *valueUI, int w, int h ) {
    int cell = IX(getX(w), getY(h));
    float red   = valueUI[4 * cell + 2];
    float green = valueUI[4 * cell + 1];
    if (red > 0.0)   A[cell] = red;
    if (green > 0.0) B[cell] = green;
}
// Copy the red channel of a packed BGRA int source into a float field
// (values come out in 0.0-1.0).
__global__ void
MakeSource(int *src, float *dest, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    float r, g, b, a;
    intToRgba(src[cell], r, g, b, a);
    dest[cell] = r;
}
// Binarize one channel of a packed int source into an int mask (0 or 1).
// NOTE: under the BGRA packing used by intToRgba, `pixel & 0xff` is the
// *blue* byte, despite the original comment saying red — confirm which
// channel the callers expect.
__global__ void
MakeSource(int *src, int *dest, int w, int h)
{
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    // Bug fix: the original wrote `src[id]&0xff/255`. `/` binds tighter
    // than `&`, so that evaluated as `src[id] & (0xff/255)` == `src[id] & 1`
    // (the low bit of the pixel) rather than the intended byte extraction.
    // `(byte)/255` in integer math yields 1 only for a fully saturated
    // channel (255) and 0 otherwise.
    dest[id] = (src[id] & 0xff) / 255;
}
// Forward-Euler source injection: field += dt * source.
__global__ void
AddSource(float *field, float *source, float dt, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    field[cell] += (dt * source[cell]);
}
// Pack a scalar field into a greyscale BGRA int image with opaque alpha.
__global__ void
MakeColor(float *src, int *dest, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    float v = src[cell];
    dest[cell] = rgbaToInt(v, v, v, 1.0);
}
// Pack three scalar fields into a 4-floats-per-pixel BGRA image
// with opaque alpha.
__global__ void
MakeColor(float *src0, float *src1, float *src2, float *dest, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    rgbaToColor(dest, cell, src0[cell], src1[cell], src2[cell], 1.0);
}
// Pack four scalar fields (RGB + alpha) into a 4-floats-per-pixel
// BGRA image.
__global__ void
MakeColor(float *src0, float *src1, float *src2, float *src3, float *dest, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    rgbaToColor(dest, cell, src0[cell], src1[cell], src2[cell], src3[cell]);
}
// Pack two RGBA images side by side into one 4-floats-per-pixel BGRA
// buffer that is `stride` panels wide.
// NOTE: the right image's alpha is forced to 1.0; `a2` is never read.
__global__ void
MakeColorLong( float *r1, float *g1, float *b1, float *a1,
float *r2, float *g2, float *b2, float *a2,
float *dest, int w, int h, int stride)
{
    int idIn  = getY(h) * w + getX(w);
    int idOut = getY(h) * (w * stride) + getX(w * stride);
    // Left panel: copy all four channels.
    rgbaToColor(dest, idOut, r1[idIn], g1[idIn], b1[idIn], a1[idIn]);
    // Right panel: one image width over, opaque alpha.
    rgbaToColor(dest, idOut + w, r2[idIn], g2[idIn], b2[idIn], 1.0);
}
// Bilinearly interpolate `src` at the fractional grid position (_i, _j).
// The sample point is clamped to [0.5, dim-1.5] so that all four taps
// (i0/i1, j0/j1) stay inside the grid.
__device__ float
bilerp(float *src, float _i, float _j, int w, int h)
{
    // Bug fix: the original re-tested the RAW inputs (_i, _j) on each
    // clamp line and reassigned from them, so the second test silently
    // undid the first clamp (e.g. _i < 0.5 was clamped to 0.5, then the
    // max-test reset i back to _i). Clamp cumulatively instead.
    float iMax = float(w) - 2.0 + 0.5f;
    float jMax = float(h) - 2.0 + 0.5f;
    float i = _i;
    float j = _j;
    if (i < 0.5f) i = 0.5f;
    if (i > iMax) i = iMax;
    if (j < 0.5f) j = 0.5f;
    if (j > jMax) j = jMax;

    // Integer cell corners and interpolation weights.
    int i0 = int(i);
    int i1 = i0 + 1;
    int j0 = int(j);
    int j1 = j0 + 1;
    float s1 = (float)i - i0;   // fractional x
    float s0 = (float)1 - s1;
    float t1 = (float)j - j0;   // fractional y
    float t0 = (float)1 - t1;

    return (float) s0*(t0*src[IX(i0,j0)] + t1*src[IX(i0,j1)])+
                   s1*(t0*src[IX(i1,j0)] + t1*src[IX(i1,j1)]);
}
// Semi-Lagrangian advection of a velocity field: trace this cell's position
// backwards along (vel_u, vel_v) by one time step, sample the source
// velocity there with bilerp, and scale by `diff`. Cells outside the fluid
// domain (edges or obstacles) are zeroed.
__global__ void
Advect (float *vel_u, float *vel_v, float *src_u, float *src_v,
float *boundary, float *dest_u, float *dest_v,
float timeStep, float diff, int w, int h)
{
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    if (!checkBounds(boundary, x, y, w, h)) {
        dest_u[id] = 0.0;
        dest_v[id] = 0.0;
        return;
    }
    float dt0 = (float)timeStep * float(w-2);   // step scaled to grid units
    float i = float(x) - dt0 * vel_u[id];       // backtraced x position
    float j = float(y) - dt0 * vel_v[id];       // backtraced y position
    dest_u[id] = diff * bilerp(src_u, i, j, w, h);
    dest_v[id] = diff * bilerp(src_v, i, j, w, h);
}
// Semi-Lagrangian advection of a scalar field (see the velocity overload).
// NOTE(review): the `skipBilerp` parameter is accepted but never used.
__global__ void
Advect (float *vel_u, float *vel_v, float *src, float *boundary, float *dest,
float timeStep, float diff, bool skipBilerp, int w, int h)
{
    int x = getX(w);
    int y = getY(h);
    int id = IX(x,y);
    if (!checkBounds(boundary, x, y, w, h)) {
        dest[id] = 0.0;
        return;
    }
    float dt0 = (float)timeStep * float(w-2);   // step scaled to grid units
    float i = float(x) - dt0 * vel_u[id];       // backtraced x position
    float j = float(y) - dt0 * vel_v[id];       // backtraced y position
    dest[id] = diff * bilerp(src, i, j, w, h);
}
// Scalar 2d curl (vorticity) at cell (i, j), via central differences:
// du/dy - dv/dx.
__device__ float
curl(int i, int j, float *u, float *v)
{
    float du_dy = (u[IX(i, j+1)] - u[IX(i, j-1)]) * 0.5f;
    float dv_dx = (v[IX(i+1, j)] - v[IX(i-1, j)]) * 0.5f;
    return du_dy - dv_dx;
}
// Vorticity confinement: re-inject small-scale rotational motion lost to
// numerical dissipation. Computes the gradient of |curl|, normalizes it,
// and accumulates a force perpendicular to it (scaled by curlAmt * dt)
// into Fvc_x/Fvc_y. Cells outside the fluid domain have their force zeroed.
__global__ void
vorticityConfinement(float *u, float *v, float *Fvc_x, float *Fvc_y, float *_boundary,
float curlAmt, float dt, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
//if (x>1 && x<w-2 && y>1 && y<h-2){
if (checkBounds(_boundary, x, y, w, h)) {
// Calculate magnitude of curl(u,v) for each cell. (|w|)
// curl[I(i, j)] = Math.abs(curl(i, j));
// Find derivative of the magnitude (n = del |w|)
// NOTE(review): `abs` on a float — presumably resolves to the float
// overload under nvcc, but `fabsf` would be unambiguous; confirm.
float dw_dx = ( abs(curl(x+1,y, u, v)) - abs(curl(x-1,y, u, v)) ) * 0.5f;
float dw_dy = ( abs(curl(x,y+1, u, v)) - abs(curl(x,y-1, u, v)) ) * 0.5f;
// Calculate vector length. (|n|)
// Add small factor to prevent divide by zeros.
float length = sqrt(dw_dx * dw_dx + dw_dy * dw_dy);
length = length + 0.000001f;
//if (length == 0.0) length -= 0.000001f;
// N = ( n/|n| )
float vel = curl(x, y, u, v);
// N x w
// 0.5 = curl amount
Fvc_y[id] = Fvc_y[id] + ((dw_dx/length) * vel * dt * curlAmt);
Fvc_x[id] = Fvc_x[id] + ((dw_dy/length) * -vel * dt * curlAmt);
}
else {
Fvc_x[id] = 0.0;
Fvc_y[id] = 0.0;
}
}
// Buoyancy: copy velocity through, then for cells hotter than ambientTemp
// add a force proportional to the temperature excess (Sigma = buoy) minus
// a density-weight term (Kappa = weight).
// NOTE(review): the u-component is multiplied by 0 (horizontal buoyancy
// disabled) and the v-component by .1 — these look like hard-coded tuning
// hacks; confirm whether they should be parameters.
__global__ void
ApplyBuoyancy( float *vel_u, float *vel_v, float *temp, float *dens,
float *dest_u, float *dest_v, float ambientTemp, float buoy, float weight,
float dt, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
if (checkBounds(x, y, w, h)) {
// Pass the incoming velocity through unchanged by default.
dest_u[id] = vel_u[id];
dest_v[id] = vel_v[id];
float T = temp[id];
float Sigma = buoy;
float Kappa = weight;
if (T > ambientTemp) {
float D = dens[id];
dest_u[id] += (dt * (T - ambientTemp) * Sigma - D * Kappa) * 0;
dest_v[id] += (dt * (T - ambientTemp) * Sigma - D * Kappa) * .1;
}
else {
return;
}
}
}
// Velocity divergence via central differences, for the pressure solve.
// Neighboring obstacle cells (first channel of `boundary` > 0) contribute
// the obstacle's own velocity (channels 1/2 of the boundary field) instead
// of the fluid velocity. Out-of-domain cells are left unwritten.
__global__ void
ComputeDivergence( float *u, float *v, float *boundary, float *dest, int w, int h )
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
//if (x > 2 && x < w-2 && y > 2 && y < h-2){
if (checkBounds(x, y, w, h)){
float vN, vS, vE, vW;
// Find neighboring obstacles:
float oN = boundary[4 * IX(x, y+1) + 0];
float oS = boundary[4 * IX(x, y-1) + 0];
float oE = boundary[4 * IX(x+1, y) + 0];
float oW = boundary[4 * IX(x-1, y) + 0];
// Find neighboring velocities, use center pressure for solid cells:
vN = (oN > 0.0) ? boundary[4 * IX(x, y+1) + 1] : v[IX(x, y+1)];
vS = (oS > 0.0) ? boundary[4 * IX(x, y-1) + 1] : v[IX(x, y-1)];
vE = (oE > 0.0) ? boundary[4 * IX(x+1, y) + 2] : u[IX(x+1, y)];
vW = (oW > 0.0) ? boundary[4 * IX(x-1, y) + 2] : u[IX(x-1, y)];
//float cellSize = 1.0;
//dest[id] = (0.5 / cellSize) * ( u[IX(x+1, y)] - u[IX(x-1, y)] + v[IX(x, y+1)] - v[IX(x, y-1)] );
//dest[id] = 0.5 * ( (u[IX(x+1, y)] - u[IX(x-1, y)]) + (v[IX(x, y+1)] - v[IX(x, y-1)]) ) ;
// Central difference, scaled by the grid cell size 1/(w-2).
dest[id] = 0.5 * ( vE - vW + vN - vS ) / float(w-2);
}
else {
return;
}
}
// One Jacobi iteration of the pressure Poisson solve:
// p_new = (pW + pE + pS + pN - divergence) / 4.
// Solid neighbors (first channel of `boundary` > 0) substitute the center
// pressure, which enforces a zero pressure gradient into obstacles.
// Out-of-domain cells are left unwritten.
__global__ void
Jacobi( float *p, float *divergence, float *boundary, float *dest, int w, int h )
//Jacobi( float *p, float *divergence, cudaTextureObject_t txBoundary, float *dest, int w, int h )
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
if (checkBounds(x, y, w, h)){
// Find neighboring obstacles:
float oN = boundary[4 * IX(x, y+1) + 0];
float oS = boundary[4 * IX(x, y-1) + 0];
float oE = boundary[4 * IX(x+1, y) + 0];
float oW = boundary[4 * IX(x-1, y) + 0];
//float oN = tex1Dfetch<float>(txBoundary, 4 * IX(x, y+1) + 0);
//float oS = tex1Dfetch<float>(txBoundary, 4 * IX(x, y-1) + 0);
//float oE = tex1Dfetch<float>(txBoundary, 4 * IX(x+1, y) + 0);
//float oW = tex1Dfetch<float>(txBoundary, 4 * IX(x-1, y) + 0);
// Find neighboring pressure, use center pressure for solid cells:
//float pC = p[id];
float pN = (oN > 0.0) ? p[id] : p[IX(x, y+1)];
float pS = (oS > 0.0) ? p[id] : p[IX(x, y-1)];
float pE = (oE > 0.0) ? p[id] : p[IX(x+1, y)];
float pW = (oW > 0.0) ? p[id] : p[IX(x-1, y)];
//float cellSize = 1.0;
//float Alpha = -cellSize * cellSize;
// Alpha = -dx^2 with unit cell size; InverseBeta = 1/4 neighbors.
float Alpha = -1.0;
float bC = divergence[id];
float InverseBeta = .25;
dest[id] = (pW + pE + pS + pN + Alpha * bC) * InverseBeta;
}
else {
return;
}
}
// Projection step: subtract the pressure gradient from the velocity field
// to make it divergence-free. Cells adjacent to an obstacle have their
// velocity masked to zero (vMask); out-of-domain cells are zeroed.
__global__ void
SubtractGradient( float *vel_u, float *vel_v, float *p, float *boundary,
float *dest_u, float *dest_v, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
if (checkBounds(x, y, w, h)){
// Find neighboring obstacles:
float oN = boundary[4 * IX(x, y+1) + 0];
float oS = boundary[4 * IX(x, y-1) + 0];
float oE = boundary[4 * IX(x+1, y) + 0];
float oW = boundary[4 * IX(x-1, y) + 0];
// Use center pressure for solid cells:
//float obstU = 0.0;
//float obstV = 0.0;
//float vMask = 1.0;
// Find neighboring pressure:
//float pN = p[IX(x, y+1)];
//float pS = p[IX(x, y-1)];
//float pE = p[IX(x+1, y)];
//float pW = p[IX(x-1, y)];
//float pC = p[id];
//if (oN > 0.0) {pN = p[id]; obstV = boundary[4 * IX(x, y+1) + 1]; vMask = 0.0; }
//if (oS > 0.0) {pS = p[id]; obstV = boundary[4 * IX(x, y-1) + 1]; vMask = 0.0; }
//if (oE > 0.0) {pE = p[id]; obstU = boundary[4 * IX(x+1, y) + 2]; vMask = 0.0; }
//if (oW > 0.0) {pW = p[id]; obstU = boundary[4 * IX(x+1, y) + 2]; vMask = 0.0; }
float pN = (oN > 0.0) ? p[id] : p[IX(x, y+1)];
float pS = (oS > 0.0) ? p[id] : p[IX(x, y-1)];
float pE = (oE > 0.0) ? p[id] : p[IX(x+1, y)];
float pW = (oW > 0.0) ? p[id] : p[IX(x-1, y)];
float obstV = (oN > 0.0) ? boundary[4 * IX(x, y+1) + 1] :
(oS > 0.0) ? boundary[4 * IX(x, y-1) + 1] : 0.0;
// NOTE(review): the oW branch below reads IX(x+1, y) — same as the oE
// branch — which looks like a copy-paste slip for IX(x-1, y). It is
// currently harmless because obstU is overwritten with 0 further down.
float obstU = (oE > 0.0) ? boundary[4 * IX(x+1, y) + 2] :
(oW > 0.0) ? boundary[4 * IX(x+1, y) + 2] : 0.0;
float vMask = (oN > 0.0 || oS > 0.0 || oE > 0.0 || oW > 0.0) ? 0.0 : 1.0;
// Enforce the free-slip boundary condition:
float old_u = vel_u[id];
float old_v = vel_v[id];
//float cellSize = 1.0;
//float GradientScale = 1.125 / cellSize;
float GradientScale = 0.5 * float(w-2);
float grad_u = (pE - pW) * GradientScale;
float grad_v = (pN - pS) * GradientScale;
float new_u = old_u - grad_u;
float new_v = old_v - grad_v;
// Obstacle velocities are forced to zero, so near-obstacle cells end up
// with zero velocity rather than inheriting the obstacle's motion.
obstU = 0;
obstV = 0;
dest_u[id] = (vMask * new_u) + obstU;
dest_v[id] = (vMask * new_v) + obstV;
}
else {
dest_u[id] = 0.0;
dest_v[id] = 0.0;
}
}
// Compute the scaled discrete Laplacian of _chem into _lap:
// _lap = alpha * (4-neighbor Laplacian), where alpha = _difConst * dt / (dx*dy).
// The result is applied separately by AddLaplacian. Out-of-domain cells are
// left unwritten. `_boundary` is accepted but unused (see the commented-out
// bounds check below).
__global__ void
Diffusion(float *_chem, float *_lap, float *_boundary, float _difConst, float xLen, float yLen, float dt, int w, int h)
{
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
// have to do this check for non-powers of 2 to work...?
//if (checkBounds(_boundary, x, y, w, h)) {
if (checkBounds(x, y, w, h)) {
// constants
//float xLength = (float)x/100.0; //gray-scott
//float xLength = (float)x/1.21212; //barkley model
// NOTE(review): dx = (x/xLen)/x algebraically reduces to 1/xLen (and
// likewise dy = 1/yLen) for every in-bounds cell — the per-cell
// divisions look redundant; confirm before simplifying.
float xLength = (float)x/xLen;
float yLength = (float)y/yLen;
float dx = (float)xLength/(float)x;
float dy = (float)yLength/(float)y;
float alpha = (float)(_difConst * dt / (float)(dx*dy));
_lap[id] = (float)(-4.0f * _chem[id]) + (float)(_chem[IX(x+1,y)] + _chem[IX(x-1,y)] + _chem[IX(x,y+1)] + _chem[IX(x,y-1)]);
_lap[id] = (float)_lap[id]*alpha;
}
else {
return;
}
}
// Apply the precomputed (already alpha-scaled) Laplacian increment from
// Diffusion() to the chemical field.
__global__ void
AddLaplacian( float *_chem, float *_lap, int w, int h)
{
    int cell = IX(getX(w), getY(h));
    _chem[cell] += _lap[cell];
}
// Reaction step for a two-chemical reaction-diffusion system.
// rdEquation == 1 selects the Barkley model; anything else selects
// Gray-Scott with feed rate F and kill rate k (e is only used by Barkley).
// Inputs are clamped to <= 1.0 before reacting. Cells outside the fluid
// domain have both chemicals negated (sign flip each call).
__global__ void
React( float *_chemA, float *_chemB, float F, float k, float e, int rdEquation, float *_boundary, float dt, int w, int h) {
int x = getX(w);
int y = getY(h);
int id = IX(x,y);
if (checkBounds(_boundary, x, y, w, h)) {
//float A = _chemA[id];
//float B = _chemB[id];
// Gray-Scott
//float F = 0.05;
//float k = 0.0675;
//float F = 0.0140;
//float k = 0.0490;
//float F = 0.0545;
//float k = 0.062;
//float F = F_input[id]&0xff/255;
//F = fitRange(F, 0.0, 1.0, 0.014, 0.066);
//
//float k = 1.0 - (F_input[id]&0xff/255);
//k = fitRange(k, 0.0, 1.0, 0.05, 0.068);
//if (A > 1.) A = 1.; if (B > 1.) B = 1.;
float A = (_chemA[id] > 1.0) ? 1.0 : _chemA[id];
float B = (_chemB[id] > 1.0) ? 1.0 : _chemB[id];
// Barkley Model
// NOTE(review): reactionA/reactionB already include a factor of dt, and
// the += lines multiply by dt again — the Barkley branch applies dt
// squared. Confirm whether that is intentional tuning or a bug.
if (rdEquation == 1) {
float reactionA = A * (1-A) * ((A- (B+k)/F) / e) * dt;
float reactionB = (A - B) * dt;
_chemA[id] += (dt * reactionA);
_chemB[id] += (dt * reactionB);
return;
}
//// Barkley Turbulence
//else if (rdEquation == 2) {
// float reactionA = A * (1-A) * ((A- (B+k)/F) / e) * dt;
// float reactionB = (A*A*A - B) * dt;
// _chemA[id] += (dt * reactionA);
// _chemB[id] += (dt * reactionB);
// return;
//}
// Gray-Scott
else {
float reactionA = -A * (B*B) + (F * (1.0-A));
float reactionB = A * (B*B) - (F+k)*B;
_chemA[id] += (dt * reactionA);
_chemB[id] += (dt * reactionB);
return;
}
}
else {
// Out-of-domain / obstacle cells: flip the sign of both chemicals.
_chemA[id] *= -1.0;
_chemB[id] *= -1.0;
}
}
////////////////////////////////////////////////////////////////////////
//// Wavelet Turbulence Kernels
////////////////////////////////////////////////////////////////////////
//__device__ float
//minDx(int x, int y, float *input, int w, int h) {
// //const int index = x + y * res[0] + z * res[0] * res[1];
// const int maxx = w-2;
// const float FLT_MAX = 1.70141e+38;
//
// // get grid values
// float center = input[IX(x,y)];
// float left = (x <= 1) ? FLT_MAX : input[IX(x-1,y)];
// float right = (x >= maxx) ? FLT_MAX : input[IX(x+1,y)];
//
// const float dx = w;
//
// // get all the derivative estimates
// float dLeft = (x <= 1) ? FLT_MAX : (center - left) * dx;
// float dRight = (x >= maxx) ? FLT_MAX : (right - center) * dx;
// float dCenter = (x <= 1 || x >= maxx) ? FLT_MAX : (right - left) * dx * 0.5f;
//
// // if it's on a boundary, only one estimate is valid
// if (x <= 1) return dRight;
// if (x >= maxx) return dLeft;
//
// // if it's not on a boundary, get the smallest one
// float finalD;
// finalD = (abs(dCenter) < abs(dRight)) ? dCenter : dRight;
// finalD = (abs(finalD) < abs(dLeft)) ? finalD : dLeft;
//
// return finalD;
//}
//
//__device__ float
//minDy(int x, int y, float *input, int w, int h) {
// //const int index = x + y * res[0] + z * res[0] * res[1];
// const int maxy = h-2;
// const float FLT_MAX = 1.70141e+38;
//
// // get grid values
// float center = input[IX(x,y)];
// float down = (y <= 1) ? FLT_MAX : input[IX(x,y-1)];
// float up = (y >= maxy) ? FLT_MAX : input[IX(x,y+1)];
//
// const float dx = h; // only for square domains
//
// // get all the derivative estimates
// float dDown = (y <= 1) ? FLT_MAX : (center - down) * dx;
// float dUp = (y >= maxy) ? FLT_MAX : (up - center) * dx;
// float dCenter = (y <= 1 || y >= maxy) ? FLT_MAX : (up - down) * dx * 0.5f;
//
// // if it's on a boundary, only one estimate is valid
// if (y <= 1) return dUp;
// if (y >= maxy) return dDown;
//
// // if it's not on a boundary, get the smallest one
// float finalD = (abs(dCenter) < abs(dUp)) ? dCenter : dUp;
// finalD = (abs(finalD) < abs(dDown)) ? finalD : dDown;
//
// return finalD;
//}
//
//
//__global__ void
//ComputeEigenvalues(float *_tcU, float *_tcV, float *_eigMin, float *_eigMax, int w, int h) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// float A[2][2] = {
// { minDx(x, y, _tcU, w, h), minDx(x, y, _tcV, w, h) },
// { minDy(x, y, _tcU, w, h), minDy(x, y, _tcV, w, h) }
// };
//
// const float T = A[0][0] + A[1][1];
// const float D = (A[0][0]*A[1][1]) - (A[0][1]*A[1][0]);
//
// _eigMin[id] = (T*0.5) - pow((T*T)/(4.0-D), 0.5);
// _eigMax[id] = (T*0.5) + pow((T*T)/(4.0-D), 0.5);
//
//}
//
//__global__ void
//ComputeEnergy(float *_energy, float *_xvel, float *_yvel, int w, int h) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// _energy[id] = 0.5 * (_xvel[id]*_xvel[id] + _yvel[id]*_yvel[id]);
//
//}
//
//__global__ void
//CopyObstacles(float *inObstacles, unsigned char *_obstacles, int w, int h) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// if (inObstacles[4 * id + 0] > 0) {
// _obstacles[id] = 1;
// }
// else {
// _obstacles[id] = 0;
// }
//}
//
//__global__ void
//IterateEnergy(float *_energy, unsigned char *_obstacles, int w, int h){
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// const int MARCHED = 2;
// const int RETIRED = 4;
// const int slabSizeSm = w * h;
//
// if (checkBounds(x, y, w, h)) {
// if (_obstacles[id] && _obstacles[id] != RETIRED) {
// float sum = 0.0f;
// int valid = 0;
//
// if (!_obstacles[id + 1] || _obstacles[id + 1] == RETIRED) {
// sum += _energy[id + 1];
// valid++;
// }
// if (!_obstacles[id - 1] || _obstacles[id - 1] == RETIRED) {
// sum += _energy[id - 1];
// valid++;
// }
// if (!_obstacles[id + w] || _obstacles[id + w] == RETIRED) {
// sum += _energy[id + w];
// valid++;
// }
// if (!_obstacles[id - w] || _obstacles[id - w] == RETIRED) {
// sum += _energy[id - w];
// valid++;
// }
// if (!_obstacles[id + slabSizeSm] || _obstacles[id + slabSizeSm] == RETIRED) {
// sum += _energy[id + slabSizeSm];
// valid++;
// }
// if (!_obstacles[id - slabSizeSm] || _obstacles[id - slabSizeSm] == RETIRED) {
// sum += _energy[id - slabSizeSm];
// valid++;
// }
// if (valid > 0) {
// _energy[id] = sum / float(valid);
// _obstacles[id] = MARCHED;
// }
// }
// }
//}
//
//__global__ void
//CheckMarchedObstacles(unsigned char *_obstacles, int w, int h) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// const int MARCHED = 2;
// const int RETIRED = 4;
//
// if (checkBounds(x, y, w, h)) {
// if (_obstacles[id] == MARCHED) {
// _obstacles[id] = RETIRED;
// }
// }
//
//}
//
//__global__ void
//DownsampleNeumann(float *from, float *to, int n, int stride, int w, int h) {
// int x = getX(w);
// int y = getY(h);
// int id = IX(x,y);
//
// // if these values are not local incorrect results are generated
// float downCoeffs[32] = { 0.000334f,-0.001528f, 0.000410f, 0.003545f,-0.000938f,-0.008233f, 0.002172f, 0.019120f,
// -0.005040f,-0.044412f, 0.011655f, 0.103311f,-0.025936f,-0.243780f, 0.033979f, 0.655340f,
// 0.655340f, 0.033979f,-0.243780f,-0.025936f, 0.103311f, 0.011655f,-0.044412f,-0.005040f,
// 0.019120f, 0.002172f,-0.008233f,-0.000938f, 0.003546f, 0.000410f,-0.001528f, 0.000334f };
//
// const float *const aCoCenter= &downCoeffs[16];
// for (int i = 0; i < n / 2; i++) {
// to[i * stride] = 0;
// for (int k = 2 * i - 16; k < 2 * i + 16; k++) {
// // handle boundary
// float fromval;
// if (k < 0) {
// fromval = from[0];
// } else if(k > n - 1) {
// fromval = from[(n - 1) * stride];
// } else {
// fromval = from[k * stride];
// }
// to[i * stride] += aCoCenter[k - 2 * i] * fromval;
// }
// }
//}
#endif |
19,075 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
// Element-wise complex multiply of two M*L arrays: c[i] = a[i] * b[i].
// Launch layout: blockIdx.y indexes rows and threadIdx.x indexes columns,
// which assumes blockDim.x == L — TODO confirm against the launch site.
__global__ void DianCheng(cufftDoubleComplex *a, cufftDoubleComplex *b, cufftDoubleComplex *c,int M, int L)
{
    int tx = threadIdx.x;
    int by = blockIdx.y;
    int i = by * L + tx;
    // Bug fix: the original guard was `i <= M*L`, which allowed the thread
    // with i == M*L to read/write one element past the end of the M*L
    // buffers. Valid indices are 0 .. M*L-1.
    if (i < M * L)
    {
        // (ax + i*ay) * (bx + i*by) = (ax*bx - ay*by) + i*(ax*by + ay*bx)
        c[i].x = a[i].x * b[i].x - a[i].y * b[i].y;
        c[i].y = a[i].x * b[i].y + a[i].y * b[i].x;
    }
}
19,076 | #define t_max 1
#define t 1
/*
(w1_a[0][0]=((a[0][0][0][0][1]*(a[0][0][0][0][1]+1.0))*((a[0][0][0][0][1]+2.0)*0.16666666666666666)))
(w2_a[0][0]=(((a[0][0][0][0][1]-1.0)*(a[0][0][0][0][1]+1.0))*((a[0][0][0][0][1]+2.0)*-0.5)))
(w3_a[0][0]=(((a[0][0][0][0][1]-1.0)*a[0][0][0][0][1])*((a[0][0][0][0][1]+2.0)*0.5)))
(w4_a[0][0]=(((a[0][0][0][0][1]-1.0)*a[0][0][0][0][1])*((a[0][0][0][0][1]+1.0)*-0.16666666666666666)))
(w1_b[0][0]=((b[0][0][0][0][2]*(b[0][0][0][0][2]+1.0))*((b[0][0][0][0][2]+2.0)*0.16666666666666666)))
(w2_b[0][0]=(((b[0][0][0][0][2]-1.0)*(b[0][0][0][0][2]+1.0))*((b[0][0][0][0][2]+2.0)*-0.5)))
(w3_b[0][0]=(((b[0][0][0][0][2]-1.0)*b[0][0][0][0][2])*((b[0][0][0][0][2]+2.0)*0.5)))
(w4_b[0][0]=(((b[0][0][0][0][2]-1.0)*b[0][0][0][0][2])*((b[0][0][0][0][2]+1.0)*-0.16666666666666666)))
(w1_c[0][0]=((c[0][0][0][0][3]*(c[0][0][0][0][3]+1.0))*((c[0][0][0][0][3]+2.0)*0.16666666666666666)))
(w2_c[0][0]=(((c[0][0][0][0][3]-1.0)*(c[0][0][0][0][3]+1.0))*((c[0][0][0][0][3]+2.0)*-0.5)))
(w3_c[0][0]=(((c[0][0][0][0][3]-1.0)*c[0][0][0][0][3])*((c[0][0][0][0][3]+2.0)*0.5)))
(w4_c[0][0]=(((c[0][0][0][0][3]-1.0)*c[0][0][0][0][3])*((c[0][0][0][0][3]+1.0)*-0.16666666666666666)))
(u[0][0][0][1][0]=((((((((w1_a*w1_b)*(w1_c*u[-1][-1][-1][0][0]))+((w2_a*w1_b)*(w1_c*u[0][-1][-1][0][0])))+(((w3_a*w1_b)*(w1_c*u[1][-1][-1][0][0]))+((w4_a*w1_b)*(w1_c*u[2][-1][-1][0][0]))))+((((w1_a*w2_b)*(w1_c*u[-1][0][-1][0][0]))+((w2_a*w2_b)*(w1_c*u[0][0][-1][0][0])))+(((w3_a*w2_b)*(w1_c*u[1][0][-1][0][0]))+((w4_a*w2_b)*(w1_c*u[2][0][-1][0][0])))))+(((((w1_a*w3_b)*(w1_c*u[-1][1][-1][0][0]))+((w2_a*w3_b)*(w1_c*u[0][1][-1][0][0])))+(((w3_a*w3_b)*(w1_c*u[1][1][-1][0][0]))+((w4_a*w3_b)*(w1_c*u[2][1][-1][0][0]))))+((((w1_a*w4_b)*(w1_c*u[-1][2][-1][0][0]))+((w2_a*w4_b)*(w1_c*u[0][2][-1][0][0])))+(((w3_a*w4_b)*(w1_c*u[1][2][-1][0][0]))+((w4_a*w4_b)*(w1_c*u[2][2][-1][0][0]))))))+((((((w1_a*w1_b)*(w2_c*u[-1][-1][0][0][0]))+((w2_a*w1_b)*(w2_c*u[0][-1][0][0][0])))+(((w3_a*w1_b)*(w2_c*u[1][-1][0][0][0]))+((w4_a*w1_b)*(w2_c*u[2][-1][0][0][0]))))+((((w1_a*w2_b)*(w2_c*u[-1][0][0][0][0]))+((w2_a*w2_b)*(w2_c*u[0][0][0][0][0])))+(((w3_a*w2_b)*(w2_c*u[1][0][0][0][0]))+((w4_a*w2_b)*(w2_c*u[2][0][0][0][0])))))+(((((w1_a*w3_b)*(w2_c*u[-1][1][0][0][0]))+((w2_a*w3_b)*(w2_c*u[0][1][0][0][0])))+(((w3_a*w3_b)*(w2_c*u[1][1][0][0][0]))+((w4_a*w3_b)*(w2_c*u[2][1][0][0][0]))))+((((w1_a*w4_b)*(w2_c*u[-1][2][0][0][0]))+((w2_a*w4_b)*(w2_c*u[0][2][0][0][0])))+(((w3_a*w4_b)*(w2_c*u[1][2][0][0][0]))+((w4_a*w4_b)*(w2_c*u[2][2][0][0][0])))))))+(((((((w1_a*w1_b)*(w3_c*u[-1][-1][1][0][0]))+((w2_a*w1_b)*(w3_c*u[0][-1][1][0][0])))+(((w3_a*w1_b)*(w3_c*u[1][-1][1][0][0]))+((w4_a*w1_b)*(w3_c*u[2][-1][1][0][0]))))+((((w1_a*w2_b)*(w3_c*u[-1][0][1][0][0]))+((w2_a*w2_b)*(w3_c*u[0][0][1][0][0])))+(((w3_a*w2_b)*(w3_c*u[1][0][1][0][0]))+((w4_a*w2_b)*(w3_c*u[2][0][1][0][0])))))+(((((w1_a*w3_b)*(w3_c*u[-1][1][1][0][0]))+((w2_a*w3_b)*(w3_c*u[0][1][1][0][0])))+(((w3_a*w3_b)*(w3_c*u[1][1][1][0][0]))+((w4_a*w3_b)*(w3_c*u[2][1][1][0][0]))))+((((w1_a*w4_b)*(w3_c*u[-1][2][1][0][0]))+((w2_a*w4_b)*(w3_c*u[0][2][1][0][0])))+(((w3_a*w4_b)*(w3_c*u[1][2][1][0][0]))+((w4_a*w4_b)*(w3_c*u[2][2][1][0][0]))))))+((((((w1_a*w1_b)*(w4_c*u
[-1][-1][2][0][0]))+((w2_a*w1_b)*(w4_c*u[0][-1][2][0][0])))+(((w3_a*w1_b)*(w4_c*u[1][-1][2][0][0]))+((w4_a*w1_b)*(w4_c*u[2][-1][2][0][0]))))+((((w1_a*w2_b)*(w4_c*u[-1][0][2][0][0]))+((w2_a*w2_b)*(w4_c*u[0][0][2][0][0])))+(((w3_a*w2_b)*(w4_c*u[1][0][2][0][0]))+((w4_a*w2_b)*(w4_c*u[2][0][2][0][0])))))+(((((w1_a*w3_b)*(w4_c*u[-1][1][2][0][0]))+((w2_a*w3_b)*(w4_c*u[0][1][2][0][0])))+(((w3_a*w3_b)*(w4_c*u[1][1][2][0][0]))+((w4_a*w3_b)*(w4_c*u[2][1][2][0][0]))))+((((w1_a*w4_b)*(w4_c*u[-1][2][2][0][0]))+((w2_a*w4_b)*(w4_c*u[0][2][2][0][0])))+(((w3_a*w4_b)*(w4_c*u[1][2][2][0][0]))+((w4_a*w4_b)*(w4_c*u[2][2][2][0][0])))))))))
*/
/*
 * Tricubic interpolation step (machine-generated stencil-compiler output).
 *
 * For each thread's grid cell, w1..w4_{a,b,c} are four cubic (Lagrange-style)
 * interpolation weights per axis, evaluated from the per-cell offsets stored
 * in a_1_0, b_2_0 and c_3_0.  _idx1.._idx64 enumerate a 4x4x4 neighborhood of
 * u_0_1, and the weighted sum of those 64 samples is written to u_0_0[_idx22].
 *
 * NOTE(review): the variable `t` is used in every neighborhood-index formula
 * below, but its declaration (`int t;`) and the surrounding time loop are
 * commented out, so this kernel does not compile as-is -- TODO confirm the
 * intended declaration/value of `t`.
 * NOTE(review): the parameter u_0_1_out is never referenced in this body, and
 * p_idx_x_max / p_idx_y_max / p_idx_z_max / size_1_1 are computed but unused
 * (typical generated-code artifacts).
 */
__global__ void tricubic_interpolation(double * * u_0_1_out, double * u_0_0, double * u_0_1, double * a_1_0, double * b_2_0, double * c_3_0, int x_max, int y_max, int z_max)
{
/*
const double * const u__a_1[16] = { a_1_0 } ;
const double * const u__b_2[16] = { b_2_0 } ;
const double * const u__c_3[16] = { c_3_0 } ;
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
*/
/* Per-axis interpolation weights (four basis weights per axis). */
double w1_a;
double w1_b;
double w1_c;
double w2_a;
double w2_b;
double w2_c;
double w3_a;
double w3_b;
double w3_c;
double w4_a;
double w4_b;
double w4_c;
/* _idx0 is the thread's own cell; _idx1.._idx64 address the 4x4x4 stencil neighborhood. */
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx16;
int _idx17;
int _idx18;
int _idx19;
int _idx2;
int _idx20;
int _idx21;
int _idx22;
int _idx23;
int _idx24;
int _idx25;
int _idx26;
int _idx27;
int _idx28;
int _idx29;
int _idx3;
int _idx30;
int _idx31;
int _idx32;
int _idx33;
int _idx34;
int _idx35;
int _idx36;
int _idx37;
int _idx38;
int _idx39;
int _idx4;
int _idx40;
int _idx41;
int _idx42;
int _idx43;
int _idx44;
int _idx45;
int _idx46;
int _idx47;
int _idx48;
int _idx49;
int _idx5;
int _idx50;
int _idx51;
int _idx52;
int _idx53;
int _idx54;
int _idx55;
int _idx56;
int _idx57;
int _idx58;
int _idx59;
int _idx6;
int _idx60;
int _idx61;
int _idx62;
int _idx63;
int _idx64;
int _idx7;
int _idx8;
int _idx9;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations
*/
/* blockIdx.y encodes both the y and z block coordinates; split it here. */
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
_idx0=((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x);
/* Cubic weights from the per-cell offsets; each w_k is 1 when the offset sits on its basis node and 0 on the other three. */
w1_a=((a_1_0[_idx0]*(a_1_0[_idx0]+1.0))*((a_1_0[_idx0]+2.0)*0.16666666666666666));
w2_a=(((a_1_0[_idx0]-1.0)*(a_1_0[_idx0]+1.0))*((a_1_0[_idx0]+2.0)*-0.5));
w3_a=(((a_1_0[_idx0]-1.0)*a_1_0[_idx0])*((a_1_0[_idx0]+2.0)*0.5));
w4_a=(((a_1_0[_idx0]-1.0)*a_1_0[_idx0])*((a_1_0[_idx0]+1.0)*-0.16666666666666666));
w1_b=((b_2_0[_idx0]*(b_2_0[_idx0]+1.0))*((b_2_0[_idx0]+2.0)*0.16666666666666666));
w2_b=(((b_2_0[_idx0]-1.0)*(b_2_0[_idx0]+1.0))*((b_2_0[_idx0]+2.0)*-0.5));
w3_b=(((b_2_0[_idx0]-1.0)*b_2_0[_idx0])*((b_2_0[_idx0]+2.0)*0.5));
w4_b=(((b_2_0[_idx0]-1.0)*b_2_0[_idx0])*((b_2_0[_idx0]+1.0)*-0.16666666666666666));
w1_c=((c_3_0[_idx0]*(c_3_0[_idx0]+1.0))*((c_3_0[_idx0]+2.0)*0.16666666666666666));
w2_c=(((c_3_0[_idx0]-1.0)*(c_3_0[_idx0]+1.0))*((c_3_0[_idx0]+2.0)*-0.5));
w3_c=(((c_3_0[_idx0]-1.0)*c_3_0[_idx0])*((c_3_0[_idx0]+2.0)*0.5));
w4_c=(((c_3_0[_idx0]-1.0)*c_3_0[_idx0])*((c_3_0[_idx0]+1.0)*-0.16666666666666666));
/* Each _idxN below is derived incrementally from an earlier index; the full closed form is given in the generated comment above each assignment. */
/* _idx1 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx1=((((_idx0+(((3*p_idx_z)*t)*y_max))+(((3*p_idx_z)*t)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t));
/* _idx2 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx2=(_idx1+1);
/* _idx3 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx3=(_idx2+1);
/* _idx4 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx4=(_idx2+2);
/* _idx5 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx5=((_idx1+x_max)+(3*t));
/* _idx6 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx6=(_idx5+1);
/* _idx7 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx7=(_idx5+2);
/* _idx8 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx8=(_idx7+1);
/* _idx9 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx9=((_idx5+x_max)+(3*t));
/* _idx10 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx10=(_idx9+1);
/* _idx11 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx11=(_idx10+1);
/* _idx12 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx12=(_idx11+1);
/* _idx13 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx13=((_idx9+x_max)+(3*t));
/* _idx14 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx14=(_idx13+1);
/* _idx15 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx15=(_idx14+1);
/* _idx16 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx16=(_idx14+2);
/* _idx17 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx17=(((_idx1+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx18 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx18=(_idx17+1);
/* _idx19 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx19=(_idx18+1);
/* _idx20 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx20=(_idx19+1);
/* _idx21 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx21=((_idx17+x_max)+(3*t));
/* _idx22 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx22=(_idx21+1);
/* _idx23 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx23=(_idx22+1);
/* _idx24 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx24=(_idx23+1);
/* _idx25 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx25=((_idx21+x_max)+(3*t));
/* _idx26 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx26=(_idx25+1);
/* _idx27 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx27=(_idx25+2);
/* _idx28 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx28=(_idx27+1);
/* _idx29 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx29=((_idx25+x_max)+(3*t));
/* _idx30 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx30=(_idx29+1);
/* _idx31 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx31=(_idx29+2);
/* _idx32 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx32=(_idx29+3);
/* _idx33 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx33=(((_idx17+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx34 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx34=(_idx33+1);
/* _idx35 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx35=(_idx34+1);
/* _idx36 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx36=(_idx34+2);
/* _idx37 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx37=((_idx33+x_max)+(3*t));
/* _idx38 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx38=(_idx37+1);
/* _idx39 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx39=(_idx37+2);
/* _idx40 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx40=(_idx39+1);
/* _idx41 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx41=((_idx37+x_max)+(3*t));
/* _idx42 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx42=(_idx41+1);
/* _idx43 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx43=(_idx41+2);
/* _idx44 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx44=(_idx41+3);
/* _idx45 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx45=((_idx41+x_max)+(3*t));
/* _idx46 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx46=(_idx45+1);
/* _idx47 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx47=(_idx45+2);
/* _idx48 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx48=(_idx45+3);
/* _idx49 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx49=(((_idx33+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx50 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx50=(_idx49+1);
/* _idx51 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx51=(_idx50+1);
/* _idx52 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx52=(_idx50+2);
/* _idx53 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx53=((_idx49+x_max)+(3*t));
/* _idx54 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx54=(_idx53+1);
/* _idx55 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx55=(_idx53+2);
/* _idx56 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx56=(_idx53+3);
/* _idx57 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx57=((_idx53+x_max)+(3*t));
/* _idx58 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx58=(_idx57+1);
/* _idx59 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx59=(_idx57+2);
/* _idx60 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx60=(_idx57+3);
/* _idx61 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx61=((_idx57+x_max)+(3*t));
/* _idx62 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx62=(_idx61+1);
/* _idx63 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx63=(_idx61+2);
/* _idx64 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx64=(_idx63+1);
/* Weighted sum of the 64 neighborhood samples of u_0_1, written into u_0_0. */
u_0_0[_idx22]=((((((((w1_a*w1_b)*(w1_c*u_0_1[_idx1]))+((w2_a*w1_b)*(w1_c*u_0_1[_idx2])))+(((w3_a*w1_b)*(w1_c*u_0_1[_idx3]))+((w4_a*w1_b)*(w1_c*u_0_1[_idx4]))))+((((w1_a*w2_b)*(w1_c*u_0_1[_idx5]))+((w2_a*w2_b)*(w1_c*u_0_1[_idx6])))+(((w3_a*w2_b)*(w1_c*u_0_1[_idx7]))+((w4_a*w2_b)*(w1_c*u_0_1[_idx8])))))+(((((w1_a*w3_b)*(w1_c*u_0_1[_idx9]))+((w2_a*w3_b)*(w1_c*u_0_1[_idx10])))+(((w3_a*w3_b)*(w1_c*u_0_1[_idx11]))+((w4_a*w3_b)*(w1_c*u_0_1[_idx12]))))+((((w1_a*w4_b)*(w1_c*u_0_1[_idx13]))+((w2_a*w4_b)*(w1_c*u_0_1[_idx14])))+(((w3_a*w4_b)*(w1_c*u_0_1[_idx15]))+((w4_a*w4_b)*(w1_c*u_0_1[_idx16]))))))+((((((w1_a*w1_b)*(w2_c*u_0_1[_idx17]))+((w2_a*w1_b)*(w2_c*u_0_1[_idx18])))+(((w3_a*w1_b)*(w2_c*u_0_1[_idx19]))+((w4_a*w1_b)*(w2_c*u_0_1[_idx20]))))+((((w1_a*w2_b)*(w2_c*u_0_1[_idx21]))+((w2_a*w2_b)*(w2_c*u_0_1[_idx22])))+(((w3_a*w2_b)*(w2_c*u_0_1[_idx23]))+((w4_a*w2_b)*(w2_c*u_0_1[_idx24])))))+(((((w1_a*w3_b)*(w2_c*u_0_1[_idx25]))+((w2_a*w3_b)*(w2_c*u_0_1[_idx26])))+(((w3_a*w3_b)*(w2_c*u_0_1[_idx27]))+((w4_a*w3_b)*(w2_c*u_0_1[_idx28]))))+((((w1_a*w4_b)*(w2_c*u_0_1[_idx29]))+((w2_a*w4_b)*(w2_c*u_0_1[_idx30])))+(((w3_a*w4_b)*(w2_c*u_0_1[_idx31]))+((w4_a*w4_b)*(w2_c*u_0_1[_idx32])))))))+(((((((w1_a*w1_b)*(w3_c*u_0_1[_idx33]))+((w2_a*w1_b)*(w3_c*u_0_1[_idx34])))+(((w3_a*w1_b)*(w3_c*u_0_1[_idx35]))+((w4_a*w1_b)*(w3_c*u_0_1[_idx36]))))+((((w1_a*w2_b)*(w3_c*u_0_1[_idx37]))+((w2_a*w2_b)*(w3_c*u_0_1[_idx38])))+(((w3_a*w2_b)*(w3_c*u_0_1[_idx39]))+((w4_a*w2_b)*(w3_c*u_0_1[_idx40])))))+(((((w1_a*w3_b)*(w3_c*u_0_1[_idx41]))+((w2_a*w3_b)*(w3_c*u_0_1[_idx42])))+(((w3_a*w3_b)*(w3_c*u_0_1[_idx43]))+((w4_a*w3_b)*(w3_c*u_0_1[_idx44]))))+((((w1_a*w4_b)*(w3_c*u_0_1[_idx45]))+((w2_a*w4_b)*(w3_c*u_0_1[_idx46])))+(((w3_a*w4_b)*(w3_c*u_0_1[_idx47]))+((w4_a*w4_b)*(w3_c*u_0_1[_idx48]))))))+((((((w1_a*w1_b)*(w4_c*u_0_1[_idx49]))+((w2_a*w1_b)*(w4_c*u_0_1[_idx50])))+(((w3_a*w1_b)*(w4_c*u_0_1[_idx51]))+((w4_a*w1_b)*(w4_c*u_0_1[_idx52]))))+((((w1_a*w2_b)*(w4_c*u_0_1[_idx53]))+((w2_a*w2_b)*(w4_c*u_0_1[_idx54])))+
(((w3_a*w2_b)*(w4_c*u_0_1[_idx55]))+((w4_a*w2_b)*(w4_c*u_0_1[_idx56])))))+(((((w1_a*w3_b)*(w4_c*u_0_1[_idx57]))+((w2_a*w3_b)*(w4_c*u_0_1[_idx58])))+(((w3_a*w3_b)*(w4_c*u_0_1[_idx59]))+((w4_a*w3_b)*(w4_c*u_0_1[_idx60]))))+((((w1_a*w4_b)*(w4_c*u_0_1[_idx61]))+((w2_a*w4_b)*(w4_c*u_0_1[_idx62])))+(((w3_a*w4_b)*(w4_c*u_0_1[_idx63]))+((w4_a*w4_b)*(w4_c*u_0_1[_idx64]))))))));
}
}
/*
 * Initialization kernel (machine-generated).
 *
 * For each thread's grid cell this fills the coefficient arrays with
 * constants (a_1_0 = 0.2, b_2_0 = 0.30000000000000004, c_3_0 = 0.4), seeds the
 * 4x4x4 neighborhood of u_0_1 around the cell with 0.1, and writes 1.1 into
 * u_0_0[_idx22].
 *
 * NOTE(review): the variable `t` is used in every neighborhood-index formula
 * below, but its declaration (`int t;`) and the surrounding time loop are
 * commented out, so this kernel does not compile as-is -- TODO confirm the
 * intended declaration/value of `t`.
 * NOTE(review): each coefficient store is emitted four times (e.g.
 * a_1_0[_idx0]=0.2 appears four times) -- redundant but harmless generated
 * code.  The weights w1..w4_{a,b,c} are assigned 0.1 but never read in this
 * kernel, and p_idx_x_max / p_idx_y_max / p_idx_z_max / size_1_1 are likewise
 * computed but unused.
 */
__global__ void initialize(double * u_0_0, double * u_0_1, double * a_1_0, double * b_2_0, double * c_3_0, int x_max, int y_max, int z_max)
{
/*
const double * const a_1_0[16] = { a_1_0 } ;
const double * const b_2_0[16] = { b_2_0 } ;
const double * const c_3_0[16] = { c_3_0 } ;
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
*/
double w1_a;
double w1_b;
double w1_c;
double w2_a;
double w2_b;
double w2_c;
double w3_a;
double w3_b;
double w3_c;
double w4_a;
double w4_b;
double w4_c;
/* _idx0 is the thread's own cell; _idx1.._idx64 address the 4x4x4 stencil neighborhood. */
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx16;
int _idx17;
int _idx18;
int _idx19;
int _idx2;
int _idx20;
int _idx21;
int _idx22;
int _idx23;
int _idx24;
int _idx25;
int _idx26;
int _idx27;
int _idx28;
int _idx29;
int _idx3;
int _idx30;
int _idx31;
int _idx32;
int _idx33;
int _idx34;
int _idx35;
int _idx36;
int _idx37;
int _idx38;
int _idx39;
int _idx4;
int _idx40;
int _idx41;
int _idx42;
int _idx43;
int _idx44;
int _idx45;
int _idx46;
int _idx47;
int _idx48;
int _idx49;
int _idx5;
int _idx50;
int _idx51;
int _idx52;
int _idx53;
int _idx54;
int _idx55;
int _idx56;
int _idx57;
int _idx58;
int _idx59;
int _idx6;
int _idx60;
int _idx61;
int _idx62;
int _idx63;
int _idx64;
int _idx7;
int _idx8;
int _idx9;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations
*/
/* blockIdx.y encodes both the y and z block coordinates; split it here. */
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = ((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x) */
_idx0=((((p_idx_z*x_max)*y_max)+(p_idx_y*x_max))+p_idx_x);
/* Constant coefficients and (unused) weights; the repeated stores are generated-code redundancy. */
a_1_0[_idx0]=0.2;
w1_a=0.1;
a_1_0[_idx0]=0.2;
w2_a=0.1;
a_1_0[_idx0]=0.2;
w3_a=0.1;
a_1_0[_idx0]=0.2;
w4_a=0.1;
b_2_0[_idx0]=0.30000000000000004;
w1_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w2_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w3_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w4_b=0.1;
c_3_0[_idx0]=0.4;
w1_c=0.1;
c_3_0[_idx0]=0.4;
w2_c=0.1;
c_3_0[_idx0]=0.4;
w3_c=0.1;
c_3_0[_idx0]=0.4;
w4_c=0.1;
/* Seed the 64 neighborhood samples of u_0_1 with 0.1; closed-form index in the generated comment above each store. */
/* _idx1 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx1=((((_idx0+(((3*p_idx_z)*t)*y_max))+(((3*p_idx_z)*t)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t));
u_0_1[_idx1]=0.1;
/* _idx2 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx2=(((_idx1+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_1[_idx2]=0.1;
/* _idx3 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx3=(((_idx2+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_1[_idx3]=0.1;
/* _idx4 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x) */
_idx4=(((_idx3+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_1[_idx4]=0.1;
/* _idx5 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx5=((_idx1+x_max)+(3*t));
u_0_1[_idx5]=0.1;
/* _idx6 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx6=((_idx2+x_max)+(3*t));
u_0_1[_idx6]=0.1;
/* _idx7 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx7=((_idx3+x_max)+(3*t));
u_0_1[_idx7]=0.1;
/* _idx8 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x) */
_idx8=((_idx4+x_max)+(3*t));
u_0_1[_idx8]=0.1;
/* _idx9 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx9=((_idx5+x_max)+(3*t));
u_0_1[_idx9]=0.1;
/* _idx10 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx10=((_idx6+x_max)+(3*t));
u_0_1[_idx10]=0.1;
/* _idx11 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx11=((_idx7+x_max)+(3*t));
u_0_1[_idx11]=0.1;
/* _idx12 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x) */
_idx12=((_idx8+x_max)+(3*t));
u_0_1[_idx12]=0.1;
/* _idx13 = (((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx13=((_idx9+x_max)+(3*t));
u_0_1[_idx13]=0.1;
/* _idx14 = ((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx14=((_idx10+x_max)+(3*t));
u_0_1[_idx14]=0.1;
/* _idx15 = ((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx15=((_idx11+x_max)+(3*t));
u_0_1[_idx15]=0.1;
/* _idx16 = ((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x) */
_idx16=((_idx12+x_max)+(3*t));
u_0_1[_idx16]=0.1;
/* _idx17 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx17=(_idx1+1);
u_0_1[_idx17]=0.1;
/* _idx18 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx18=(_idx2+1);
u_0_1[_idx18]=0.1;
/* _idx19 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx19=(_idx3+1);
u_0_1[_idx19]=0.1;
/* _idx20 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+1) */
_idx20=(_idx4+1);
u_0_1[_idx20]=0.1;
/* _idx21 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx21=(_idx5+1);
u_0_1[_idx21]=0.1;
/* _idx22 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx22=(_idx6+1);
u_0_1[_idx22]=0.1;
/* _idx23 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx23=(_idx7+1);
u_0_1[_idx23]=0.1;
/* _idx24 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+1) */
_idx24=(_idx8+1);
u_0_1[_idx24]=0.1;
/* _idx25 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx25=(_idx9+1);
u_0_1[_idx25]=0.1;
/* _idx26 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx26=(_idx10+1);
u_0_1[_idx26]=0.1;
/* _idx27 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx27=(_idx11+1);
u_0_1[_idx27]=0.1;
/* _idx28 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+1) */
_idx28=(_idx12+1);
u_0_1[_idx28]=0.1;
/* _idx29 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx29=(_idx13+1);
u_0_1[_idx29]=0.1;
/* _idx30 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx30=(_idx14+1);
u_0_1[_idx30]=0.1;
/* _idx31 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx31=(_idx15+1);
u_0_1[_idx31]=0.1;
/* _idx32 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+1) */
_idx32=(_idx16+1);
u_0_1[_idx32]=0.1;
/* _idx33 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx33=(_idx17+1);
u_0_1[_idx33]=0.1;
/* _idx34 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx34=(_idx2+2);
u_0_1[_idx34]=0.1;
/* _idx35 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx35=(_idx19+1);
u_0_1[_idx35]=0.1;
/* _idx36 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+2) */
_idx36=(_idx20+1);
u_0_1[_idx36]=0.1;
/* _idx37 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx37=(_idx21+1);
u_0_1[_idx37]=0.1;
/* _idx38 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx38=(_idx6+2);
u_0_1[_idx38]=0.1;
/* _idx39 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx39=(_idx7+2);
u_0_1[_idx39]=0.1;
/* _idx40 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+2) */
_idx40=(_idx8+2);
u_0_1[_idx40]=0.1;
/* _idx41 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx41=(_idx25+1);
u_0_1[_idx41]=0.1;
/* _idx42 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx42=(_idx10+2);
u_0_1[_idx42]=0.1;
/* _idx43 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx43=(_idx27+1);
u_0_1[_idx43]=0.1;
/* _idx44 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+2) */
_idx44=(_idx12+2);
u_0_1[_idx44]=0.1;
/* _idx45 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx45=(_idx29+1);
u_0_1[_idx45]=0.1;
/* _idx46 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx46=(_idx30+1);
u_0_1[_idx46]=0.1;
/* _idx47 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx47=(_idx15+2);
u_0_1[_idx47]=0.1;
/* _idx48 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+2) */
_idx48=(_idx16+2);
u_0_1[_idx48]=0.1;
/* _idx49 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+((((3*p_idx_z)*t)+p_idx_y)*x_max))+((9*p_idx_z)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx49=(_idx17+2);
u_0_1[_idx49]=0.1;
/* _idx50 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+(((((3*p_idx_z)+3)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+9)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx50=(_idx2+3);
u_0_1[_idx50]=0.1;
/* _idx51 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+(((((3*p_idx_z)+6)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+18)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx51=(_idx19+2);
u_0_1[_idx51]=0.1;
/* _idx52 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+(((((3*p_idx_z)+9)*t)+p_idx_y)*x_max))+(((9*p_idx_z)+27)*(t*t)))+((3*p_idx_y)*t))+p_idx_x)+3) */
_idx52=(_idx20+2);
u_0_1[_idx52]=0.1;
/* _idx53 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+1)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx53=(_idx21+2);
u_0_1[_idx53]=0.1;
/* _idx54 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx54=(_idx6+3);
u_0_1[_idx54]=0.1;
/* _idx55 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx55=(_idx7+3);
u_0_1[_idx55]=0.1;
/* _idx56 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+1)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+3)*t))+p_idx_x)+3) */
_idx56=(_idx8+3);
u_0_1[_idx56]=0.1;
/* _idx57 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+2)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx57=(_idx25+2);
u_0_1[_idx57]=0.1;
/* _idx58 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx58=(_idx42+1);
u_0_1[_idx58]=0.1;
/* _idx59 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx59=(_idx43+1);
u_0_1[_idx59]=0.1;
/* _idx60 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+2)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+6)*t))+p_idx_x)+3) */
_idx60=(_idx12+3);
u_0_1[_idx60]=0.1;
/* _idx61 = ((((((((p_idx_z*x_max)+((3*p_idx_z)*t))*y_max)+(((((3*p_idx_z)*t)+p_idx_y)+3)*x_max))+((9*p_idx_z)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx61=(_idx29+2);
u_0_1[_idx61]=0.1;
/* _idx62 = (((((((((p_idx_z+1)*x_max)+(((3*p_idx_z)+3)*t))*y_max)+((((((3*p_idx_z)+3)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+9)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx62=(_idx46+1);
u_0_1[_idx62]=0.1;
/* _idx63 = (((((((((p_idx_z+2)*x_max)+(((3*p_idx_z)+6)*t))*y_max)+((((((3*p_idx_z)+6)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+18)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx63=(_idx15+3);
u_0_1[_idx63]=0.1;
/* _idx64 = (((((((((p_idx_z+3)*x_max)+(((3*p_idx_z)+9)*t))*y_max)+((((((3*p_idx_z)+9)*t)+p_idx_y)+3)*x_max))+(((9*p_idx_z)+27)*(t*t)))+(((3*p_idx_y)+9)*t))+p_idx_x)+3) */
_idx64=(_idx48+1);
u_0_1[_idx64]=0.1;
/* Output seed value at the cell the interpolation kernel later writes. */
u_0_0[_idx22]=1.1;
}
}
|
19,077 | #include "includes.h"
// Per-block partial squared norm of an array of interleaved (re, im) float
// pairs: each thread squares and sums one pair, the block reduces those
// values in shared memory, and lane 0 writes the block's partial sum to
// block_res[blockIdx.x].  Launch with blockDim.x floats of dynamic shared
// memory; the reduction handles any block size, power of two or not.
__global__ void sqr_norm_kernel(const float *in, float *block_res, int total)
{
    extern __shared__ float partial[];
    const unsigned tid = threadIdx.x;
    const int pair = 2 * (blockIdx.x * blockDim.x + threadIdx.x);

    // Load phase: threads past the last pair contribute zero.
    float v = 0;
    if (pair < total * 2) {
        const float re = in[pair];
        const float im = in[pair + 1];
        v = re * re + im * im;
    }
    partial[tid] = v;

    // Tree reduction that halves (rounding up) the active range each pass;
    // writers (tid < active - half) and readers (tid + half) never overlap.
    unsigned active = blockDim.x;
    while (active > 1) {
        const unsigned half = (active + 1) / 2;
        __syncthreads();
        if (tid + half < active)
            partial[tid] += partial[tid + half];
        active = half;
    }

    // After the final pass thread 0 performed the last addition itself,
    // so no extra barrier is needed before publishing the result.
    if (tid == 0)
        block_res[blockIdx.x] = partial[0];
}
19,078 | #include <iostream>
#include <string>
#include <fstream>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
const int FILTER_WIDTH = 7;
const int BLOCK_SIZE = 256;
int FILTER[FILTER_WIDTH*FILTER_WIDTH] = {
1,4,7,10,7,4,1,
4,12,26,33,26,12,4,
7,26,55,71,55,26,7,
10,33,71,91,71,33,10,
7,26,55,71,55,26,7,
4,12,26,33,26,12,4,
1,4,7,10,7,4,1
};
// Display the first and last 10 items
// Print the first ten and last ten pixels as "original -> result" pairs,
// with an ellipsis between the two runs.
void displayResult(const int original[], const int result[], int size) {
    cout << "Display result: ";
    cout << "(original -> result)\n";
    int i = 0;
    while (i < 10) {
        cout << original[i] << " -> " << result[i] << "\n";
        ++i;
    }
    cout << ".\n.\n.\n";
    i = size - 10;
    while (i < size) {
        cout << original[i] << " -> " << result[i] << "\n";
        ++i;
    }
}
// Read a flattened 2D image from a text file.
// File format: width, height, then width*height integer pixel values.
// On success *data receives a heap-allocated row-major pixel array (the
// caller owns it) and *sizeX / *sizeY receive the dimensions.
// On a missing file the whole program exits
// (NOTE(review): with status 0, which signals success to the shell).
void initData(string file, int **data, int *sizeX, int *sizeY) {
int x;
int y;
long long i = 0;
cout << "Reading "<< file << "... \n";
ifstream myfile(file);
if (myfile.is_open()) {
myfile >> x;
myfile >> y;
// one int per pixel, read in row-major order
int *temp = new int[x * y];
for( i=0; i < x * y; i++){
myfile >> temp[(int)i];
}
myfile.close();
*data = temp;
*sizeX = x;
*sizeY = y;
}
else {
cout << "ERROR: File " << file << " not found!\n";
exit(0);
}
cout << i << " entries imported\n";
}
// Write a smoothed image back out as text: width, height, then one pixel
// value per line (the same layout initData() reads).
// Exits the whole program if the file cannot be opened
// (NOTE(review): with status 0, which signals success to the shell).
void saveResult(string file, int data[], int sizeX, int sizeY) {
long long i = 0;
cout << "Saving data to "<< file <<"... \n";
ofstream myfile(file, std::ofstream::out);
if (myfile.is_open()) {
myfile << sizeX << "\n";
myfile << sizeY << "\n";
for (i = 0; i < sizeX * sizeY; i++){
myfile << data[i] << "\n";
}
myfile.close();
}
else {
cout << "ERROR: Cannot save to " << file << "!\n";
exit(0);
}
cout << i << " entries saved\n";
}
// Kernel function for 2D smoothing in GPU
// Kernel: smooth the image with the FILTER_WIDTH x FILTER_WIDTH weighted
// filter held in `filter`. Edge pixels are handled by clamping each filter
// tap to the nearest valid pixel (replicate-border), matching CPU_Test.
// Grid-stride loops in both dimensions make the kernel correct for any
// launch configuration, including the 1D launch used by GPU_Test (there
// blockDim.y == gridDim.y == 1, so one x-thread walks every row).
__global__
void calculateResult(int sizeX, int sizeY, int *data, int *result, int *filter){
    int halfFilterWidth = FILTER_WIDTH / 2;
    int strideX = blockDim.x * gridDim.x;
    int strideY = blockDim.y * gridDim.y;
    for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < sizeX; x += strideX) {
        for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < sizeY; y += strideY) {
            int numerator = 0;   // weighted sum of neighbouring pixels
            int denominator = 0; // total filter weight applied
            for (int filterX = 0; filterX < FILTER_WIDTH; filterX++) {
                for (int filterY = 0; filterY < FILTER_WIDTH; filterY++) {
                    // Clamp the tap position to the image border
                    // (replaces the old empty else-if clamping chain).
                    int xPos = x + filterX - halfFilterWidth;
                    int yPos = y + filterY - halfFilterWidth;
                    xPos = max(0, min(xPos, sizeX - 1));
                    yPos = max(0, min(yPos, sizeY - 1));
                    numerator += data[yPos * sizeX + xPos] * filter[filterY * FILTER_WIDTH + filterX];
                    denominator += filter[filterY * FILTER_WIDTH + filterX];
                }
            }
            // Integer division: filter weights always sum to the full
            // denominator because taps are clamped, never dropped.
            result[y * sizeX + x] = numerator / denominator;
        }
    }
}
// GPU implementation
// GPU implementation
// input:
//   int data[]  - int array holding the flattened original image
//   int sizeX   - the width of the image
//   int sizeY   - the height of the image
// output:
//   int result[] - int array holding the smoothed image
void GPU_Test(int data[], int result[], int sizeX, int sizeY) {
    int size = sizeX * sizeY;
    // Allocate device memory for result[], data[] and FILTER[].
    int *r, *d, *f;
    cudaMalloc((void**)&r, size * sizeof(int));
    cudaMalloc((void**)&d, size * sizeof(int));
    // BUG FIX: the filter buffer only needs FILTER_WIDTH^2 ints; the old
    // code over-allocated it to the full image size.
    cudaMalloc((void**)&f, FILTER_WIDTH * FILTER_WIDTH * sizeof(int));
    // Only the inputs need copying up; result[] is fully overwritten by
    // the kernel, so the old host->device copy of result was dropped.
    cudaMemcpy(d, data, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(f, FILTER, FILTER_WIDTH * FILTER_WIDTH * sizeof(int), cudaMemcpyHostToDevice);
    // Start timer for kernel
    auto startKernel = chrono::steady_clock::now();
    int numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // Call the kernel function
    calculateResult<<<numBlocks, BLOCK_SIZE>>>(sizeX, sizeY, d, r, f);
    // End timer for kernel and display kernel time
    cudaDeviceSynchronize(); // <- DO NOT REMOVE
    auto endKernel = chrono::steady_clock::now();
    cout << "Kernel Elapsed time: " << chrono::duration <double, milli>(endKernel - startKernel).count() << "ms\n";
    // Copy result from device to host.
    // BUG FIX: element type is int (sizeof(float) happened to match here,
    // but was semantically wrong).
    cudaMemcpy(result, r, size * sizeof(int), cudaMemcpyDeviceToHost);
    // Free device memory.
    // BUG FIX: cudaFree takes the device pointer itself, not its address;
    // cudaFree(&d) passed a host stack address and leaked every allocation.
    cudaFree(d);
    cudaFree(r);
    cudaFree(f);
}
// CPU implementation
// CPU implementation
// input:
//   int data[]  - int array holding the flattened original image
//   int sizeX   - the width of the image
//   int sizeY   - the height of the image
// output:
//   int result[] - int array holding the smoothed image
// Smooths the image with the global FILTER (FILTER_WIDTH x FILTER_WIDTH).
// Border pixels are handled by clamping each filter tap to the nearest
// valid pixel (replicate-border), so the full filter weight always applies.
void CPU_Test(int data[], int result[], int sizeX, int sizeY) {
    int halfFilterWidth = FILTER_WIDTH / 2;
    // start from last column in image
    for (int x = sizeX - 1; x >= 0; x--) {
        // start from last row in image
        for (int y = sizeY - 1; y >= 0; y--) {
            int numerator = 0;   // weighted sum of neighbouring pixels
            int denominator = 0; // total filter weight applied
            for (int filterX = FILTER_WIDTH - 1; filterX >= 0; filterX--) {
                for (int filterY = FILTER_WIDTH - 1; filterY >= 0; filterY--) {
                    // Clamp the tap position to the image border
                    // (replaces the old empty else-if clamping chains).
                    int xPos = x + filterX - halfFilterWidth;
                    int yPos = y + filterY - halfFilterWidth;
                    if (xPos < 0) xPos = 0;
                    else if (xPos >= sizeX) xPos = sizeX - 1;
                    if (yPos < 0) yPos = 0;
                    else if (yPos >= sizeY) yPos = sizeY - 1;
                    numerator += data[yPos * sizeX + xPos] * FILTER[filterY * FILTER_WIDTH + filterX];
                    denominator += FILTER[filterY * FILTER_WIDTH + filterX];
                }
            }
            // store result (integer division by the full weight sum)
            result[y * sizeX + x] = numerator / denominator;
        }
    }
}
// The input is a 2D grayscale image
// The image is flattened into a text file of pixel values.
// The input is a 2D grayscale image
// The image is flattened into a text file of pixel values.
// Runs the CPU and GPU smoothing implementations on the same input, times
// both end-to-end, and writes each result to its own text file.
int main(int argc, char *argv[]) {
// default input file when none is given on the command line
string inputFile = (argc == 1) ? "image2D.txt" : argv[1];
int sizeX;
int sizeY;
int *dataForCPUTest;
int *dataForGPUTest;
// two independent copies of the input so each run has its own buffer
initData(inputFile, &dataForCPUTest, &sizeX, &sizeY);
initData(inputFile, &dataForGPUTest, &sizeX, &sizeY);
int size = sizeX * sizeY;
// NOTE(review): these heap arrays (and the two from initData) are never
// deleted; harmless since the process exits right after, but worth
// freeing if this main ever grows.
int *resultForCPUTest = new int[size];
int *resultForGPUTest = new int[size];
cout << "\n";
cout << "CPU Implementation\n";
auto startCPU = chrono::steady_clock::now();
CPU_Test(dataForCPUTest, resultForCPUTest, sizeX, sizeY);
auto endCPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endCPU - startCPU).count() << "ms\n";
displayResult(dataForCPUTest, resultForCPUTest, size);
saveResult("2D_result_CPU.txt",resultForCPUTest, sizeX, sizeY);
cout << "\n";
cout << "GPU Implementation\n";
auto startGPU = chrono::steady_clock::now();
GPU_Test(dataForGPUTest, resultForGPUTest, sizeX, sizeY);
auto endGPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endGPU - startGPU).count() << "ms\n";
displayResult(dataForGPUTest, resultForGPUTest, size);
saveResult("2D_result_GPU.txt",resultForGPUTest, sizeX, sizeY);
return 0;
}
|
19,079 | #include <cuda_runtime.h>
#include <stdio.h>
/*
*
*/
// Evaluate the Horner-form polynomial in place and add x/5.0 — the
// double-precision-division variant used in the timing comparison.
__global__ void poly_div1(float* poli, const int N) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= N)
        return;
    const float x = poli[tid];
    const float horner = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))));
    poli[tid] = horner + x / 5.0;
}
__global__ void poly_div2(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+x*0.2;
}
}
__global__ void poly_div3(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+5.0/x;
}
}
__global__ void poly_div4(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
float y = 5.0/x;
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+y;
}
}
__global__ void poly_div5(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0/x;
}
}
__global__ void poly_div6(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
float y = 1.0/x;
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+y;
}
}
__global__ void poly_div7(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0f/x;
}
}
__global__ void poly_div8(float* poli, const int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
float x = poli[idx];
poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+5.0f/x;
}
}
cudaEvent_t start, stop;
float elapsed_time;
void start_event() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
}
void end_event() {
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Execution time = %.6fms\n",elapsed_time);
}
// Benchmark driver: times eight variants of the same polynomial kernel
// that differ only in how the trailing division term is written (double vs
// float literal, division vs multiply, via a named temporary, ...).
// NOTE(review): d_polinomy is never initialised from the host, so the
// kernels operate on indeterminate data — acceptable for a pure timing
// run, but the values copied back into h_polinomy are meaningless.
int main() {
    int nElem = 1 << 27;
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    size_t nBytes = nElem * sizeof(float);
    float* h_polinomy = (float*)malloc(nBytes);
    float* d_polinomy;
    float* d_results;
    cudaMalloc((float**)&d_polinomy, nBytes);
    cudaMalloc((float**)&d_results, nBytes);
    int iLen = 512;
    dim3 block (iLen);
    dim3 grid ((nElem + block.x - 1) / block.x);
    start_event();
    poly_div2<<<grid, block>>>(d_polinomy, nElem);
    printf("poli2 "); end_event();
    cudaDeviceSynchronize();
    printf("poli2[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+x*0.2;\n");
    start_event();
    poly_div1<<<grid, block>>>(d_polinomy, nElem);
    printf("poli1 "); end_event();
    cudaDeviceSynchronize();
    printf("poli1[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+x/5.0;\n");
    start_event();
    poly_div3<<<grid, block>>>(d_polinomy, nElem);
    printf("poli3 "); end_event();
    printf("poli3[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+5.0/x;\n");
    start_event();
    poly_div4<<<grid, block>>>(d_polinomy, nElem);
    printf("poli4 "); end_event();
    printf("float y = 5.0/x; \n poli4[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+y;\n");
    start_event();
    poly_div5<<<grid, block>>>(d_polinomy, nElem);
    printf("poli5 "); end_event();
    printf("poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0/x;\n");
    start_event();
    poly_div6<<<grid, block>>>(d_polinomy, nElem);
    printf("poli6 "); end_event();
    printf("float y = 1.0/x;\npoli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+y;\n");
    start_event();
    poly_div7<<<grid, block>>>(d_polinomy, nElem);
    printf("poli7" ); end_event();
    printf("poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+1.0f/x;\n");
    start_event();
    poly_div8<<<grid, block>>>(d_polinomy, nElem);
    printf("poli8 "); end_event();
    printf("poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))+5.0f/x;\n");
    cudaMemcpy(h_polinomy, d_polinomy, nBytes, cudaMemcpyDeviceToHost);
    // BUG FIX: d_results was allocated but never released (device leak).
    cudaFree(d_results);
    cudaFree(d_polinomy);
    free(h_polinomy);
}
|
19,080 | /*
This version of my sudoku solver will make use of cuda to attemp to gain speedups
*/
#include <iostream>
#include <fstream>
//#include <chrono>
#define boardSize 81
#define sideSize 9
using namespace std;
struct Board{
int values[81];
bool isFixedValue[81];
bool isPossibleValue[81][9];
//int coordinates;
};
void getFileName(char fileName[]){
cout << "Please give the file location of your sudoku board."<<endl;
cin.getline(fileName, 255);
return;
}
bool getBoard(char fileName[], Board &mainBoard){
ifstream file(fileName);//input can only occur on host this can't be parallelized
if(file.fail())
return false;
char temp = '0';
for (int i = 0; i < boardSize; i++){
file >> temp;
mainBoard.values[i] = (int)((int)temp - (int)'0');
//cout << mainBoard.values[i] << ",";
if(mainBoard.values[i] > 0){
mainBoard.isFixedValue[i] = true;
}
else{
mainBoard.isFixedValue[i] = false;
mainBoard.values[i] = (mainBoard.values[i] / -1);
}
}
file.close();
return true;
}
// One thread per column: scan row `row` of the board for `value` and raise
// the flag when found.
// NOTE(review): for the flag to reach the host, hasDuplicates must point to
// device (or managed/mapped) memory. The commented-out call site in
// anyDuplicates() passed an uninitialised host pointer, which is why it
// "always returned the same value".
__global__ void rowCheckKernel(const int board[], const int row, const int value, bool *hasDuplicates)
{
int i = (row*sideSize) + threadIdx.x;
if(value == board[i])
*hasDuplicates = true;//for some reason this doesn't want to pass back the correct value, always the same one
}
bool anyDuplicates(int board[], int coordinates, int value)
{
int row = coordinates / sideSize;
int column = coordinates % sideSize;
//dim3 grid(sideSize);
//bool *hasDuplicates = false;
//rowCheckKernel<<<1,grid>>>(board,row,value,hasDuplicates);
//bool test = &hasDuplicates;
/*if(test){
return test;
}*/
for(int i = row * sideSize; i < ((row*sideSize)+sideSize); i++)
{
if(value == board[i]){
return true;
}
}
for(int i = column; i < boardSize; i+=sideSize)
{
if(value == board[i])
return true;
}
while(column%3 != 0)
column--;
while(row%3 != 0)
row--;
for(int r = row; r < (row+3); r++)
{
for(int c = column; c < (column+3); c++)
{
if(value == board[((r*9)+c)])
return true;
}
}
return false;
}
__global__ void validateBoardKernel(Board board, bool &isValid)
{
int tempValue = board.values[threadIdx.x];
board.values[threadIdx.x] = 0;
if(tempValue != 0 /*&& anyDuplicates(board.values, threadIdx.x, tempValue)*/){
isValid = false;
}
}
bool validateBoard(Board board){//when I attempted to paralellize this it would
for(int i = 0; i < boardSize; i++){
int tempValue = board.values[i];
board.values[i] = 0;
if(tempValue != 0 && anyDuplicates(board.values, i, tempValue)){
cout<<"ERROR: Duplicate value '"
<< tempValue;
return false;
}
board.values[i] = tempValue;
}
return true;
}
void displayBoard(Board board)
{//this cannot be implemented in parallel I don't think, everything I found said input output had to occur on host
ofstream results("resultsParallel.txt");
for(int i = 0; i < boardSize; i++){
results<<board.values[i] <<",";
if(i%sideSize == 8)
results<<endl;
}
results.close();
}
// One thread per cell: intended to flag whether the board is complete.
// NOTE(review): broken as written — `solved` is a host-side reference that
// a device thread cannot legally write, and the logic is inverted (finding
// an empty cell sets solved = *true*). Effectively dead code; callers
// should not rely on it.
__global__ void isSolvedKernel(const Board board, bool &solved)
{
if(board.values[threadIdx.x] == 0)
solved = true;
}
// Return true when the board is complete (no cell left at 0).
// BUG FIX: the old version launched isSolvedKernel with a *host* bool
// reference — a device thread cannot write through a host address, so the
// initial value (true) was always returned, and the kernel's condition was
// inverted anyway (it set `solved = true` on finding an empty cell).
// A host-side scan of the 81 ints is both correct and far cheaper than a
// kernel launch.
bool isSolved(Board board)
{
    for (int i = 0; i < boardSize; i++) {
        if (board.values[i] == 0)
            return false;
    }
    return true;
}
bool canChange(Board mainBoard, int coordinates, int value)
{//this function has no behavior worth parallelizing
if(value==0)
return true;
if(mainBoard.isFixedValue[coordinates])
return false;
if(anyDuplicates(mainBoard.values, coordinates, value))
return false;
return true;
}
__global__ void checkPossiblesKernel(Board &newBoard, bool &noChanges)
{
int possibles = 0;
int value;
if(!newBoard.isFixedValue[threadIdx.x]){
for(int guess = 1; guess <= sideSize; guess++){
if(/*canChange(newBoard, threadIdx.x, guess)can't use host function from global*/guess==0 || !newBoard.isFixedValue[threadIdx.x]){
value = guess;
newBoard.isPossibleValue[threadIdx.x][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[threadIdx.x][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[threadIdx.x] = value;
newBoard.isFixedValue[threadIdx.x] = true;
noChanges = false;
}
}
bool checkPossibles(Board &newBoard)
{
bool noChanges;
do{
noChanges = true;
for(int i = 0; i < boardSize; i++){
int possibles = 0;
int value;
if(!newBoard.isFixedValue[i]){
for(int guess = 1; guess <= 9; guess++){
if(canChange(newBoard, i, guess)){
value = guess;
newBoard.isPossibleValue[i][guess-1] = true;
possibles++;
}
else
newBoard.isPossibleValue[i][guess-1] = false;
}
}
if(possibles==1){
newBoard.values[i] = value;
newBoard.isFixedValue[i] = true;
noChanges = false;
}
}
}while(noChanges == false);
if(isSolved(newBoard))
return true;
return false;
}
/*dfs is very difficult to do in parallel
I couldn't figure out how to do it
__global__ void dfs(Board &board)
{
}
Board parallelBrute(Board board)
{
}
*/
Board recursiveBrute(Board board, int startPosition)
{
while(startPosition < boardSize && board.isFixedValue[startPosition] && board.values[startPosition] != 0)
startPosition++;
if(startPosition >= boardSize)
return board;
for(int guess = 1; guess <= 9; guess++){
Board tempBoard = board;
if(board.isPossibleValue[startPosition][guess-1] && canChange(board,startPosition,guess)){
board.values[startPosition] = guess;
tempBoard = recursiveBrute(board, startPosition+1);
}
if(isSolved(tempBoard))
return tempBoard;
}
board.values[startPosition] = 0;
return board;
}
__global__ void solveKernel(Board &newBoard, bool &noChanges)
{
for(int guess = 1; guess <= sideSize; guess++){
int total = 0;
for(int iRow = threadIdx.x; iRow < (threadIdx.x+sideSize); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = threadIdx.x; iRow <(threadIdx.x + sideSize); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
void solve(Board &newBoard)
{
bool noChanges;
do
{
noChanges = true;
if(checkPossibles(newBoard))
return;
for(int i = 0; i < boardSize; i += sideSize){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iRow = i; iRow < (i+9); iRow++){
if(!newBoard.isFixedValue[iRow])
total += newBoard.isPossibleValue[iRow][guess-1];
}
if(total == 1){
for(int iRow = i; iRow <(i+9); iRow++){
if(newBoard.isPossibleValue[iRow][guess-1] && !newBoard.isFixedValue[iRow]){
newBoard.values[iRow] = guess;
newBoard.isFixedValue[iRow] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int i = 0; i < sideSize; i++){
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(!newBoard.isFixedValue[iColumn]){
total += newBoard.isPossibleValue[iColumn][guess-1];
}
}
if(total == 1){
for(int iColumn = i; iColumn < boardSize; iColumn += sideSize){
if(newBoard.isPossibleValue[iColumn][guess-1] && !newBoard.isFixedValue[iColumn]){
newBoard.values[iColumn] = guess;
newBoard.isFixedValue[iColumn] = true;
noChanges = false;
}
}
}
}
}
if(checkPossibles(newBoard))
return;
for(int r = 0; r < boardSize; r+= sideSize*3){
for(int c = 0; c < sideSize; c += 3){
int i = r+c;
for(int guess = 1; guess <= 9; guess++){
int total = 0;
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(!newBoard.isFixedValue[iBlockC])
total += newBoard.isPossibleValue[iBlockC][guess-1];
}
}
if(total == 1){
for(int iBlockR = i; iBlockR < (i+sideSize*3); iBlockR += sideSize){
for(int iBlockC = iBlockR; iBlockC < (iBlockR + 3); iBlockC++){
if(newBoard.isPossibleValue[iBlockC][guess-1] && !newBoard.isFixedValue[iBlockC]){
newBoard.values[iBlockC] = guess;
newBoard.isFixedValue[iBlockC] = true;
noChanges = false;
}
}
}
}
}
}
}
if(checkPossibles(newBoard))
return;
}while(noChanges == false);
if(!isSolved(newBoard))
newBoard = recursiveBrute(newBoard, 0);
}
int main(int argc, char *argv[]){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
char fileName[256];
Board mainBoard;
getFileName(fileName);
if(!getBoard(fileName,mainBoard))
cout <<"Error with filename";
else if(!validateBoard(mainBoard)){
displayBoard(mainBoard);
return 0;
}
solve(mainBoard);
displayBoard(mainBoard);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout <<"Solve time took " << milliseconds << "ms"<<endl;
return 0;
}
|
19,081 | /*
Jaitirth Jacob - 13CO125 Vidit Bhargava - 13CO151
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define ITERATIONS 4 //Repeat the experiment for greater accuracy
// Element-wise vector add: c[i] = a[i] + b[i].
// `tpb` is the threads-per-block count used at launch (passed explicitly
// rather than reading blockDim.x).
// NOTE(review): there is no `i < N` guard. main() launches ceil(N/tpb)
// blocks, so whenever tpb does not evenly divide N (N = 1000000 here) the
// tail threads of the last block read and write past the ends of the
// arrays — add a bound parameter/guard or pad the allocations.
__global__ void add(int *a, int *b, int *c, int tpb)
{
//Find the correct thread index in the grid
int i = blockIdx.x * tpb + threadIdx.x;
c[i] = a[i] + b[i];
}
#define N 1000000 //Array Size
#define min_threads 16
#define max_threads 1024
int main(void)
{
int *a,*b,*c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
//Allocate on device
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
srand(time(NULL));
//Populate a and b
for (int i = 0; i < N; ++i)
{
a[i] = rand()%20;
b[i] = rand()%37;
}
int numBlocks;
cudaEvent_t start, copy, exec, result; //Events for measuring time
//To calculate average over a number of iterations
float t1[7], t2[7], t3[7], total[7];
for (int i = 0; i < 7; ++i)
{
t1[i]=0;
t2[i]=0;
t3[i]=0;
total[i]=0;
}
printf("t1: time for copying arrays\n");
printf("t2: time for kernel execution\n");
printf("t3: time for copying result back\n\n");
printf("All times in milliseconds\n");
printf("TPB\t\tNB\t\tt1\t\tt2\t\tt3\t\ttotal\t\n");
int count;
for (int i = 0; i < ITERATIONS; ++i)
{
count=0;
for (int threadsPerBlock = min_threads; threadsPerBlock <= max_threads; threadsPerBlock*=2)
{
numBlocks = (N + threadsPerBlock - 1)/threadsPerBlock;
cudaEventCreate(&start);
cudaEventCreate(©);
cudaEventCreate(&exec);
cudaEventCreate(&result);
cudaEventRecord(start);
//Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaEventRecord(copy);
cudaEventSynchronize(copy);
//Launch add() kernel on GPU
add<<<numBlocks,threadsPerBlock>>>(d_a, d_b, d_c, threadsPerBlock);
cudaEventRecord(exec);
cudaEventSynchronize(exec);
//Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaEventRecord(result);
cudaEventSynchronize(result);
float temp1=0, temp2=0, temp3=0, temptotal;
cudaEventElapsedTime(&temp1, start, copy);
cudaEventElapsedTime(&temp2, copy, exec);
cudaEventElapsedTime(&temp3, exec, result);
cudaEventElapsedTime(&temptotal, start, result);
t1[count] += temp1;
t2[count] += temp2;
t3[count] += temp3;
total[count] += temptotal;
cudaEventDestroy(start);
cudaEventDestroy(copy);
cudaEventDestroy(exec);
cudaEventDestroy(result);
count++;
}
}
int threadsPerBlock = min_threads;
for (int i = 0; i < 7; ++i)
{
numBlocks = (N + threadsPerBlock - 1)/threadsPerBlock;
t1[i]/=(float)ITERATIONS;
t2[i]/=(float)ITERATIONS;
t3[i]/=(float)ITERATIONS;
total[i]/=(float)ITERATIONS;
printf("%d\t\t%d\t\t%.5f\t\t%.5f\t\t%.5f\t\t%.5f\t\t\n",
threadsPerBlock, numBlocks, t1[i], t2[i], t3[i], total[i]);
threadsPerBlock*=2;
}
//Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
19,082 | #include <stdio.h>
// Prints info about the device
// Takes in the device number and a pointer to the properties
// Prints info about the device
// Takes in the device number and a pointer to the properties
// (prop must point to a struct already filled in by cudaGetDeviceProperties).
void printProperties(int i, cudaDeviceProp *prop){
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop->name );
}
// Enumerate the CUDA devices on this machine and print each one's name.
int main( void ) {
    // BUG FIX: `prop` was an uninitialized cudaDeviceProp* that was
    // dereferenced by the runtime and by printProperties — use a real
    // stack-allocated struct instead.
    cudaDeviceProp prop;
    int count;
    //Doesn't handle errors
    cudaGetDeviceCount(&count);
    printf("Device Count: %d\n", count);
    // BUG FIX: valid device ordinals are 0 .. count-1; the old loop passed
    // `count` itself first, which is out of range. Keep the original
    // highest-to-lowest ordering.
    for (int dev = count - 1; dev >= 0; dev--) {
        //Doesn't handle errors
        cudaGetDeviceProperties( &prop, dev );
        printProperties(dev, &prop);
        printf( " --- General Information for device %d ---\n", dev );
        printf( "Name: %s\n", prop.name );
    }
    return 0;
}
|
19,083 | extern "C"{
// 1D convolution: P[i] = sum_j N[i - Mask_Width/2 + j] * M[j], with
// out-of-range input taps treated as zero (ghost cells at both ends).
// NOTE(review): there is no `i < Width` guard on the output store — the
// launcher must start exactly Width threads, or P must be padded; confirm.
// NOTE(review): Pvalue accumulates in float although N, M and P are int
// arrays, so the final store truncates — presumably left over from a float
// version of this kernel; verify that is intended.
__global__ void convolution_1D_basic_kernel(int *N, int *M, int *P,
int Mask_Width, int Width){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float Pvalue = 0;
int N_start_point = i - (Mask_Width / 2);
for (int j = 0; j < Mask_Width; j++){
if(N_start_point + j >= 0 && N_start_point + j < Width){
Pvalue += N[N_start_point + j] * M[j];
}
}
P[i] = Pvalue;
}
} |
19,084 | #include <cstdio>
template<typename T>
__device__ __inline__ void add(T& val) {
val += 1;
}
template<typename T>
__global__ void func(T* ptr) {
add<T>(ptr[blockIdx.x]);
}
// Demo: increment five ints on the GPU through a templated device helper,
// using pinned host memory and async copies on a private stream.
int main() {
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    int *h_ptr, *d_ptr;
    // 5 ints = 20 bytes of pinned host memory (pinning is required for the
    // async copies below to be truly asynchronous).
    cudaHostAlloc(&h_ptr, 20, cudaHostAllocDefault);
    for(int i=0; i<5; i++) {
        h_ptr[i] = i*2 + 1;
    }
    cudaMalloc((void**)&d_ptr, 20);
    cudaMemcpyAsync(d_ptr, h_ptr, 20, cudaMemcpyHostToDevice, stream);
    func<<<5, 1, 0, stream>>>(d_ptr);
    cudaMemcpyAsync(h_ptr, d_ptr, 20, cudaMemcpyDeviceToHost, stream);
    // Block until the copy-kernel-copy pipeline on `stream` has finished
    // before reading h_ptr on the host.
    cudaStreamSynchronize(stream);
    for(int i=0; i<5; i++) {
        printf("%d ", h_ptr[i]);
    }
    printf("\n");
    // BUG FIX: release the device buffer, the pinned host buffer and the
    // stream — all three were leaked by the original.
    cudaFree(d_ptr);
    cudaFreeHost(h_ptr);
    cudaStreamDestroy(stream);
    return 0;
}
|
19,085 | #include <stdio.h>
#include <stdlib.h>
#define MAX_ITER 100
#define MAX 100 //maximum value of the matrix element
#define TOL 0.000001
// Generate a random float number with the maximum value of max
// Pseudo-random float in [0, max], driven by rand().
float rand_float(int max){
    float unit = (float)rand() / (float)RAND_MAX;  // uniform in [0, 1]
    return unit * max;
}
// Allocate 2D matrix
void allocate_init_2Dmatrix(float ***mat, int n, int m){
int i, j;
*mat = (float **) malloc(n * sizeof(float *));
for(i = 0; i < n; i++) {
(*mat)[i] = (float *)malloc(m * sizeof(float));
for (j = 0; j < m; j++)
(*mat)[i][j] = rand_float(MAX);
}
}
// solver
// In-place 5-point-stencil smoother over the interior of the n x m matrix.
// Sweeps until the accumulated per-sweep change drops below TOL or
// MAX_ITER sweeps have run. Updates reuse already-updated west/north
// neighbours within the same sweep (Gauss-Seidel-style ordering).
// NOTE(review): with only <stdio.h>/<stdlib.h> included, the unqualified
// `abs` presumably resolves to the integer abs(), truncating each residual
// to an int — sub-1.0 changes count as zero, so convergence can be
// declared early. fabsf() looks like what was intended; confirm.
// NOTE(review): the convergence test divides by n twice even though the
// matrix is n x m — fine for the square matrices main() passes in.
void solver(float ***mat, int n, int m){
float diff = 0, temp;
int done = 0, cnt_iter = 0, i, j;
while (!done && (cnt_iter < MAX_ITER)){
diff = 0;
for (i = 1; i < n - 1; i++)
for (j = 1; j < m - 1; j++){
temp = (*mat)[i][j];
(*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]);
diff += abs((*mat)[i][j] - temp);
}
if (diff/n/n < TOL)
done = 1;
cnt_iter ++;
}
if (done)
printf("Solver converged after %d iterations\n", cnt_iter);
else
printf("Solver not converged after %d iterations\n", cnt_iter);
}
int main(int argc, char *argv[]) {
int n, communication;
float **a;
if (argc < 2) {
printf("Call this program with two parameters: matrix_size communication \n");
printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n");
exit(1);
}
n = atoi(argv[1]);
printf("Matrix size = %d\n", n);
allocate_init_2Dmatrix(&a, n, n);
solver(&a, n, n);
return 0;
}
|
19,086 | // This is the naive implementation of in box check with all matrices squeezed to vector
// Point-in-box test, one 2D point per thread.
// A holds interleaved (x, y) coordinates for numElements points; B holds
// 8 floats that appear to be the four corners of a (possibly rotated)
// quadrilateral as (x0,y0,...,x3,y3) — confirm the corner ordering with
// the caller. C[k] is set to 1 when point k passes all four sidedness
// tests and -1 otherwise.
__global__ void inBoxKernel(const float *A, const float *B, int *C, int numElements){
int i = (blockDim.x * blockIdx.x + threadIdx.x)*2; // index of this point's x coordinate
float t11;
float t12;
float t21;
float t22;
if (i/2 < numElements)
{
// Cheap reject against coordinate bounds before the dot products.
if ((A[i]<B[0] || (A[i] >B[4]))||((A[i+1]>B[3]) || (A[i+1] <B[7])))
{
C[i/2]=-1;
}
else
{
// Sidedness tests: dot product of (point - corner) with an edge
// direction, one per edge; all four must be positive for "inside".
t11=(A[i]-B[0])*(B[2]-B[0])+(A[i+1]-B[1])*(B[3]-B[1]);
t12=(A[i]-B[4])*(B[6]-B[4])+(A[i+1]-B[5])*(B[7]-B[5]);
t21=(A[i]-B[2])*(B[4]-B[2])+(A[i+1]-B[3])*(B[5]-B[3]);
t22=(A[i]-B[6])*(B[2]-B[6])+(A[i+1]-B[7])*(B[1]-B[7]);
// 1e-20 tolerance treats points exactly on an edge as outside
if (t11>=1e-20 && t12>=1e-20 && t21>=1e-20 && t22>=1e-20)
{
C[i/2]=1;
}
else
{
C[i/2]=-1;
}
}
}
}
// Host-side launcher for inBoxKernel: one thread per input point,
// 512 threads per block, enough blocks to cover numElements.
void inBoxLauncher(const float *A, const float *B, int *C, int numElements){
    const int blockSize = 512;
    const int gridSize = (numElements - 1) / blockSize + 1;
    inBoxKernel<<<gridSize, blockSize>>>(A, B, C, numElements);
}
|
19,087 | // The code which is causing the pointer pointer address space error:
// %"struct.Eigen::half_impl::__half" = type { i16 }
// %"struct.Eigen::half_impl::half_base" = type { %"struct.Eigen::half_impl::__half" }
// %"struct.Eigen::half" = type { %"struct.Eigen::half_impl::half_base" }
// %"struct.Eigen::DSizes" = type { %"class.Eigen::array" }
// %"class.Eigen::array" = type { [1 x i32] }
// %"struct.Eigen::GpuDevice" = type <{ %"class.Eigen::StreamInterface"*, i32, [4 x i8] }>
// %"class.Eigen::StreamInterface" = type { i32 (...)** }
// %"struct.Eigen::TensorEvaluator.0" = type { %"struct.Eigen::half"*, %"struct.Eigen::DSizes", %"struct.Eigen::GpuDevice"* }
// %"struct.Eigen::TensorEvaluator.2" = type { %"struct.Eigen::half"*, %"struct.Eigen::DSizes", %"struct.Eigen::GpuDevice"* }
// %"struct.Eigen::TensorEvaluator.7" = type { %"struct.Eigen::internal::scalar_left", %"struct.Eigen::TensorEvaluator.2" }
// %"struct.Eigen::TensorEvaluator.6" = type { %"struct.Eigen::TensorEvaluator.0", %"struct.Eigen::TensorEvaluator.7" }
// ; Function Attrs: norecurse nounwind
// define weak_odr void @_ZN5Eigen8internal15EigenMetaKernelINS_15TensorEvaluatorIKNS_14TensorAssignOpINS_9TensorMapINS_6TensorINS_4halfELi1ELi1EiEELi16EEEKNS_18TensorCwiseUnaryOpINS0_11scalar_leftIS6_S6_NS0_13scalar_sum_opIS6_S6_EEEEKNS4_INS5_IKS6_Li1ELi1EiEELi16EEEEEEENS_9GpuDeviceEEEiEEvT_T0_(%"struct.Eigen::TensorEvaluator.6"* byval nocapture readonly align 8 %memcopied_eval, i32 %size) #1 comdat {
// %1 = tail call i32 @llvm.ptx.read.ctaid.x() #6
// %2 = tail call i32 @llvm.ptx.read.ntid.x() #6
// %3 = mul i32 %2, %1
// %4 = tail call i32 @llvm.ptx.read.tid.x() #6
// %5 = add i32 %3, %4
// %6 = tail call i32 @llvm.ptx.read.nctaid.x() #6
// %7 = mul i32 %6, %2
// %8 = getelementptr inbounds %"struct.Eigen::TensorEvaluator.6", %"struct.Eigen::TensorEvaluator.6"* %memcopied_eval, i64 0, i32 0, i32 0
// %9 = load %"struct.Eigen::half"*, %"struct.Eigen::half"** %8, align 8
struct MyStruct {
float floatvalue;
int intvalue;
};
__device__ __host__ float sumStruct(struct MyStruct **p_structs, int N) {
float sum = 0;
for(int i = 0; i < N; i++) {
struct MyStruct *mystruct = p_structs[i];
sum += mystruct->floatvalue + float(mystruct->intvalue) * 3.5f;
}
return sum;
}
__global__ void mykernel(float *data, MyStruct *structs, int N) {
data[0] = sumStruct(&structs, N);
data[3] = sumStruct(&structs, 123);
data[4] = sumStruct(&structs, 12300);
}
class HalfImpl {
public:
short myshort;
};
class HalfBase : public HalfImpl {
};
class Half : public HalfBase {
};
class StreamInterface {
};
class GpuDevice {
public:
int myint;
StreamInterface *pstreamInterface;
};
class TensorEvaluator0 {
public:
Half *phalf;
GpuDevice gpuDevice;
};
class TensorEvaluator2 {
public:
Half *phalf;
GpuDevice gpuDevice;
};
class TensorEvaluator7 {
public:
Half *phalf;
TensorEvaluator2 te2;
};
class TensorEvaluator6 {
public:
class TensorEvaluator0 te0;
class TensorEvaluator7 te7;
};
// __device__ __host__ float getGpuDeviceValue(GpuDevice *devices, int N) {
// float sum = 0;
// for(int i = 0; i < N; i++) {
// sum += devices[i].myfloat * (5.5f + i);
// }
// return sum;
// }
__device__ __host__ float getHalfValue(Half *half_, int a) {
return (float)(half_[a].myshort + 123);
}
__global__ void myte6kernel(TensorEvaluator6 *structs, float *data, GpuDevice *gpudevices, int a, int b, int c) {
Half *phalf = structs[a].te0.phalf;
data[0] = getHalfValue(phalf, a);
//gpudevices[b] = structs[a].te0.gpuDevice;
// float sum = 0;
// for(int i = 0; i < 1000; i++) {
// sum += getGpuDeviceValue(&structs[i].te0.gpuDevice, c + i);
// }
// data[1] = sum;
}
|
19,088 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
// Privatized histogram kernel: each block builds its own copy of the
// histogram in dynamic shared memory (num_bins * sizeof(unsigned int)
// bytes, supplied at launch), then merges it into the global bins with one
// atomicAdd per bin. Input values are used directly as bin indices, so
// every input[i] must be < num_bins (not checked here).
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
extern __shared__ unsigned int private_histogram[];
//Initializing private histogram bins
// (strided so block sizes smaller than num_bins still zero every bin)
int bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
private_histogram[threadIdx.x + bin_stride] = 0;
bin_stride += blockDim.x;
}
__syncthreads();
//Computation of private histogram
// grid-stride loop over the input; shared-memory atomics keep contention
// on the global bins to one add per block per bin
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while(i < num_elements) {
atomicAdd(&private_histogram[input[i]], 1);
i += stride;
}
__syncthreads();
//Merging private history bins with global history bins
bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
bin_stride += blockDim.x;
}
__syncthreads();
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Host wrapper: launches fillBins with one thread per input element,
// BLOCK_SIZE threads per block, and num_bins counters of dynamic shared
// memory per block. `bins` is assumed zero-initialized by the caller.
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
    unsigned int num_bins) {
    // BUGFIX: with num_elements == 0 the unsigned ceil-division below wraps
    // ((0u - 1)/BLOCK_SIZE + 1 ≈ 8.4 million blocks). Nothing to do anyway.
    if (num_elements == 0 || num_bins == 0) return;
    dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
}
|
19,089 | /* nqueens.cu
* Jonathan Lehman
* February 26, 2012
*
* Compile with: nvcc -o nqueens nqueens.cu
* to get default with _N_ = 4 and numBX = 1 numBY = 1 sumOnGPU = 0
*
* Or specify _N_ by compiling with: nvcc -o nqueens nqueens.cu -D_N_=x
* where x is the board size desired where x must be >= 4 and <= 22
*
* and/Or specify numBX by compiling with: nvcc -o nqueens nqueens.cu -DnumBX=y
* where y is the number of tuple values to be generated by blockIdx.x
* where y must be >= 1 such that N^numBX < maxgridsize (in this case 65535 blocks)
*
* and/or specify numBY by compiling with nvcc -o nqueens nqueens.cu -DnumBY=z
* where z is the number of groups of ((N / 2) + (N % 2)) columns by N^numBX rows that work on the solution
* essentially, this evenly divides the work of the tuples being generated iteratively by each thread between each group
* where z must be <= N^numBX
*
* and/or specify whether or not to add the block totals on the GPU or cpu with nvcc -o nqueens nqueens.cu -DsumOnGPU=a
* where a is 1 or 0, with 1 doing the sum on the GPU and 0 doing the sum on the CPU
*
*/
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__ void queen(long*, int);
__device__ void sumBlocks(long *);
void checkArgs(int, char**, int);
void checkGPUCapabilities(int, int, int, int, int);
double getTime();
//set board size
#ifndef _N_
#define _N_ 4
#endif
//set the number of values in the tuple BlockIdx.x should be responsible for
#ifndef numBX
#define numBX 1
#endif
#ifndef numBY
#define numBY 1
#endif
//number of values in tuple to be generated by thread (incrementally)
//#ifndef numGen
#define numGen _N_ - 3 - numBX
//#endif
//whether or not the sum of blocksums (solution should be summed on GPU or CPU)
//CPU by default
//Set to 1 to add on GPU
#ifndef sumOnGPU
#define sumOnGPU 0
#endif
// Keep track of the gpu time.
cudaEvent_t start, stop;
float elapsedTime;
// Keep track of the CPU time.
double startTime, stopTime;
//array for block sums
long *a;
// Entry point: validates the compile-time configuration, launches the
// `queen` kernel over a (N^numBX) x (sizePerYSeg*numBY) grid of N x N
// thread blocks, then reduces the per-block counts (on GPU or CPU per the
// sumOnGPU switch), exploiting left/right symmetry by doubling.
int main(int argc, char *argv[]){
    /*check errors with macros*/
    //ensure number of tuples generated iteratively is not less than 0
    if(numGen < 0){
        fprintf(stderr, "\nnqeens: The number of values in the tuple generated iteratively cannot be less than 0.\n NumGen = _N_(%d) - 3 - numBX(%d) = %d\n", _N_, numBX, numGen);
        exit(1);
    }
    //ensure N is in the correct range
    if(_N_ < 4 || _N_ > 22){
        fprintf(stderr, "\nnqeens: _N_(%d) must be between 4 and 22 inclusive\n", _N_);
        exit(1);
    }
    //ensure that at least one of the tuple values is generated by the block's X coordinate value
    if(numBX < 1){
        fprintf(stderr, "\nnqeens: The number of tuples generated by each block's X coordinate value (numBX=%d) must be >= 1\n", numBX);
        exit(1);
    }
    //ensure the number of Y segments does not exceed the iteratively generated work
    // BUGFIX: pow() returns double; printing it with %d is undefined behavior —
    // cast to int for the diagnostic.
    if(numBY > pow(_N_, numGen)){
        fprintf(stderr, "\nnqeens: numBY(%d) must be less than or equal to _N_^numGen(%d)\n", numBY, (int)pow(_N_, numGen));
        exit(1);
    }
    long *dev_a;
    //check validity of arguments (should be no arguments)
    checkArgs(argc, argv, 1);
    int gW, gH, numberBlocks;
    //grid width: one block per tuple prefix generated from blockIdx.x
    gW = pow(_N_, numBX);
    //grid height: half the first row (rounded up — mirror symmetry) per Y segment
    int sizePerYSeg = (_N_ / 2) + (_N_ % 2);
    gH = sizePerYSeg * numBY;
    numberBlocks = gW * gH;
    //check that GPU can handle arguments
    checkGPUCapabilities(gW, gH, _N_, _N_, numberBlocks);
    /* Host buffer for the per-block partial counts. */
    a = new long[numberBlocks];
    /* Allocate global device memory. */
    cudaMalloc((void **)&dev_a, sizeof(long) * numberBlocks);
    /* Start the timer. */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* Execute the kernel. */
    dim3 block(_N_, _N_); //threads w x h
    dim3 grid(gW, gH); //blocks w x h
    queen<<<grid, block>>>(dev_a, sizePerYSeg);
    /* Wait for the kernel to complete. Needed for timing.
       (cudaDeviceSynchronize replaces the long-deprecated cudaThreadSynchronize.) */
    cudaDeviceSynchronize();
    /* Stop the timer and print the resulting time. */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /* Get result from device. */
    cudaMemcpy(a, dev_a, sizeof(long) * numberBlocks, cudaMemcpyDeviceToHost);
    //print any cuda error messages
    const char* errorString = cudaGetErrorString(cudaGetLastError());
    printf("GPU Error: %s\n", errorString);
    if(sumOnGPU){
        // BUGFIX: a[0] is long — %d is undefined behavior on LP64; use %ld.
        printf("Number of Solutions:%ld\n", a[0]);
        //add cpu time and gpu time and print result
        printf( "GPU Time/Total Time: %f secs\n", (elapsedTime / 1000.0));
    }
    else{
        /* Start the CPU timer. */
        startTime = getTime();
        // BUGFIX: accumulate in long — the per-block counts are long and an
        // int total can overflow for large boards.
        long sum = 0;
        //check if N is even or odd, then calculate sum, which is number of solutions
        if(_N_ % 2 == 0){
            // even N: every counted solution has a distinct mirror — double it
            for(int i = 0; i < numberBlocks; i++){
                sum += a[i];
            }
            sum *= 2;
        }
        else{
            // odd N: the middle-column blocks (last grid row of each segment)
            // are self-mirrored, so sum them separately after the doubling
            int numBlocksPerSeg = numberBlocks / numBY;
            int rowSizeOfGrid = pow(_N_, numBX);
            for(int j = 0; j < numBY; j++){
                int start = j * numBlocksPerSeg;
                for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
                    sum += a[i];
                }
            }
            sum *= 2;
            //add last block row of sums for each Y block
            for(int j = 0; j < numBY; j++){
                for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
                    sum += a[i];
                }
            }
        }
        /* Stop the CPU timer */
        stopTime = getTime();
        double totalTime = stopTime - startTime;
        printf("Number of Solutions: %ld\n", sum);
        //add cpu time and gpu time and print result
        printf( "GPU Time: %f secs\nCPU Time: %f secs\nTotal Time: %f secs\n", (elapsedTime / 1000.0), totalTime, (elapsedTime / 1000.0) + totalTime );
    }
    //destroy cuda event
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Free the allocated device memory. */
    cudaFree(dev_a);
    // BUGFIX: `a` was allocated with new[] — releasing it with free() is
    // undefined behavior; it must be delete[].
    delete[] a;
}
// Kernel: counts N-queens solutions for the board-prefix subtree assigned to
// this block/thread and writes one partial count per block into a[].
// Launch contract (see main): blockDim MUST be (_N_, _N_); gridDim.x = N^numBX;
// gridDim.y = sizePerYSeg * numBY. A candidate placement is a tuple of column
// indices, one per row: tuple[0] from blockIdx.y, tuple[1..numBX] from
// blockIdx.x digits (base N), the next two from (threadIdx.x, threadIdx.y),
// and the remaining numGen values enumerated iteratively per thread.
__global__
void queen(long *a, int sizePerYSeg){
// per-thread solution counters and per-thread working tuples in shared memory
__shared__ long solutions[_N_][_N_];
__shared__ char tuple[_N_][_N_][_N_];
int totalWrong = 0;
solutions[threadIdx.x][threadIdx.y] = 0;
// number of tuple tails each thread enumerates (N^numGen)
int totNumGen = powf(_N_, numGen);
// which Y work segment this block belongs to
int bYsegment = blockIdx.y / sizePerYSeg;
int workSize = totNumGen / numBY;
int extra = totNumGen - workSize * numBY;//extra work to be done by last segment
//set tuple by block Y value
tuple[threadIdx.x][threadIdx.y][0] = blockIdx.y % sizePerYSeg;
//set tuple(s) by block X value (blockIdx.x decomposed base N)
int rem = blockIdx.x;
for(int i = 1; i <= numBX; i++){
tuple[threadIdx.x][threadIdx.y][i] = rem % _N_;
rem = rem / _N_;
}
int tupCtr = numBX;
//set tuples by thread value
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.x;
tuple[threadIdx.x][threadIdx.y][++tupCtr] = threadIdx.y;
//check if thread is valid at this point: any same-column or diagonal clash
//among the prefix values makes totalWrong non-zero
for(int i = tupCtr; i > 0; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
if(totalWrong == 0){
//iterate through all numbers to generate possible solutions thread must check
//does not do if thread is already not valid at this point
int start = bYsegment * workSize;
// the last Y segment also absorbs the remainder (`extra`) of the division
for(int c = start; c < start + workSize + (bYsegment == numBY - 1) * extra; c++){
//generate last values in tuple, convert to base N and store to tuple array
int rem = c;
for(int b = 0, k = tupCtr + 1; b < numGen; b++, k++){
tuple[threadIdx.x][threadIdx.y][k] = rem % _N_;
rem = rem / _N_;
}
//checks that the numGen tuple values are indeed unique (saves work overall)
for(int x = 0; x < numGen && totalWrong == 0; x++){
for(int y = 0; y < numGen && totalWrong == 0; y++){
totalWrong += tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + x] == tuple[threadIdx.x][threadIdx.y][tupCtr + 1 + y] && x != y;
}
}
//check one solution; `totalWrong * _N_` short-circuits the loop entirely
//once a conflict has already been found
for(int i = _N_ - 1; i > totalWrong * _N_; i--){
for(int j = i - 1, ctr = 1; j >= 0; j--, ctr++){
//same row
totalWrong += tuple[threadIdx.x][threadIdx.y][i] == tuple[threadIdx.x][threadIdx.y][j];
//diag upleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] - ctr) == tuple[threadIdx.x][threadIdx.y][j];
//diag downleft
totalWrong += (tuple[threadIdx.x][threadIdx.y][i] + ctr) == tuple[threadIdx.x][threadIdx.y][j];
}
}
//add 1 to solution total if nothing wrong
solutions[threadIdx.x][threadIdx.y] += !(totalWrong);
//reset total wrong
totalWrong = 0;
}
}
//sync the threads so that thread 0 can make the calculations
__syncthreads();
//have thread 0 sum for all threads in block to get block total
if(threadIdx.x == 0 && threadIdx.y == 0){
//ensure that the block total value is 0 initially
long sum = 0;
//iterate through each threads solution and add it to the block total
for(int i =0; i < _N_; i++){
for(int j = 0; j < _N_; j++){
//use local var
sum += solutions[i][j];
}
}
//store to global memory, one slot per block
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
//sync the threads so that calculations can be made
__syncthreads();
//have the first thread in the first block sum up the block sums to return to the CPU
// NOTE(review): this reads other blocks' a[] slots with no grid-wide barrier;
// it relies on all blocks having finished — only safe if this block is
// scheduled last, which is NOT guaranteed. Verify the sumOnGPU=1 path.
if(sumOnGPU == 1 && blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0){
sumBlocks(a);
}
}
// Device helper: reduces the per-block partial counts in a[] into the total
// solution count, applying the same mirror-symmetry doubling as the CPU path
// in main (even N: double everything; odd N: double all but the middle-column
// blocks, i.e. the last grid row of each Y segment).
// Called only from block (0,0), thread (0,0), so the final store lands in a[0].
__device__
void sumBlocks(long *a){
long sum = 0;
int numberBlocks = gridDim.x * gridDim.y;
int rowSizeOfGrid = powf(_N_, numBX);
//check if N is even or odd, then calculate sum, which is number of solutions
if(_N_ % 2 == 0){
for(int i = 0; i < numberBlocks; i++){
sum+= a[i];
}
sum *= 2;
}
else{
int numBlocksPerSeg = numberBlocks / numBY;
// double everything except the last grid row of each segment
for(int j = 0; j < numBY; j++){
int start = j * numBlocksPerSeg;
for(int i = start; i < start + numBlocksPerSeg - rowSizeOfGrid; i++){
sum+= a[i];
}
}
sum *= 2;
//add last block row of sums for each Y block (middle column — not mirrored)
for(int j = 0; j < numBY; j++){
for(int i = j * numBlocksPerSeg + numBlocksPerSeg - rowSizeOfGrid; i < j * numBlocksPerSeg + numBlocksPerSeg; i++){
sum+= a[i];
}
}
}
//store sum to first index of a (caller is block (0,0), so the index is 0)
a[gridDim.x * blockIdx.y + blockIdx.x] = 0;
a[gridDim.x * blockIdx.y + blockIdx.x] = sum;
}
// Validate the command line: at most numArgs entries (program name included),
// and every expected argument must parse as a positive, non-zero integer
// below INT_MAX. Exits with status 1 on any violation.
void checkArgs(int argc, char *argv[], int numArgs){
    // Reject any surplus command-line arguments outright.
    if(argc > numArgs){
        fprintf(stderr, "\nnqueens: Incorrect number of arguments, %d\nCorrect usage: \"nqueens\"\n", argc - 1);
        exit(1);
    }
    char* endPtr;
    long parsed;
    for(int i = 1; i < numArgs; i++){
        // overflow: strtol saturates at LONG_MAX; anything >= INT_MAX is rejected
        if((parsed = strtol(argv[i], &endPtr, 10)) >= INT_MAX){
            fprintf(stderr, "\nnqueens: Overflow. Invalid argument %d for nqueens, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", i, argv[i], INT_MAX);
            exit(1);
        }
        // must be strictly positive and fully consumed (no trailing junk)
        if(!(parsed > 0) || (*endPtr)){
            fprintf(stderr, "\nnqueens: Invalid argument %d for nqueens, '%s'. The argument must be a valid, positive, non-zero integer.\n", i, argv[i]);
            exit(1);
        }
    }
}
// Query the active GPU and abort (exit 1) if the requested grid/block shape
// or result-buffer size exceeds its limits.
// size: number of long elements the result buffer will hold.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
    //check what GPU is being used
    int devId;
    cudaGetDevice( &devId );
    //get device properties for GPU being used
    cudaDeviceProp gpuProp;
    cudaGetDeviceProperties( &gpuProp, devId );
    //check if GPU has enough memory
    // BUGFIX: the diagnostic previously printed (size * sizeof(float)) * 3,
    // which did not match the size actually checked (size * sizeof(long));
    // also cast the size_t operands explicitly for the %ld conversions.
    if(gpuProp.totalGlobalMem < (size * sizeof(long))){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU does not have enough memory to handle the data size: %ld. It can only handle data sizes up to %ld.\n", (long)(size * sizeof(long)), (long)gpuProp.totalGlobalMem);
        exit(1);
    }
    //check if GPU can handle the number of threads per bloc
    if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
        exit(1);
    }
    //check that GPU can handle the number of threads in the block width
    if(gpuProp.maxThreadsDim[0] < blockW){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
        exit(1);
    }
    //check that GPU can handle the number of threads in the block height
    if(gpuProp.maxThreadsDim[1] < blockH){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid width
    if(gpuProp.maxGridSize[0] < gridW){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid height
    if(gpuProp.maxGridSize[1] < gridH){
        fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
        exit(1);
    }
}
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double getTime(){
    struct timeval now;
    gettimeofday(&now, 0);
    return now.tv_sec + now.tv_usec / 1000000.0;
}
|
19,090 | /*
Final project of NVIDIA Fundamentals of CUDA in C/C++
Consits of a simulation of the n-body problem.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SOFTENING 1e-9f
/*
* Each body contains x, y, and z coordinate positions,
* as well as velocities in the x, y, and z directions.
*/
typedef struct { float x, y, z, vx, vy, vz; } Body;
/*
* Do not modify this function. A constraint of this exercise is
* that it remain a host function.
*/
/*
 * Fill `data` with n uniform host-generated values in [-1, 1].
 * Must remain a host function (exercise constraint); uses the C library RNG,
 * so results depend on any prior srand() call.
 */
void randomizeBodies(float *data, int n) {
  for (float *p = data; p != data + n; ++p) {
    float u = rand() / (float)RAND_MAX;  // uniform in [0, 1]
    *p = 2.0f * u - 1.0f;                // rescale to [-1, 1]
  }
}
/*
* This function calculates the gravitational impact of all bodies in the system
* on all others, but does not update their positions.
*/
/*
 * Kernel: accumulate the gravitational force on every body from all n bodies
 * (all-pairs, softened) and update velocities only. Positions are integrated
 * separately so this pass reads a consistent snapshot of p[].x/y/z.
 * Grid-stride loop: correct for any launch configuration.
 */
__global__
void bodyForce(Body *p, float dt, int n) {
  int first = threadIdx.x + blockDim.x * blockIdx.x;
  int step = blockDim.x * gridDim.x;
  for (int i = first; i < n; i += step) {
    float fx = 0.0f, fy = 0.0f, fz = 0.0f;
    for (int j = 0; j < n; j++) {
      float dx = p[j].x - p[i].x;
      float dy = p[j].y - p[i].y;
      float dz = p[j].z - p[i].z;
      // SOFTENING keeps the i == j self-interaction finite
      float invDist = rsqrtf(dx*dx + dy*dy + dz*dz + SOFTENING);
      float invDist3 = invDist * invDist * invDist;
      fx += dx * invDist3;
      fy += dy * invDist3;
      fz += dz * invDist3;
    }
    p[i].vx += dt * fx;
    p[i].vy += dt * fy;
    p[i].vz += dt * fz;
  }
}
/*
 * Kernel: advance each body's position by its velocity over one time step.
 * Grid-stride loop; must run only after bodyForce for this step completes.
 */
__global__
void integratePosition(Body *p, float dt, int n){
  int first = threadIdx.x + blockDim.x * blockIdx.x;
  int step = blockDim.x * gridDim.x;
  for (int i = first; i < n; i += step){
    p[i].x += p[i].vx * dt;
    p[i].y += p[i].vy * dt;
    p[i].z += p[i].vz * dt;
  }
}
// Entry point: allocates pinned host + device body buffers, randomizes the
// initial state, runs nIters force/integrate steps, and reports the average
// interaction throughput.
int main(const int argc, const char** argv) {
/*
* Do not change the value for `nBodies` here. If you would like to modify it,
* pass values into the command line.
*/
int nBodies = 2<<11;
if (argc > 1) nBodies = 2<<atoi(argv[1]);
const float dt = 0.01f; // time step
const int nIters = 10; // simulation iterations
int bytes = nBodies * sizeof(Body);
float *buf;
int deviceId;
cudaGetDevice(&deviceId);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, deviceId);
// SM count, used below to size the grid (8 blocks per SM)
int size_warps = props.multiProcessorCount;
// pinned host memory: faster transfers than pageable malloc
cudaMallocHost(&buf, bytes);
Body *p;
cudaMalloc(&p, bytes);
/*
* As a constraint of this exercise, `randomizeBodies` must remain a host function.
*/
// 6 floats per Body (x,y,z,vx,vy,vz), so 6*nBodies floats fill `bytes` exactly
randomizeBodies(buf, 6 * nBodies); // Init pos / vel data
cudaMemcpy(p, buf, bytes, cudaMemcpyHostToDevice);
clock_t start = clock();
/*
* This simulation will run for 10 cycles of time, calculating gravitational
* interaction amongst bodies, and adjusting their positions to reflect.
*/
/*******************************************************************/
// Do not modify these 2 lines of code.
for (int iter = 0; iter < nIters; iter++) {
/*******************************************************************/
/*
* You will likely wish to refactor the work being done in `bodyForce`,
* as well as the work to integrate the positions.
*/
dim3 num_threads(128);
dim3 num_blocks(size_warps*8);
bodyForce<<<num_blocks, num_threads>>>(p, dt, nBodies); // compute interbody forces
// host-side barrier so the CPU-timed loop observes kernel completion
cudaDeviceSynchronize();
/*
* This position integration cannot occur until this round of `bodyForce` has completed.
* Also, the next round of `bodyForce` cannot begin until the integration is complete.
*/
integratePosition<<<num_blocks, num_threads>>>(p, dt, nBodies); // integrate position
cudaDeviceSynchronize();
}
double totalTime = (double)(clock()-start)/CLOCKS_PER_SEC;
cudaMemcpy(buf, p, bytes, cudaMemcpyDeviceToHost);
double avgTime = totalTime / (double)(nIters);
// n^2 pairwise interactions per iteration
double billionsOfOpsPerSecond = (1e-9 * nBodies * nBodies) / (avgTime);
printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond);
/*******************************************************************/
/*
* Feel free to modify code below.
*/
cudaFree(p);
cudaFreeHost(buf);
}
|
19,091 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
int row_counter(FILE* fp);
int col_counter(FILE* fp);
void read_matrix(FILE* fp,int *data);
void print_matrix(int *data,int mRr, int mRc);
// Kernel: naive dense matrix product mR = m1 * m2.
// m1 is m1r x m1c, m2 is m1c x m2c, mR is m1r x m2c, all row-major.
// One thread computes one output element; out-of-range threads (ragged grid
// edge) return immediately.
__global__ void matrix_multiplication(int *m1,int *m2, int *mR, int m1r, int m1c, int m2c)
{
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= m1r || c >= m2c)
        return;
    int acc = 0;
    for (int k = 0; k < m1c; ++k)
        acc += m1[r * m1c + k] * m2[k * m2c + c];
    mR[r * m2c + c] = acc;
}
// Entry point: reads two CSV integer matrices from the files named in
// argv[1]/argv[2], multiplies them on the GPU, and prints the result as CSV.
// Returns 1 on any usage/IO/size error, 0 on success.
int main(int argc, char **argv)
{
    if( 3 != argc ){
        printf("Incorrect number of params: %d\n",argc - 1);
        return 1;
    }
    //Vars Declaration
    int m1r,m1c,m2r,m2c,mRr,mRc; //rows & cols
    int m1s,m2s,mRs; //size in bytes
    int *gpu_m1, *cpu_m1;
    int *gpu_m2, *cpu_m2;
    int *gpu_mR, *cpu_mR;
    cudaError_t err = cudaSuccess;
    // Open the files
    FILE* fp1 = fopen(argv[1], "r");
    FILE* fp2 = fopen(argv[2], "r");
    // Check if files exists
    if (fp1 == NULL)
    {
        printf("Could not open file %s",argv[1] );
        return 1;
    }
    if (fp2 == NULL)
    {
        printf("Could not open file %s",argv[2] );
        return 1;
    }
    //Read the number of rows and cols of each matrix
    m1r = row_counter(fp1);
    m1c = col_counter(fp1);
    m2r = row_counter(fp2);
    m2c = col_counter(fp2);
    mRr = m1r;
    mRc = m2c;
    //condition: the number of columns in A must equal the number of rows in B
    if(m1c != m2r){
        printf("Incorrect Matrix Size, cols of Matrix 1 (%d) are different of the rows of Matrix 2 (%d)\n",m1c,m2r );
        return 1;
    }
    //reserve memory for each matrix on host and device
    m1s = m1r*m1c*sizeof(int);
    m2s = m2r*m2c*sizeof(int);
    mRs = mRr*mRc*sizeof(int);
    cpu_m1 = (int*)malloc(m1s);
    cpu_m2 = (int*)malloc(m2s);
    cpu_mR = (int*)malloc(mRs);
    err = cudaMalloc((void**)&gpu_m1,m1s);
    if(err != cudaSuccess){printf("Error with Matrix 1\n");exit(1);}
    err = cudaMalloc((void**)&gpu_m2,m2s);
    if(err != cudaSuccess){printf("Error with Matrix 2\n");exit(1);}
    err = cudaMalloc((void**)&gpu_mR,mRs);
    if(err != cudaSuccess){printf("Error with Matrix R\n");exit(1);}
    //Read the Files to the CPU memory
    read_matrix(fp1,cpu_m1);
    read_matrix(fp2,cpu_m2);
    //Now the files can be closed
    fclose(fp1);
    fclose(fp2);
    //Copy each matrix to the device
    err = cudaMemcpy(gpu_m1, cpu_m1, m1s, cudaMemcpyHostToDevice);
    if(err != cudaSuccess){printf("Error Coping Matrix 1\n");exit(1);}
    err = cudaMemcpy(gpu_m2, cpu_m2, m2s, cudaMemcpyHostToDevice);
    if(err != cudaSuccess){printf("Error Coping Matrix 2\n");exit(1);}
    //Execute the Kernel in the Device: one 32x32 thread tile per output tile
    int tile_dim = 32;
    dim3 block_dim(tile_dim,tile_dim,1);
    dim3 grid_dim(ceil(mRc/float(tile_dim)),ceil(mRr/float(tile_dim)),1);
    matrix_multiplication<<<grid_dim,block_dim>>>(gpu_m1, gpu_m2, gpu_mR, m1r, m1c, m2c);
    // BUGFIX: kernel launches do not return an error code — check explicitly.
    err = cudaGetLastError();
    if(err != cudaSuccess){printf("Error Launching Kernel\n");exit(1);}
    //copy the result to Host mem (cudaMemcpy implicitly waits for the kernel)
    err = cudaMemcpy(cpu_mR,gpu_mR,mRs, cudaMemcpyDeviceToHost);
    if(err != cudaSuccess){printf("Error Coping Matrix R\n");exit(1);}
    print_matrix(cpu_mR, mRr, mRc);
    // BUGFIX: the original leaked every allocation; release device and host
    // memory and return an explicit success status.
    cudaFree(gpu_m1);
    cudaFree(gpu_m2);
    cudaFree(gpu_mR);
    free(cpu_m1);
    free(cpu_m2);
    free(cpu_mR);
    return 0;
}
// Count the rows of a CSV file: 1 + number of '\n' characters.
// Rewinds the stream before returning.
int row_counter(FILE* fp){
    // BUGFIX: getc() returns int. Storing it in a char breaks the EOF
    // comparison — infinite loop where char is unsigned, and a false EOF on
    // a 0xFF byte where char is signed.
    int c;
    int count = 1;
    while ((c = getc(fp)) != EOF)
        if (c == '\n')
            count = count + 1;
    rewind(fp);
    return count;
}
// Count the columns of a CSV file's first line: 1 + number of ',' characters
// before the first newline. Rewinds the stream before returning.
int col_counter(FILE* fp){
    // BUGFIX: use int for the getc() result (see row_counter), and also stop
    // at EOF — the original spun forever on a file with no newline at all.
    int c;
    int count = 1;
    while ((c = getc(fp)) != EOF && c != '\n')
        if (c == ',')
            count = count + 1;
    rewind(fp);
    return count;
}
// Parse a comma/newline-separated matrix of non-negative decimal integers
// into `data` (row-major). Non-digit, non-separator bytes are ignored.
// Rewinds the stream before returning. `data` must be large enough for
// row_counter(fp) * col_counter(fp) elements.
void read_matrix(FILE* fp,int *data){
    // BUGFIX: int (not char) for the getc() result — see row_counter.
    int c;
    int value = 0;      // number currently being accumulated
    int have_digits = 0; // non-zero while a number is in progress
    int index = 0;
    while ((c = getc(fp)) != EOF){
        if (c == ',' || c == '\n') {
            // delimiter: flush the accumulated value and restart
            data[index] = value;
            value = 0;
            have_digits = 0;
            index++;
        } else if (c >= '0' && c <= '9') {
            // standard decimal build-up (replaces the original's pow() dance,
            // which computed the same thing)
            value = value * 10 + (c - '0');
            have_digits = 1;
        }
    }
    // BUGFIX: the original silently dropped the final value of a file that
    // does not end with a newline; flush it here.
    if (have_digits)
        data[index] = value;
    rewind(fp);
    return;
}
// Print the mRr x mRc row-major matrix as CSV to stdout: no trailing comma
// on a row and no newline after the final row (mirrors the input format).
void print_matrix(int *data,int mRr, int mRc){
    for (int r = 0; r < mRr; ++r)
    {
        for (int c = 0; c < mRc; ++c)
        {
            printf("%d", data[r * mRc + c]);
            if (c + 1 < mRc)
                printf(",");
        }
        if (r + 1 < mRr)
            printf("\n");
    }
    return;
}
|
19,092 | #include <cuda_runtime.h>
#include <stdio.h>
// Kernel: increment each of the first n elements of x by one.
// Single-block launch assumed (index uses threadIdx.x only); out-of-range
// threads do nothing. The printf is a debug trace and serializes execution.
__global__ void add_one(int n, float* x) {
    int idx = threadIdx.x;
    if (idx >= n)
        return;
    x[idx] = x[idx] + 1;
    printf("thread %d, value=%f\n", idx, x[idx]);
}
// Fill the host buffer with a simple ramp: h_A[i] = i.
void initialize_input(float* h_A, int n) {
    for (int idx = 0; idx < n; ++idx)
        h_A[idx] = (float)idx;
}
// 64 KB __constant__ memory demo target (256 floats).
__constant__ float constData[256];
// Demo of cudaMemcpyToSymbol / cudaMemcpyFromSymbol round-trip through
// constant memory. Parameters n and x are unused.
// NOTE(review): `data` is copied to the symbol while uninitialized — fine for
// an API demo, but the round-tripped values are indeterminate.
void test1(int n, float* x) {
float data[256];
cudaMemcpyToSymbol(constData, data, sizeof(data));
cudaMemcpyFromSymbol(data, constData, sizeof(data));
}
// Module-scope device variable used as a cudaMemcpyToSymbol target.
__device__ float devData;
// Demo: write a single float from the host into the device symbol devData.
// Parameters n and x are unused.
void test2(int n, float* x) {
float value = 3.14f;
cudaMemcpyToSymbol(devData, &value, sizeof(float));
}
// Module-scope device pointer, set from the host via cudaMemcpyToSymbol.
__device__ float* devPointer;
// Demo: allocate a device buffer and publish its address through the device
// symbol devPointer. Parameters n and x are unused.
// NOTE(review): the allocation is never cudaFree'd — acceptable for a demo,
// but a leak if called repeatedly.
void test3(int n, float* x) {
float* ptr;
cudaMalloc(&ptr, 256 * sizeof(float));
cudaMemcpyToSymbol(devPointer, &ptr, sizeof(ptr));
}
// Entry point: round-trips a 16-element ramp through the GPU, incrementing
// each element once with add_one, and prints the first two results.
int main(void) {
int N = 16;
size_t size = N * sizeof(float);
// Allocate input vectors h_A in host memory
float* h_A = (float*)malloc(size);
initialize_input(h_A, N);
// Allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// single block of N threads — matches add_one's threadIdx-only indexing
add_one<<<1, N>>>(N, d_A);
// Copy result from device memory to host memory
// (blocking cudaMemcpy also waits for the kernel to finish)
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
printf("result: %f,%f\n", h_A[0], h_A[1]);
// Free host memory
free(h_A);
}
|
19,093 | #include "includes.h"
// Single-value reciprocal kernel: *d_y = 1 / *d_x.
// Intended for a <<<1,1>>> launch; no guard against *d_x == 0 (yields inf).
__global__ void inverse_kernel(double* d_y, double* d_x) {
    *d_y = 1. / (*d_x);
}
19,094 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand_kernel.h>
#include <fstream>
#include <iostream>
// Reverse the bit order of a 32-bit word in five butterfly steps:
// swap halves, then bytes, nibbles, bit-pairs, and finally adjacent bits.
__device__ __host__ unsigned int bitreverse(unsigned int number) {
    unsigned int v = number;
    v = (v >> 16) | (v << 16);
    v = ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8);
    v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4);
    v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2);
    v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1);
    return v;
}
// Modular exponentiation b^e mod p via right-to-left binary (square-and-
// multiply). Intermediate products are widened to long to avoid overflow.
__device__ __host__ int fastexp(int b, int e, int p)
{
    int result = 1;
    for (; e > 0; e >>= 1)
    {
        if (e & 0x1)
            result = (int)(((long)result * b) % p);
        b = (int)(((long)b * b) % p);
    }
    return result;
}
// Insert a zero bit at position j of m: bits above j shift up one place,
// the low j bits are kept in place.
__device__ __host__ unsigned int getIndex(int m, int j)
{
    unsigned int hi = (m >> j) << (j + 1);
    unsigned int lo = ((1 << j) - 1) & m;
    return hi | lo;
}
// Derive the stage-j rotation amount from a transform index: bit-reverse the
// bits above position j, keep the top (n/2 - j) of them, then scale by 2^j.
__device__ __host__ unsigned int getShift(unsigned int index, int j, int n)
{
    unsigned int s = bitreverse(index >> (j + 1));
    s >>= 32 - ((n >> 1) - j);
    return s << j;
}
// Kernel: one thread per output slot. Each thread seeds its own curand state
// (fixed seed 1234, sequence = global thread id, so runs are reproducible),
// draws one uniform double, and scales it to an integer in [0, p).
// out[gtid + n] is zeroed — presumably the upper half of `out` is scratch for
// a later stage; confirm against the callers (outside this chunk).
__global__ void rand_device_api_kernel(curandState *states, int *out, int n, int p)
{
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < n)
{
curandState *state = states + gtid;
curand_init(1234UL, gtid, 0, state);
double rand = curand_uniform_double(state);
out[gtid] = (int)floor(p*rand);
out[gtid + n] = 0;
}
}
// Kernel: schoolbook modular polynomial multiplication c = a * b with a
// negacyclic wrap — each outer iteration multiplies a[] by b[i], shifts the
// accumulator down one slot, and feeds the shifted-out coefficient back
// negated (pC[n-1] = -temp0). Reduction mod p is done lazily, only when the
// outer index hits `thres` (then every `inc` iterations after).
// One block per output polynomial: pC = c + blockIdx.x * n.
// NOTE(review): __syncthreads() is called inside the j-strided loop — if n is
// not a multiple of blockDim.x, some threads leave the loop early and skip
// the barrier, which is undefined behavior. Confirm launches always use
// blockDim.x dividing n.
// NOTE(review): the read of pC[j] and the write to pC[j-1] within one pass
// are only race-free if all reads complete before any write; this appears to
// rely on warp-synchronous timing — verify with compute-sanitizer racecheck.
__global__ void montgomeryKernel(int *a, int *b, long *c, int n, int p, int thres)
{
long temp, temp0;
int tid = threadIdx.x;
int inc = thres;
long *pC = c + (blockIdx.x * n);
//int index;
// zero this block's accumulator
for(int i = tid; i < n; i += blockDim.x)
pC[i] = 0;
__syncthreads();
for (int i = 0; i < n; i++)
{
for(int j = tid; j < n; j += blockDim.x)
{
// coefficient 0 is tracked separately: it is the one shifted out below
if(j == 0)
temp0 = pC[j] + ((long)a[j] * b[i]);
else
temp = pC[j] + ((long)a[j] * b[i]);
// lazy modular reduction every `inc` iterations to keep longs in range
if (i == thres)
{
temp0 %= p;
temp %= p;
}
// shift the accumulator down one coefficient
if(j != 0)
pC[j-1] = temp;
__syncthreads();
}
// negacyclic wrap: the shifted-out constant term re-enters negated at the top
if (tid == 0)
pC[n-1] = -temp0;
if(i == thres)
thres += inc;
__syncthreads();
}
// final normalization into [0, p)
for(int i = tid; i < n; i += blockDim.x)
pC[i] = ((-pC[i] % p) + p) % p;
}
//__global__ void nttKernel(int *a, int *b, int *c, int n, int *wn, int *wni, int p, int ni)
//{
// extern __shared__ int local[];
//
// int *locala = local;
// int *localb = local + n;
//
// int tid = threadIdx.x;
// int lgn = (int)roundf(log2f((float)n));
//
// // Copy elements in reverse bit order to local memory
// locala[2*tid] = a[bitreverse(2*tid) >> (32 - lgn)];
// locala[2*tid+1] = a[bitreverse(2*tid+1) >> (32 - lgn)];
//
// localb[2*tid] = b[bitreverse(2*tid) >> (32 - lgn)];
// localb[2*tid+1] = b[bitreverse(2*tid+1) >> (32 - lgn)];
//
// __syncthreads();
//
// int quotient = tid; // quotient = tid / stride
//
// // Forward NTT
// for(int s = 1; s <= lgn; s++)
// {
// // Set up what we need for this iteration
// int m = 1 << s;
// int stride = m >> 1;
//
// int factor = tid - (quotient << (s-1));
// int t_tid = (quotient << s) + factor;
//
// int w = wn[(lgn - s + 1) * (n >> 1) + factor];
// // Butterfly operation
// long t = (long)w * locala[t_tid + stride];
// int u = locala[t_tid];
// locala[t_tid] = (u + t) % p;
// locala[t_tid + stride] = (u - t) % p;
//
// t = (long)w * localb[t_tid + stride];
// u = localb[t_tid];
// localb[t_tid] = (u + t) % p;
// localb[t_tid + stride] = (u - t) % p;
//
// quotient >>= 1;
//
// __syncthreads();
// }
//
// localb[2*tid] = ((long)localb[2*tid] * locala[2*tid]) % p;
// localb[2*tid+1] = ((long)localb[2*tid+1] * locala[2*tid+1]) % p;
//
// __syncthreads();
//
// locala[2*tid] = localb[bitreverse(2*tid) >> (32 - lgn)];
// locala[2*tid+1] = localb[bitreverse(2*tid+1) >> (32 - lgn)];
//
// __syncthreads();
//
// quotient = tid;
//
// // Backward NTT
// for(int s = 1; s <= lgn; s++)
// {
// int m = 1 << s;
// int stride = m >> 1;
// int factor = tid - (quotient << (s-1));
// int t_tid = (quotient << s) + factor;
// int w = wni[(lgn - s + 1) * (n >> 1) + factor];
//
// long t = (long)w * locala[t_tid + stride];
// int u = locala[t_tid];
// locala[t_tid] = (u + t) % p;
// locala[t_tid + stride] = (u - t) % p;
//
// quotient >>= 1;
//
// __syncthreads();
// }
//
// // write results to buffer
// if(blockIdx.x == 0)
// {
//// c[2*tid] = locala[2*tid];
//// c[2*tid+1] = locala[2*tid+1];
// locala[2*tid] = (locala[2*tid] * ni) % p;
// locala[2*tid+1] = (locala[2*tid+1] * ni) % p;
//
// __syncthreads();
//
// c[tid] = (((locala[tid] - locala[tid + (n/2)]) % p) + p) % p;
// }
//}
// Kernel: negacyclic convolution of a and b via iterative radix-2 NTT mod p.
// Pipeline: bit-reverse-load both inputs into this block's 2n-int scratch
// region of c, forward-transform both, multiply pointwise, bit-reverse-load
// the product, inverse-transform, scale by ni (presumably n^-1 mod p — TODO
// confirm against host setup), and fold the two halves to reduce mod
// (x^(n/2) + 1). wn/wni are precomputed forward/inverse twiddle tables,
// indexed by stage. Assumes n is a power of two; each __syncthreads() sits
// outside the thread-strided loops, so the barriers are uniform.
__global__ void nttKernel(int *a, int *b, int *c, int n, int *wn, int *wni, int p, int ni)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int lgn = (int)roundf(log2f((float)n));
// this block's scratch: pC1 holds b's transform, pC2 holds a's
int *pC1 = c + (bid * n * 2);
int *pC2 = pC1 + n;
// Copy elements in reverse bit order to scratchpad memory
for (int i = tid; i < n/2; i += blockDim.x)
{
pC1[2*i] = b[bitreverse(2*i) >> (32 - lgn)];
pC1[2*i+1] = b[bitreverse(2*i+1) >> (32 - lgn)];
}
__syncthreads();
for (int i = tid; i < n/2; i += blockDim.x)
{
pC2[2*i] = a[bitreverse(2*i) >> (32 - lgn)];
pC2[2*i+1] = a[bitreverse(2*i+1) >> (32 - lgn)];
}
int *locala = pC2;
int *localb = pC1;
__syncthreads();
// Forward NTT: lgn stages of butterflies over both inputs at once
for(int s = 1; s <= lgn; s++)
{
// Set up what we need for this iteration
for (int i = tid; i < n/2; i += blockDim.x)
{
int m = 1 << s;
int stride = m >> 1;
// map flat butterfly index i to its (group, offset) position
int quotient = i / stride;
int factor = i - (quotient << (s-1));
int t_tid = (quotient << s) + factor;
int w = wn[(lgn - s + 1) * (n >> 1) + factor];
// Butterfly operation (results normalized into [0, p))
long t = ((long)w * locala[t_tid + stride]) % p;
long u = locala[t_tid];
locala[t_tid] = (u + t) % p;
locala[t_tid + stride] = (((u - t) % p) + p) % p;
t = ((long)w * localb[t_tid + stride]) % p;
u = localb[t_tid];
localb[t_tid] = (u + t) % p;
localb[t_tid + stride] = (((u - t) % p) + p) % p;
}
__syncthreads();
}
// pointwise product in the transform domain
for(int i=tid; i < n/2; i+=blockDim.x)
{
localb[2*i] = ((long)localb[2*i] * locala[2*i]) % p;
localb[2*i+1] = ((long)localb[2*i+1] * locala[2*i+1]) % p;
}
__syncthreads();
// bit-reverse reload of the product before the inverse transform
for(int i=tid; i < n/2; i+=blockDim.x)
{
locala[2*i] = localb[bitreverse(2*i) >> (32 - lgn)];
locala[2*i+1] = localb[bitreverse(2*i+1) >> (32 - lgn)];
}
__syncthreads();
// Backward NTT (same butterfly structure, inverse twiddles)
for(int s = 1; s <= lgn; s++)
{
for(int i=tid; i < n/2; i+=blockDim.x)
{
int m = 1 << s;
int stride = m >> 1;
int quotient = i / stride;
int factor = i - (quotient << (s-1));
int t_tid = (quotient << s) + factor;
int w = wni[(lgn - s + 1) * (n >> 1) + factor];
long t = ((long)w * locala[t_tid + stride]) % p;
long u = locala[t_tid];
locala[t_tid] = (u + t) % p;
locala[t_tid + stride] = (((u - t) % p) + p) % p;
}
__syncthreads();
}
// scale by ni to complete the inverse transform
for(int i=tid; i < n/2; i+= blockDim.x)
{
locala[2*i] = ((long)locala[2*i] * ni) % p;
locala[2*i+1] = ((long)locala[2*i+1] * ni) % p;
}
__syncthreads();
// negacyclic fold: subtract the upper half from the lower, normalize to [0, p)
for(int i=tid; i < n/2; i+=blockDim.x)
{
pC1[i] = (((locala[i] - locala[i + (n/2)]) % p) + p) % p;
}
}
//__global__ void nussKernel(int *rx, int *ry, int *ns, int lgn, int *wx, int *wy, int scratchSize, int *z)
//{
// int bid = blockIdx.x;
// int tid = threadIdx.x;
//
// // each block has its reserved scratchpad space
// int *brx = bid * scratchSize + rx;
// int *bry = bid * scratchSize + ry;
// int *bwx = bid * scratchSize + wx;
// int *bwy = bid * scratchSize + wy;
//
// int numTrans = 1;
// int numTransPerPass;
// int numPasses;
//
// for (int l = 0; l < lgn; l++)
// {
// int n = ns[l];
// int TransSize = 1 << n;
// int m = 1 << (n >> 1);
// int r = (n & 0x1) ? m << 1 : m;
//
// numTransPerPass = blockDim.x / TransSize;
// numPasses = numTrans / numTransPerPass;
//
// int ptid = tid; // thread id during the different passes
// for (int k = 0; k < numPasses; k++)
// {
// int lbid = ptid / TransSize;
// int ltid = ptid - (lbid * TransSize);
//
// int *lrx = lbid * TransSize + brx;
// int *lry = lbid * TransSize + bry;
//
// int *lwx = lbid * (m << 1) * r + bwx;
// int *lwy = lbid * (m << 1) * r + bwy;
//
// int j = ltid / m;
// int i = ltid - (j * m);
//
// // Initialize
// lwx[i * r + j] = lwx[(i + m)*r + j] = lrx[ltid];
// lwy[i * r + j] = lwy[(i + m)*r + j] = lry[ltid];
//
// int lm = ltid / r;
// int lr = ltid - (lm * r);
//
// // Transform
// for (int j = (n >> 1) - 1; j >= 0; j--)
// {
// int index = getIndex(lm, j);
// int shift = getShift(index, j, n);
// shift *= (r == m) ? 1 : 2;
// int sindex = (lr >= shift) ? (lr - shift) : (lr - shift + r);
//
// int tx = lwx[index * r + lr];
// int ux = (lr >= shift) ? lwx[(index + (1 << j)) * r + sindex] : -lwx[(index + (1 << j)) * r + sindex];
//
// int ty = lwy[index * r + lr];
// int uy = (lr >= shift) ? lwy[(index + (1 << j)) * r + sindex] : -lwy[(index + (1 << j)) * r + sindex];
//
// __syncthreads();
//
// lwx[index * r + lr] = tx + ux;
// lwx[(index + (1 << j)) * r + lr] = tx - ux;
//
// lwy[index * r + lr] = ty + uy;
// lwy[(index + (1 << j)) * r + lr] = ty - uy;
//
// __syncthreads();
// }
//
// ptid += blockDim.x;
// } // End of passes
//
// // update the state
// numTrans *= (m << 1);
//
// int *temp = brx; brx = bwx; bwx = temp;
// temp = bry; bry = bwy; bwy = temp;
//
// __syncthreads();
// } // End of this stage
//
// // Do all the convolutions of size 2
// for (int i = tid; i < numTrans; i += blockDim.x)
// {
// // Determine on which data to operate
// int *lrx = (i << 1) + brx;
// int *lry = (i << 1) + bry;
// int *lwx = (i << 1) + bwx;
//
// // Operate
// int x0, x1, y0, y1;
// x0 = lrx[0]; x1 = lrx[1]; y0 = lry[0]; y1 = lry[1];
// int t = x0 * (y0 + y1);
// lwx[0] = t - (x0 + x1) * y1;
// lwx[1] = t + (x1 - x0) * y0;
// }
//
// // Set read and write segments for the last phase
// int *brz = bwx;
// int *bwz = bwy;
//
// // Start last phase
// for (int l = lgn-1; l >= 0; l--)
// {
// int n = ns[l];
// int m = 1 << (n >> 1);
// int r = (n & 0x1) ? m << 1 : m;
// int blocking = r * m; // how many threads per reconstruction
// int grouping = m << 1; // how many polys needed for reconstruction
//
// numTransPerPass = blockDim.x / blocking;
// numPasses = (numTrans / grouping) / numTransPerPass;
//
// int ptid = tid;
// for (int k = 0; k < numPasses; k++)
// {
// int lbid = ptid / blocking;
// int ltid = ptid - (lbid * blocking);
// //where to read from
// int *lrz = grouping * r * lbid + brz;
//
// //where to write to
// int *lwz = lbid * blocking + bwz; // offset is current lbid * size of trans at next level
//
// int lm = ltid / r;
// int lr = ltid - (lm * r);
//
// // Untransform
// for (int j = 0; j <= (n >> 1); j++)
// {
// int index = getIndex(lm, j);
// int shift = getShift(index, j, n);
// shift *= (r == m) ? 1 : 2;
// //int sindex = (lr < (r - shift)) ? (lr + shift) : (lr + shift - r);
// int sindex;
// int tp, up;
//
// int t = lrz[index * r + lr];
// int u = lrz[(index + (1 << j)) * r + lr];
//
// if (lr < (r - shift))
// {
// sindex = lr + shift;
// tp = lrz[index * r + sindex];
// up = lrz[(index + (1 << j)) * r + sindex];
// }
// else
// {
// sindex = lr + shift - r;
// tp = -lrz[index * r + sindex];
// up = -lrz[(index + (1 << j)) * r + sindex];
// }
//
// __syncthreads();
//
// lrz[index * r + lr] = (t + u) / 2;
// lrz[(index + (1 << j)) * r + lr] = (tp - up) / 2;
//
// __syncthreads();
// }
//
// // Repack
// int j = ltid / m;
// int i = ltid - (j * m);
//
// lwz[ltid] = (j == 0) ? (lrz[i * r] - lrz[(m + i) * r + (r - 1)]) : (lrz[i * r + j] + lrz[(m + i) * r + (j - 1)]);
//
// ptid += blockDim.x;
// }
//
// int *temp = brz; brz = bwz; bwz = temp;
// numTrans /= grouping;
//
// __syncthreads();
// }
//
// z[tid] = brz[tid];
//}
// ======== nussbaumerKernel ========
// Negacyclic polynomial multiplication z = rx * ry in Z_p[x]/(x^N + 1),
// N = 1 << ns[0], via Nussbaumer's transform, in three phases:
//   1) lgn forward "transform" stages: each size-2^n problem is split into
//      2m polynomials of length r (m = 2^(n>>1); r = 2m if n is odd, else m),
//      using sign-aware rotations (getIndex/getShift) instead of twiddle
//      multiplications, with results reduced mod p.
//   2) Base case: numTrans independent size-2 convolutions done with
//      3 multiplications each.
//   3) lgn inverse "untransform" stages: inverse butterflies scaled by
//      inv2 (= 2^{-1} mod p), then repacking into the next level up.
// Launch contract (see nussbaumerMultiply): blockDim.x equals r of the top
// level; rx/ry (inputs) and wx/wy (work buffers) give each block a private
// scratch area of scratchSize ints; z receives 1 << ns[0] coefficients per block.
// NOTE(review): every __syncthreads() sits inside loops whose bounds
// (numPasses, m, n) are uniform across the block, so the barriers are
// non-divergent — confirm numTrans is always a multiple of numTransPerPass
// for the parameter sets used.
__global__ void nussbaumerKernel(int *rx, int *ry, int *ns, int lgn, int *wx, int *wy, int scratchSize, int *z, int p, long inv2)
{
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    // each block has its reserved scratchpad space
    int *brx = bid * scratchSize + rx;   // block-local read buffers (x/y = the two operands)
    int *bry = bid * scratchSize + ry;
    int *bwx = bid * scratchSize + wx;   // block-local write buffers (ping-ponged with brx/bry)
    int *bwy = bid * scratchSize + wy;
    int *bz = bid * (1 << ns[0]) + z;    // block-local slice of the output
    int numTrans = 1;                    // independent sub-transforms at the current level
    int numTransPerPass;
    int numPasses;
    for (int l = 0; l < lgn; l++)
    {
        int n = ns[l];                   // log2 of the sub-problem size at this level
        int TransSize = 1 << n;
        int m = 1 << (n >> 1);
        int r = (n & 0x1) ? m << 1 : m;  // length of the split polynomials
        numTransPerPass = blockDim.x / r;
        numPasses = numTrans / numTransPerPass;
        int ptid = tid; // thread id during the different passes
        for (int k = 0; k < numPasses; k++)
        {
            int lbid = ptid / r;              // which sub-transform this thread works on
            int ltid = ptid - (lbid * r);     // lane within that sub-transform
            int *lrx = lbid * TransSize + brx;
            int *lry = lbid * TransSize + bry;
            int *lwx = lbid * (m << 1) * r + bwx;
            int *lwy = lbid * (m << 1) * r + bwy;
            // Duplicate the input into both halves of the 2m x r work layout.
            for (int o = ltid; o < (1 << n); o += r)
            {
                int j = o / m;
                int i = o - (j * m);
                // Initialize
                lwx[i * r + j] = lwx[(i + m)*r + j] = lrx[o];
                lwy[i * r + j] = lwy[(i + m)*r + j] = lry[o];
            }
            // Transform
            for (int j = (n >> 1) - 1; j >= 0; j--)
            {
                for (int lm = 0; lm < m; lm++)
                {
                    int index = getIndex(lm, j);
                    int shift = getShift(index, j, n);
                    shift *= (r == m) ? 1 : 2;
                    // Rotation by `shift` with negacyclic wrap-around (sign flip).
                    int sindex = (ltid >= shift) ? (ltid - shift) : (ltid - shift + r);
                    int tx = lwx[index * r + ltid];
                    int ux = (ltid >= shift) ? lwx[(index + (1 << j)) * r + sindex] : -lwx[(index + (1 << j)) * r + sindex];
                    int ty = lwy[index * r + ltid];
                    int uy = (ltid >= shift) ? lwy[(index + (1 << j)) * r + sindex] : -lwy[(index + (1 << j)) * r + sindex];
                    // Barrier between the gather above and the butterfly writes below.
                    __syncthreads();
                    // Butterfly, reduced mod p; the subtraction branch is
                    // re-normalized into [0, p).
                    lwx[index * r + ltid] = ((tx + ux) % p);
                    lwx[(index + (1 << j)) * r + ltid] = (((tx - ux) % p) + p) % p;
                    lwy[index * r + ltid] = ((ty + uy) % p);
                    lwy[(index + (1 << j)) * r + ltid] = (((ty - uy) % p) + p) % p;
                    __syncthreads();
                }
            }
            ptid += blockDim.x;
        } // End of passes
        // update the state
        numTrans *= (m << 1);
        //if(tid == 0) printf("%d\n", numTrans);
        // Ping-pong: this level's output becomes the next level's input.
        int *temp = brx; brx = bwx; bwx = temp;
        temp = bry; bry = bwy; bwy = temp;
        __syncthreads();
    } // End of this stage
    // Base case: numTrans independent size-2 negacyclic convolutions,
    // each computed with 3 multiplications (Karatsuba-style).
    for (int i = tid; i < numTrans; i += blockDim.x)
    {
        // Determine on which data to operate
        int *lrx = (i << 1) + brx;
        int *lry = (i << 1) + bry;
        int *lwx = (i << 1) + bwx;
        //int *lwy = i * TransSize + bwy;
        // Operate
        int x0, x1, y0, y1;
        x0 = lrx[0]; x1 = lrx[1]; y0 = lry[0]; y1 = lry[1];
        long t = (long)x0 * (y0 + y1);   // long accumulation avoids int overflow
        lwx[0] = (((t - (x0 + x1) * (long)y1) % p) + p) % p;
        lwx[1] = ((t + (x1 - x0) * (long)y0) % p);
        //ptid += blockDim.x;
    }
    // Read/write segments for the inverse phase (only one value stream now).
    int *brz = bwx;
    int *bwz = bwy;
    // Start last phase
    for (int l = lgn - 1; l >= 0; l--)
    {
        int n = ns[l];
        int m = 1 << (n >> 1);
        int r = (n & 0x1) ? m << 1 : m;
        int blocking = r;        // how many threads per reconstruction
        int grouping = m << 1;   // how many polys needed for reconstruction
        numTransPerPass = blockDim.x / blocking;
        numPasses = (numTrans / grouping) / numTransPerPass;
        int ptid = tid;
        for (int k = 0; k < numPasses; k++)
        {
            int lbid = ptid / blocking;
            int ltid = ptid - (lbid * blocking);
            //where to read from
            int *lrz = grouping * r * lbid + brz;
            //where to write to
            int *lwz = lbid * (r * m) + bwz; // offset is current lbid * size of trans at next level
            // Untransform: inverse butterflies, each halving via inv2 mod p.
            for (int j = 0; j <= (n >> 1); j++)
            {
                for (int lm = 0; lm < m; lm++)
                {
                    int index = getIndex(lm, j);
                    int shift = getShift(index, j, n);
                    shift *= (r == m) ? 1 : 2;
                    int sindex;
                    int tp, up;
                    int t = lrz[index * r + ltid];
                    int u = lrz[(index + (1 << j)) * r + ltid];
                    // Inverse rotation: wrap-around partner picks up a sign flip.
                    if (ltid < (r - shift))
                    {
                        sindex = ltid + shift;
                        tp = lrz[index * r + sindex];
                        up = lrz[(index + (1 << j)) * r + sindex];
                    }
                    else
                    {
                        sindex = ltid + shift - r;
                        tp = -lrz[index * r + sindex];
                        up = -lrz[(index + (1 << j)) * r + sindex];
                    }
                    __syncthreads();
                    // (t + u) / 2 and (tp - up) / 2 in modular arithmetic.
                    lrz[index * r + ltid] = (((t + u) * (long)inv2) % p);
                    lrz[(index + (1 << j)) * r + ltid] = ((((tp - up) * (long)inv2) % p) + p) % p;
                    __syncthreads();
                }
            }
            // Repack: fold the 2m length-r pieces back into one length-2^n poly;
            // column 0 subtracts the wrapped coefficient (x^N == -1).
            for (int o = ltid; o < (1 << n); o += r)
            {
                int j = o / m;
                int i = o - (j * m);
                lwz[o] = (j == 0) ? (((lrz[i * r] - lrz[(m + i) * r + (r - 1)]) % p) + p) % p : ((lrz[i * r + j] + lrz[(m + i) * r + (j - 1)]) % p);
            }
            ptid += blockDim.x;
        }
        int *temp = brz; brz = bwz; bwz = temp;
        numTrans /= grouping;
        __syncthreads();
    }
    // Copy the finished product into this block's output slice.
    for (int o = tid; o < (1 << ns[0]); o += blockDim.x)
    {
        bz[o] = (brz[o] % p);
    }
}
// Generates a random polynomial with n coefficients in [0, p) directly on
// the device via rand_device_api_kernel (cuRAND device API).
// On return, *d_poly points to a device buffer of 2*n ints (the caller's
// multiply routines use the doubled size as headroom).
// Fix: the static curand state buffer was re-allocated on every call while
// the previous allocation was leaked; free the old one first.
void generatePolynomialOnDevice(int n, int p, int **d_poly)
{
    static curandState *states = NULL;
    int *d_out;
    cudaMalloc((void**)&d_out, sizeof(int) * n * 2);
    if (states != NULL) cudaFree(states);  // release the previous call's state array
    cudaMalloc((void**)&states, sizeof(curandState) * n);
    dim3 block = 64;
    dim3 grid = (n + block.x - 1) / block.x;  // ceil-div so all n states are initialized
    rand_device_api_kernel<<<grid, block>>>(states, d_out, n, p);
    cudaDeviceSynchronize();
    *d_poly = d_out;
}
// Precomputes the per-stage twiddle tables used by the NTT kernel.
// For each stage i in [1, lgn], row i of pows receives the j-th powers
// (j in [0, n)) of wn^(2^(i-1)) mod p; row 0 is left untouched.
// Perf fix: the stage base wn^(2^(i-1)) mod p is loop-invariant in j, so
// it is now computed once per stage instead of once per table entry.
void powsOf2(int wn, int *pows, int lgn, int n, int p)
{
    for (int i = 1; i <= lgn; i++)
    {
        // Invariant over the inner loop: base = wn^(2^(i-1)) mod p.
        int base = fastexp(wn, (1 << (i - 1)), p);
        for (int j = 0; j < n; j++)
        {
            pows[i * n + j] = fastexp(base, j, p);
        }
    }
}
// Dumps the first n entries of a to a text file, one integer per line.
// The destination is now a parameter; the previous hard-coded, user-specific
// path is kept as the default so existing callers behave identically.
// A failed open is reported instead of silently writing nothing.
void writeToFile(int *a, int n, const char *path = "/home/othmane/Dokumente/poly.dat")
{
    std::ofstream out;
    out.open(path);
    if (!out.is_open()) {
        std::cerr << "writeToFile: could not open " << path << std::endl;
        return;
    }
    for (int i = 0; i < n; i++)
        out << a[i] << std::endl;
    out.close();
}
// Copies an n-int device buffer into a freshly malloc'd host buffer.
// The caller owns *Buf and is responsible for freeing it.
void moveToHost(int *d_Buf, int **Buf, int n)
{
    const size_t bytes = n * sizeof(int);
    int *host = (int*)malloc(bytes);
    cudaMemcpy(host, d_Buf, bytes, cudaMemcpyDeviceToHost);
    *Buf = host;
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double sec = (double)now.tv_sec;
    double usec = (double)now.tv_usec;
    return sec + usec * 1.e-6;
}
// CPU reference for negacyclic polynomial multiplication over Z_p:
// rC = A * B mod (x^n + 1, p), with rC normalized into [0, p).
void multiplyOnCPU(int *A, int *B, int n, int* rC, int p)
{
    // Full 2n-coefficient linear convolution, reduced mod p as we go.
    std::vector<int> conv(2 * n, 0);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            long acc = conv[i + j] + (long)B[i] * A[j];  // long to avoid overflow
            conv[i + j] = (int)(acc % p);
        }
    }
    // Fold the upper half back with a sign flip (x^n == -1) and normalize.
    for (int i = 0; i < n; ++i) {
        long folded = ((long)conv[i] - conv[i + n]) % p;
        rC[i] = (int)((folded + p) % p);
    }
}
// Host wrapper: multiplies two device-resident polynomials mod p with the
// Montgomery kernel, times the kernel, and copies the result into host
// buffer c (at least n ints).
// NOTE(review): d_c is sized n*grid longs but only the first n are copied
// back — presumably per-block partials or padding; confirm against
// montgomeryKernel's write pattern.
void montgomeryMultiply(int *d_a, int *d_b, int *c, int n, int p, int grid)
{
    long *d_c, *h_c;
    h_c = (long*)malloc(n * sizeof(long));
    cudaMalloc((void**)&d_c, n * grid * sizeof(long));
    // calculate threshold
    // thres = floor((2^63 - 1) / p^2): the largest count of p^2-bounded
    // summands that fits in a signed 64-bit accumulator; presumably the
    // kernel reduces mod p once this count is reached — confirm in kernel.
    long square = (long)p * p;
    long thres = (((1UL << 63) - 1) / square);
    dim3 block = (n > 1024) ? 1024 : n;   // cap at the per-block thread limit
    double iStart = cpuSecond();
    montgomeryKernel<<<grid, block>>>(d_a, d_b, d_c, n, p, (int)thres);
    cudaDeviceSynchronize();              // wait so the timing covers the kernel
    double iElaps = cpuSecond() - iStart;
    std::cout << "Time elapsed Montgomery: " << iElaps << std::endl;
    cudaMemcpy(h_c, d_c, n * sizeof(long), cudaMemcpyDeviceToHost);
    // Narrow the long results to int for the caller's buffer.
    for(int i = 0; i < n; i++)
        c[i] = (int)h_c[i];
    free(h_c);
    cudaFree(d_c);
}
// Host wrapper for NTT-based polynomial multiplication mod p.
// d_a/d_b: device operands; c: host buffer receiving the first n ints of
// the product. n is the transform length (main passes 2 * the polynomial
// degree); wn is presumably a primitive n-th root of unity mod p, wni its
// inverse, and ni presumably n^{-1} mod p for the final scaling — confirm
// against nttKernel, which is not visible here.
// Twiddle tables for every stage are precomputed on the host (powsOf2)
// and uploaded once.
void nttMultiply(int *d_a, int *d_b, int *c, int n, int ni, int wn, int wni, int p, int grid)
{
    int *d_c, *d_wn, *d_wni, *wns, *wnis;
    int lgn = (int)round(log2((double)n));
    // Forward and inverse twiddle tables: (lgn+1) stages of n/2 factors each.
    wns = (int*)malloc((lgn+1) * (n/2) * sizeof(int));
    wnis = (int*)malloc((lgn+1) * (n/2) * sizeof(int));
    powsOf2(wn, wns, lgn, n/2, p);
    powsOf2(wni, wnis, lgn, n/2, p);
    cudaMalloc((void**)&d_wn, (lgn+1) * (n/2) * sizeof(int));
    cudaMalloc((void**)&d_wni, (lgn+1) * (n/2) * sizeof(int));
    cudaMalloc((void**)&d_c, n * 2 * sizeof(int));
    cudaMemcpy(d_wn, wns, (lgn+1) * (n/2) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_wni, wnis, (lgn+1) * (n/2) * sizeof(int), cudaMemcpyHostToDevice);
    // One thread per butterfly (n/2), capped at the per-block limit.
    dim3 block = ((n / 2) > 1024) ? 1024 : n / 2;
    double iStart = cpuSecond();
    nttKernel<<<1, block>>>(d_a, d_b, d_c, n, d_wn, d_wni, p, ni);
    cudaDeviceSynchronize();   // wait so the timing covers the kernel
    double iElaps = cpuSecond() - iStart;
    std::cout << "Time elapsed NTT: " << iElaps << std::endl;
    cudaMemcpy(c, d_c, n * sizeof(int), cudaMemcpyDeviceToHost);
    // for(int i=0; i<256; i++)
    // c[i] = (((c[i] * 256) % 257) + 257) % 257;
    //
    // for(int i=0; i<128; i++)
    // c[i] = (((c[i] - c[i + 128]) % 257) + 257) % 257;
    cudaFree(d_c);
    cudaFree(d_wn);
    cudaFree(d_wni);
    free(wns);
    free(wnis);
}
// Host wrapper for the Nussbaumer negacyclic multiply mod p.
// d_a/d_b: device operands of n ints (n a power of two); c: host buffer
// receiving n result coefficients; inv2 = 2^{-1} mod p, needed by the
// inverse butterflies in the kernel.
// Fix: d_ns was allocated but never freed, leaking device memory on every call.
void nussbaumerMultiply(int *d_a, int *d_b, int *c, int n, int p, int grid, long inv2)
{
    // From here on, n is the log2 of the coefficient count.
    n = (int)round(log2((float)n));
    int lgn = (int)ceil(log2((float)n));
    int *ns = (int*)malloc(lgn * sizeof(int));
    int size = 1 << n;
    // Thread count of the top transform level: r = 2^(ceil(n/2)).
    int scalableSize = (n & 0x1) ? (1 << ((n >> 1) + 1)) : (1 << (n >> 1));
    int scratchSize = size * (1 << lgn);
    // Problem sizes per recursion level: n, then ceil(n/2), ceil(ceil(n/2)/2), ...
    ns[0] = n;
    for (int i = 1; i < lgn; i++)
        ns[i] = (ns[i - 1] & 0x1) ? (ns[i - 1] >> 1) + 1 : (ns[i - 1] >> 1);
    int *d_rx, *d_ry, *d_wx, *d_wy, *d_ns, *d_z, *z;
    cudaMalloc((void**)&d_rx, scratchSize * grid * sizeof(int));
    cudaMalloc((void**)&d_ry, scratchSize * grid * sizeof(int));
    cudaMalloc((void**)&d_wx, scratchSize * grid * sizeof(int));
    cudaMalloc((void**)&d_wy, scratchSize * grid * sizeof(int));
    cudaMalloc((void**)&d_ns, lgn * sizeof(int));
    cudaMalloc((void**)&d_z, size * grid * sizeof(int));
    z = (int*)malloc(size * sizeof(int));
    // Seed the kernel's read buffers with the operands (device-to-device).
    cudaMemcpy(d_rx, d_a, size * sizeof(int), cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_ry, d_b, size * sizeof(int), cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_ns, ns, lgn * sizeof(int), cudaMemcpyHostToDevice);
    double iStart = cpuSecond();
    nussbaumerKernel<<<1, scalableSize>>>(d_rx, d_ry, d_ns, lgn, d_wx, d_wy, scratchSize, d_z, p, inv2);
    cudaDeviceSynchronize();   // wait so the timing covers the kernel
    double iElaps = cpuSecond() - iStart;
    std::cout << "Time elapsed Nussbaumer: " << iElaps << std::endl;
    cudaMemcpy(z, d_z, size * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++)
        c[i] = z[i];
    free(z);
    free(ns);
    cudaFree(d_rx);
    cudaFree(d_ry);
    cudaFree(d_wx);
    cudaFree(d_wy);
    cudaFree(d_ns);  // was leaked in the original
    cudaFree(d_z);
}
// Allocates the two random device operands plus the host result buffers:
// c gets 2*n ints (room for a full-length product), c_ref gets n ints.
void initialize(int **d_a, int **d_b, int **c, int **c_ref, int n, int p)
{
    generatePolynomialOnDevice(n, p, d_a);
    generatePolynomialOnDevice(n, p, d_b);
    const size_t full = 2 * (size_t)n * sizeof(int);
    *c = (int*)malloc(full);
    *c_ref = (int*)malloc(n * sizeof(int));
}
// Driver: generates two random degree-<n polynomials on the device and
// multiplies them with three GPU algorithms (Montgomery/naive, Nussbaumer,
// NTT), then validates against a CPU reference.
// The commented blocks below are alternate (n, p, root-of-unity) parameter
// sets that were used during development.
// NOTE(review): c is overwritten by each multiply, so the final comparison
// only validates the last call (nttMultiply) — confirm that is intended.
int main()
{
    // int n = 128;
    // int p = 257;
    //
    // int ni = 256;
    // int wn = 3;
    // int wni = 86;
    // int n = 16;
    // int p = 786433;
    //
    // int ni = 761857;
    // int wn = 41596;
    // int wni = 116823;
    // int n = 256;
    // int p = 7681;
    //
    // int ni = 7666;
    // int wn = 62;
    // int wni = 1115;
    //
    // long inv2 = 3841;
    /* params for n = 1024 */
    int n = 1024;          // polynomial length
    int p = 786433;        // prime modulus
    int ni = 786049;       // modular constant for the NTT (see nttMultiply)
    int wn = 19;           // root of unity mod p
    int wni = 579477;      // inverse root of unity mod p
    long inv2 = 393217;    // 2^{-1} mod p, used by the Nussbaumer kernel
    /* params for n = 2048*/
    // int n = 2048;
    // int p = 786433;
    // int ni = 786241;
    // int wn = 14;
    // int wni = 280869;
    // long inv2 = 393217;
    /* params for n = 4096 */
    // int n = 4096;
    // int p = 786433;
    // int ni = 786337;
    // int wn = 804;
    // int wni = 292467;
    // long inv2 = 393217;
    int grid = 1;   // one product / one block
    int *d_a, *d_b, *a, *b, *c, *c_ref;
    initialize(&d_a, &d_b, &c, &c_ref, n, p);
    // Host copies of the operands for the CPU reference multiply.
    moveToHost(d_a, &a, n);
    moveToHost(d_b, &b, n);
    montgomeryMultiply(d_a, d_b, c, n, p, grid);
    nussbaumerMultiply(d_a, d_b, c, n, p, grid, inv2);
    // NTT needs the doubled (zero-padded) transform length.
    nttMultiply(d_a, d_b, c, 2*n, ni, wn, wni, p, grid);
    multiplyOnCPU(a, b, n, c_ref, p);
    // Report any mismatch between GPU (last multiply) and CPU reference.
    for(int i = 0; i < n; i++)
        if(c[i] != c_ref[i])
            printf("Error at %d, %d %d\n", i, c_ref[i], c[i]);
    free(a);
    free(b);
    free(c);
    free(c_ref);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
// In-place element-wise (Hadamard) product: a[i] *= b[i] for i in [0, l).
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void hadamardProductKernel(float *a, float *b, int l)
{
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < l; i += step)
        a[i] *= b[i];
}
// Host-side launcher for hadamardProductKernel (in-place a[i] *= b[i]).
// Fix: l <= 0 previously produced numBlocks == 0, which is an invalid
// launch configuration; return early instead (a no-op is the correct result).
extern "C" void hadamard_wrapper(float *a, float *b, int l)
{
    if (l <= 0) return;  // nothing to do; avoids a 0-block launch error
    const int blockSize = 256;
    const int numBlocks = (l + blockSize - 1) / blockSize;  // ceil-div
    hadamardProductKernel<<<numBlocks, blockSize>>>(a, b, l);
}
|
19,096 | #include <stdio.h>
#include <iostream>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error!=cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
} \
} \
// Current wall-clock time in seconds, with microsecond precision.
double cpuSecond() {
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1.e-6;
}
// Compares host and GPU result arrays element-wise and reports either the
// first mismatch (|diff| > epsilon) or a success message.
void checkResult(float *hostRef, float *gpuRef, const int N) {
    const double epsilon = 1.0E-8;
    for (int i = 0; i < N; i++) {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
            std::cout << "Arrays do not match!" << std::endl;
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            return;  // stop at the first mismatch, like the original break
        }
    }
    std::cout << "Arrays match. " << std::endl;
}
// Fills ip[0..size) with pseudo-random floats in [0.0, 25.5].
// Fix: the RNG was re-seeded with time(NULL) on every call, so two calls
// within the same second (exactly how main fills h_A and h_B) produced
// identical arrays; now the seed is set exactly once.
void initialData(float *ip, int size) {
    static bool seeded = false;
    if (!seeded) {
        // generate different seed for random number
        time_t t;
        srand((unsigned int) time(&t));
        seeded = true;
    }
    for(int i = 0; i < size; i++) {
        // rand() & 0xFF yields [0, 255]; divide by 10 -> [0.0, 25.5]
        ip[i] = (float)(rand() & 0xFF) / 10.f;
    }
}
// Host reference: C = A + B for an ny-by-nx row-major matrix.
void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny) {
    for (int iy = 0; iy < ny; iy++) {
        const int row = iy * nx;  // offset of this row in the flat arrays
        for (int ix = 0; ix < nx; ix++) {
            C[row + ix] = A[row + ix] + B[row + ix];
        }
    }
}
// One thread per element on a 2D grid: C[iy][ix] = A[iy][ix] + B[iy][ix].
// The guard handles launch grids that overshoot the nx-by-ny matrix.
__global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny) {
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= (unsigned int)nx || iy >= (unsigned int)ny) return;
    unsigned int idx = iy * nx + ix;  // row-major flat index
    MatC[idx] = MatA[idx] + MatB[idx];
}
// Benchmark driver: adds two 4096x4096 float matrices on the host and on
// the GPU (2D grid of 32x32 blocks), times both, and verifies the results
// agree within epsilon.
int main (int argc, char **argv) {
    std::cout << argv[0] << " Starting..." << std::endl;
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl;
    CHECK(cudaSetDevice(dev));
    // set up data size of matrix
    int nx = 1<<12;   // 4096 columns
    int ny = 1<<12;   // 4096 rows
    int nxy=nx*ny;
    int nBytes=nxy*sizeof(float);
    std::cout << "Matrix size: " << nx << ", " << ny << std::endl;
    // malloc host memory
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A =(float *)malloc(nBytes);
    h_B =(float *)malloc(nBytes);
    hostRef=(float *)malloc(nBytes);
    gpuRef =(float *)malloc(nBytes);
    // cpuSecond()
    double iStart, iElaps;
    // initial dataset at host site
    iStart=cpuSecond();
    initialData(h_A, nxy);
    initialData(h_B, nxy);
    iElaps=cpuSecond()-iStart;
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    // add vector at host side for result checks
    iStart=cpuSecond();
    sumMatrixOnHost(h_A, h_B, hostRef, nx, ny);
    iElaps=cpuSecond()-iStart;
    // malloc device global memory
    float *d_A, *d_B, *d_C;
    // NOTE(review): unlike the setup calls above, the cudaMalloc/cudaMemcpy
    // calls below are not wrapped in CHECK — failures would go unnoticed.
    cudaMalloc((void **)&d_A, nBytes);
    cudaMalloc((void **)&d_B, nBytes);
    cudaMalloc((void **)&d_C, nBytes);
    // transfer data from host to device
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    // invoke kernel at host side
    int dimx=32;
    int dimy=32;
    dim3 block(dimx, dimy);
    // ceil-div so the grid covers the whole matrix
    dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
    iStart=cpuSecond();
    sumMatrixOnGPU2D <<<grid, block>>> (d_A, d_B, d_C, nx, ny);
    cudaDeviceSynchronize();   // block so the timing covers kernel execution
    iElaps=cpuSecond()-iStart;
    std::cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), ("
              << block.x << ", " << block.y << ") >>>" <<
              "Time elapsed " << iElaps << std::endl;
    // copy kernel result back to host side
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
    // check device results
    checkResult(hostRef, gpuRef, nxy);
    // free device global memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    cudaDeviceReset();
    return 0;
}
|
19,097 | #include "includes.h"
// Permutes a PV-format weight buffer (src) into cuDNN weight layout (dest),
// flipping both spatial axes (180-degree kernel rotation, visible in the
// (ny - kY - 1) / (nx - kX - 1) terms).
// One thread per source element; launched 1-D with a bounds check.
// Source is read as [kOF][kY][kX][kIF] (inFeatures fastest), destination is
// written as [kOF][kIF][y][x].
// NOTE(review): the bound multiplies in manyScaleX * manyScaleY, so kOF
// spans outFeatures * manyScaleX * manyScaleY "many-scale" output features;
// dest must be sized accordingly — confirm against the caller.
__global__ void CudaPermuteWeightsPVToCudnn( float *dest, float *src, int outFeatures, int ny, int nx, int inFeatures, int manyScaleX, int manyScaleY) {
    // Parameter dimensions are PV source dimensions
    int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (kSrc < outFeatures * manyScaleX * manyScaleY * ny * nx * inFeatures) {
        // Decompose the flat source index (inFeatures varies fastest).
        int kOF = kSrc / (ny * nx * inFeatures);
        int kY = (kSrc % (ny * nx * inFeatures)) / (nx * inFeatures);
        int kX = (kSrc % (nx * inFeatures)) / inFeatures;
        int kIF = (kSrc % inFeatures);
        // Destination strides for the [OF][IF][y][x] layout.
        int sOF = inFeatures * ny * nx;
        int sIF = ny * nx;
        int sY = nx;
        int kDest = kOF * sOF + kIF * sIF + (ny - kY - 1) * sY + (nx - kX - 1);
        dest[kDest] = src[kSrc];
    }
}
19,098 | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <algorithm>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
void print_vector(int *vector, int num);
// SSSP driver: parses a transposed graph into CSR form, then iterates a
// min.+ sparse matrix-vector product on the GPU until the distance vector
// stops changing (Bellman-Ford style, at most num_nodes - 1 rounds), and
// finally writes the distances to "result.out" via print_vector.
// Usage: prog <graph file> <format: 1 = Metis, 0 = COO>
int main(int argc, char **argv)
{
    char *tmpchar;
    bool directed = 1;
    int num_nodes;
    int num_edges;
    int file_format = 1;
    cudaError_t err = cudaSuccess;
    if (argc == 3) {
        tmpchar = argv[1]; // Graph inputfile
        file_format = atoi(argv[2]);
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }
    // Allocate the csr structure
    csr_array *csr;
    // Parse the graph and store it into the CSR structure
    if (file_format == 1) {
        csr = parseMetis_transpose(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        csr = parseCOO_transpose(tmpchar, &num_nodes, &num_edges, directed);
    } else {
        printf("reserve for future");
        exit(1);
    }
    // Allocate the cost array
    int *cost_array = (int *)malloc(num_nodes * sizeof(int));
    if (!cost_array) fprintf(stderr, "malloc failed cost_array\n");
    // Set the cost array to zero
    for (int i = 0; i < num_nodes; i++) {
        cost_array[i] = 0;
    }
    // Create device-side buffers
    int *row_d;        // CSR row offsets (num_nodes + 1 entries)
    int *col_d;        // CSR column indices (num_edges entries)
    int *data_d;       // edge weights (num_edges entries)
    int *vector_d1;    // distance vector (previous iteration)
    int *vector_d2;    // distance vector (current iteration)
    int *stop_d;       // device-side "something changed" flag
    // Create the device-side graph structure
    err = cudaMalloc(&row_d, (num_nodes + 1) * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&col_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&data_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc data_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    // Termination variable
    err = cudaMalloc(&stop_d, sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1, cudaGetErrorString(err));
        return -1;
    }
    // Create the device-side buffers for sssp
    err = cudaMalloc(&vector_d1, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc vector_d1 (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&vector_d2, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc vector_d2 (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    double timer1 = gettime();
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    // Copy data to device side buffers
    err = cudaMemcpy(row_d, csr->row_array, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(data_d, csr->data_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy data_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    double timer3 = gettime();
    // Work dimensions: one thread per node, 64 threads per block (ceil-div).
    int block_size = 64;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);
    // Source vertex 0
    int sourceVertex = 0;
    // Launch the initialization kernel
    // NOTE(review): cudaThreadSynchronize() is deprecated; the modern
    // equivalent is cudaDeviceSynchronize().
    vector_init <<<grid, threads>>>(vector_d1, vector_d2, sourceVertex, num_nodes);
    cudaThreadSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: vector_init failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
    int stop = 1;
    int cnt = 0;   // number of relaxation rounds actually executed
    // Main computation loop: relax until no distance changes, at most
    // num_nodes - 1 times.
    for (int i = 1; i < num_nodes; i++) {
        // Reset the termination variable
        stop = 0;
        // Copy the termination variable to the device
        err = cudaMemcpy(stop_d, &stop, sizeof(int), cudaMemcpyHostToDevice);
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: write stop_d (%s)\n", cudaGetErrorString(err));
            return -1;
        }
        // Launch the assignment kernel
        // NOTE(review): the three kernel launches below are not followed by
        // cudaGetLastError(); launch failures inside the loop go undetected.
        vector_assign <<<grid, threads>>>(vector_d1, vector_d2, num_nodes);
        // Launch the min.+ kernel
        spmv_min_dot_plus_kernel <<<grid, threads>>>(num_nodes, row_d, col_d,
                                                     data_d, vector_d1,
                                                     vector_d2);
        // Launch the check kernel
        vector_diff <<<grid, threads>>>(vector_d1, vector_d2,
                                        stop_d, num_nodes);
        // Read the termination variable back
        // (this blocking cudaMemcpy also synchronizes the stream)
        err = cudaMemcpy(&stop, stop_d, sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: read stop_d (%s)\n", cudaGetErrorString(err));
            return -1;
        }
        // Exit the loop when the distance vector has converged
        if (stop == 0) {
            break;
        }
        cnt++;
    }
    cudaThreadSynchronize();
    double timer4 = gettime();
    // Read the cost_array back
    err = cudaMemcpy(cost_array, vector_d1, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: read vector_d1 (%s)\n", cudaGetErrorString(err));
        return -1;
    }
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif
    double timer2 = gettime();
    // Print the timing statistics
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("number iterations = %d\n", cnt);
#if 1
    // Print cost_array
    print_vector(cost_array, num_nodes);
#endif
    // Clean up the host arrays
    free(cost_array);
    csr->freeArrays();
    free(csr);
    // Clean up the device-side buffers
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(data_d);
    cudaFree(stop_d);
    cudaFree(vector_d1);
    cudaFree(vector_d2);
    return 0;
}
// Writes the cost vector to "result.out", one "index: value" line per
// node, using 1-based indices.
// Fixes: on fopen failure the function previously kept going and passed
// the NULL stream to fprintf/fclose (undefined behavior) — it now returns
// early; the error message also named the wrong file ("result.txt").
void print_vector(int *vector, int num)
{
    FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }
    for (int i = 0; i < num; i++)
        fprintf(fp, "%d: %d\n", i + 1, vector[i]);
    fclose(fp);
}
|
#include <chrono>
#include <iostream>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// Reads daily stock prices from stdin and reports, via thrust on the GPU:
// overall mean / max / min and the same statistics over (roughly) the last
// year of data.
// Fixes vs. the original:
//  * the read loop tested std::cin *before* extraction, so the final value
//    was pushed twice and n over-counted by one;
//  * thrust::reduce was given an int init value (0) for double data — the
//    accumulator takes the init's type, truncating every sum to int;
//  * inputs shorter than 365 samples no longer index before begin();
//  * empty input no longer divides by zero.
int main() {
    std::vector<double> stocks;
    double stock_day;
    // Only keep values that were actually extracted.
    while (std::cin >> stock_day) {
        stocks.push_back(stock_day);
    }
    int n = static_cast<int>(stocks.size());
    if (n == 0) {
        std::cout << "no data\n";
        return 0;
    }

    auto start = std::chrono::steady_clock::now();
    thrust::device_vector<double> dev(stocks);
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> time_seconds = end - start;

    // Start of the window covering (up to) the last 365 samples.
    int year_begin = (n > 365) ? n - 365 : 0;

    double preco_medio = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>()) / n;
    // NOTE(review): dividing a 365-sample window by 360 looks inconsistent —
    // kept as-is to preserve the original statistic; confirm intent.
    double preco_medio_ano = thrust::reduce(dev.begin() + year_begin, dev.end(), 0.0, thrust::plus<double>()) / 360;
    double maior_preco = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::maximum<double>());
    double menor_preco = thrust::reduce(dev.begin(), dev.end(), maior_preco, thrust::minimum<double>());
    double maior_ano = thrust::reduce(dev.begin() + year_begin, dev.end(), 0.0, thrust::maximum<double>());
    double menor_ano = thrust::reduce(dev.begin() + year_begin, dev.end(), maior_ano, thrust::minimum<double>());

    std::cout << "media: " << preco_medio << " media ano: " << preco_medio_ano << " maior: " << maior_preco << " menor: " << menor_preco << " maior ano: " << maior_ano << " menor ano: " << menor_ano << "\n";
}
19,100 | // Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <pthread.h>
#include <cuda_runtime.h>
#define MAX_STRING 100
#define EXP_TABLE_SIZE 1000
#define MAX_EXP 6
#define MAX_SENTENCE_LENGTH 1000
#define MAX_CODE_LENGTH 40
// Commented version of Mikolov's original code
// Cloned from https://github.com/chrisjmccormick/word2vec_commented.git
/* Report cuda error */
#define checkCUDAerr(err) {\
cudaError_t ce = err;\
if (cudaSuccess != ce) {\
printf("%s %d : %s\n", __FILE__, __LINE__, cudaGetErrorString(ce));\
exit(0);\
}\
}
/*
* The size of the hash table for the vocabulary.
* The vocabulary won't be allowed to grow beyond 70% of this number.
* For instance, if the hash table has 30M entries, then the maximum
* vocab size is 21M. This is to minimize the occurrence (and performance
* impact) of hash collisions.
*/
const int vocab_hash_size = 30000000; // Maximum 30 * 0.7 = 21M words in the vocabulary
typedef float real; // Precision of float numbers
/**
* ======== vocab_word ========
* Properties:
* cn - The word frequency (number of times it appears).
* word - The actual string word.
*/
/*
 * One vocabulary entry.
 * cn      - word frequency (number of occurrences), per the file's own docs.
 * word    - the word string itself.
 * point   - presumably the path of inner-node indices in the Huffman tree
 *           used by hierarchical softmax — confirm where it is populated
 *           (the tree-building code is not visible in this chunk).
 * code    - presumably the word's Huffman code bits, with codelen its
 *           length (bounded by MAX_CODE_LENGTH) — confirm likewise.
 */
struct vocab_word {
  long long cn;
  int *point;
  char *word, *code, codelen;
};
//TODO Allocate cuda structure first without incurring error
/*
 * ======== Global Variables ========
 *
 */
char train_file[MAX_STRING], output_file[MAX_STRING];
char save_vocab_file[MAX_STRING], read_vocab_file[MAX_STRING];
/*
 * ======== vocab ========
 * This array will hold all of the words in the vocabulary.
 * This is internal state.
 */
struct vocab_word *vocab;
int binary = 0, cbow = 1, debug_mode = 2, window = 5, min_count = 5, num_threads = 12, min_reduce = 1;
/*
 * ======== vocab_hash ========
 * This array is the hash table for the vocabulary. Word strings are hashed
 * to a hash code (an integer), then the hash code is used as the index into
 * 'vocab_hash', to retrieve the index of the word within the 'vocab' array.
 */
int *vocab_hash;
/*
 * ======== vocab_max_size ========
 * This is not a limit on the number of words in the vocabulary, but rather
 * a chunk size for allocating the vocabulary table. The vocabulary table will
 * be expanded as necessary, and is allocated, e.g., 1,000 words at a time.
 *
 * ======== vocab_size ========
 * Stores the number of unique words in the vocabulary.
 * This is not a parameter, but rather internal state.
 *
 * ======== layer1_size ========
 * This is the number of features in the word vectors.
 * It is the number of neurons in the hidden layer of the model.
 */
long long vocab_max_size = 1000, vocab_size = 0, layer1_size = 100;
/*
 * train_words      - total token count of the training file (after vocab filtering).
 * word_count_actual - running count of tokens processed, across all passes.
 * iter             - number of training epochs.
 * file_size        - byte size of the training file.
 * classes          - number of k-means classes for the optional class output.
 */
long long train_words = 0, word_count_actual = 0, iter = 5, file_size = 0, classes = 0;
/*
 * ======== alpha ========
 * TODO - This is a learning rate parameter.
 *
 * ======== starting_alpha ========
 *
 * ======== sample ========
 * This parameter controls the subsampling of frequent words.
 * Smaller values of 'sample' mean words are less likely to be kept.
 * Set 'sample' to 0 to disable subsampling.
 * See the comments in the subsampling section for more details.
 */
float alpha = 0.025, starting_alpha, sample = 1e-3;
/*
 * IMPORTANT - Note that the weight matrices are stored as 1D arrays, not
 * 2D matrices, so to access row 'i' of syn0, the index is (i * layer1_size).
 *
 * ======== syn0 ========
 * This is the hidden layer weights (which is also the word vectors!)
 *
 * ======== syn1 ========
 * This is the output layer weights *if using hierarchical softmax*
 *
 * ======== syn1neg ========
 * This is the output layer weights *if using negative sampling*
 *
 * ======== expTable ========
 * Stores precalculated activations for the output layer.
 */
float *syn0, *syn1, *syn1neg, *expTable;
clock_t start;
int hs = 0, negative = 5;
const int table_size = 1e8;
int *table;
// CUDA Init
// Host-side flattened Huffman data and their device mirrors (d_* pointers
// live in GPU global memory; see initVocabCuda for how they are filled).
int *vocab_codelen, *vocab_point, *d_vocab_codelen, *d_vocab_point;
int *d_table;
char *vocab_code, *d_vocab_code;
float *d_syn0, *d_syn1, *d_expTable;
/**
* ======== InitUnigramTable ========
* This table is used to implement negative sampling.
* Each word is given a weight equal to its frequency (word count) raised to
* the 3/4 power. The probability of selecting a word is just its weight
* divided by the sum of weights for all words.
*
* Note that the vocabulary has been sorted by word count, descending, such
* that we will go through the vocabulary from most frequent to least.
*/
void InitUnigramTable() {
  int a, i;
  double train_words_pow = 0;
  double d1, power = 0.75;
  // Allocate the table. It's bigger than the vocabulary, because words will
  // appear in it multiple times based on their frequency.
  // Every vocab word appears at least once in the table.
  // The size of the table relative to the size of the vocab dictates the
  // resolution of the sampling.
  table = (int *)malloc(table_size * sizeof(int));
  // Denominator: the sum of weights (cn^0.75) over all words.
  for (a = 0; a < vocab_size; a++) train_words_pow += pow(vocab[a].cn, power);
  // 'i' is the vocabulary index of the current word; 'a' indexes the table.
  i = 0;
  // d1 is the cumulative probability mass of words 0..i.
  d1 = pow(vocab[i].cn, power) / train_words_pow;
  // Fill every table slot; word 'i' occupies a span proportional to its weight.
  for (a = 0; a < table_size; a++) {
    table[a] = i;
    // Once the fraction of the table filled exceeds the cumulative
    // probability, advance to the next word.
    if (a / (double)table_size > d1) {
      i++;
      // Fix: clamp BEFORE reading vocab[i].cn. The old code incremented 'i',
      // read vocab[i].cn (one past the last valid word when rounding left a
      // tail), and only then clamped — an uninitialized-memory read. Once
      // clamped, the remaining slots simply keep the last word.
      if (i >= vocab_size) i = vocab_size - 1;
      else d1 += pow(vocab[i].cn, power) / train_words_pow;
    }
  }
}
/**
* ======== ReadWord ========
* Reads a single word from a file, assuming space + tab + EOL to be word
* boundaries.
*
* Parameters:
* word - A char array allocated to hold the maximum length string.
* fin - The training file.
*/
/**
 * ======== ReadWord ========
 * Reads a single word from a file, assuming space + tab + EOL to be word
 * boundaries.
 *
 * Parameters:
 *   word - A char array allocated to hold the maximum length string.
 *   fin  - The training file.
 */
void ReadWord(char *word, FILE *fin) {
  // 'a' is the write index into 'word'.
  int a = 0, ch;
  // Read until the end of the word or the end of the file.
  while (!feof(fin)) {
    ch = fgetc(fin);
    // Fix: fgetc returns EOF (-1) at end of file, and feof() only becomes
    // true AFTER that read. The old code fell through and appended (char)EOF
    // to the word before the loop condition could stop it.
    if (ch == EOF) break;
    // ASCII 13 is carriage return 'CR'; skip it (10 / 'LF' is handled below).
    if (ch == 13) continue;
    // Check for word boundaries...
    if ((ch == ' ') || (ch == '\t') || (ch == '\n')) {
      // If the word has at least one character, we're done.
      if (a > 0) {
        // Put the newline back before returning so that we find it next time.
        if (ch == '\n') ungetc(ch, fin);
        break;
      }
      // If the word is empty and the character is newline, treat this as the
      // end of a "sentence" and mark it with the token </s>.
      if (ch == '\n') {
        strcpy(word, (char *)"</s>");
        return;
      // If the word is empty and the character is tab or space, just continue
      // on to the next character.
      } else continue;
    }
    // Regular character: append it to the word.
    word[a] = ch;
    a++;
    // If the word's too long, truncate it, but keep consuming characters
    // until we reach the end of it.
    if (a >= MAX_STRING - 1) a--;
  }
  // Terminate the string with null.
  word[a] = 0;
}
/**
* ======== GetWordHash ========
* Returns hash value of a word. The hash is an integer between 0 and
* vocab_hash_size (default is 30E6).
*
* For example, the word 'hat':
* hash = ((((h * 257) + a) * 257) + t) % 30E6
*/
/**
 * ======== GetWordHash ========
 * Hashes a word string to an integer in [0, vocab_hash_size).
 * Polynomial rolling hash with base 257, e.g. for 'hat':
 *   hash = ((((h * 257) + a) * 257) + t) % vocab_hash_size
 */
int GetWordHash(char *word) {
  unsigned long long pos, h = 0;
  for (pos = 0; pos < strlen(word); pos++) {
    h = h * 257 + word[pos];
  }
  return h % vocab_hash_size;
}
/**
* ======== SearchVocab ========
* Lookup the index in the 'vocab' table of the given 'word'.
* Returns -1 if the word is not found.
* This function uses a hash table for fast lookup.
*/
/**
 * ======== SearchVocab ========
 * Returns the index of 'word' in the 'vocab' table, or -1 if absent.
 * Uses the open-addressed hash table with linear probing (mirrors the
 * collision handling in AddWordToVocab).
 */
int SearchVocab(char *word) {
  unsigned int h = GetWordHash(word);
  // Probe until we hit either the word or an empty slot.
  for (;;) {
    int idx = vocab_hash[h];
    // Empty slot: the word was never inserted.
    if (idx == -1) return -1;
    // Occupied slot: check whether it holds our word.
    if (strcmp(word, vocab[idx].word) == 0) return idx;
    // Collision: advance to the next slot (with wraparound).
    h = (h + 1) % vocab_hash_size;
  }
}
/**
* ======== ReadWordIndex ========
* Reads the next word from the training file, and returns its index into the
* 'vocab' table.
*/
/**
 * ======== ReadWordIndex ========
 * Reads the next word from the training file and returns its index in the
 * 'vocab' table (-1 at end of file or for out-of-vocabulary words).
 */
int ReadWordIndex(FILE *fin) {
  char buf[MAX_STRING];
  ReadWord(buf, fin);
  return feof(fin) ? -1 : SearchVocab(buf);
}
/**
* ======== AddWordToVocab ========
* Adds a new word to the vocabulary (one that hasn't been seen yet).
*/
/**
 * ======== AddWordToVocab ========
 * Adds a new word to the vocabulary (one that hasn't been seen yet) and
 * returns its index in the 'vocab' array. The word's count starts at 0.
 */
int AddWordToVocab(char *word) {
  // Measure word length (including the terminator).
  unsigned int hash, length = strlen(word) + 1;
  // Limit string length (default limit is 100 characters).
  if (length > MAX_STRING) length = MAX_STRING;
  // Allocate and store the word string. calloc zeroes the buffer, so after
  // the bounded copy below the string is always null-terminated.
  vocab[vocab_size].word = (char *)calloc(length, sizeof(char));
  // Fix: strcpy overflowed the 'length'-byte buffer whenever the word was
  // longer than MAX_STRING-1; copy at most length-1 bytes instead.
  strncpy(vocab[vocab_size].word, word, length - 1);
  // Initialize the word frequency to 0.
  vocab[vocab_size].cn = 0;
  // Increment the vocabulary size.
  vocab_size++;
  // Grow the vocab array in 1000-word chunks when it fills up.
  if (vocab_size + 2 >= vocab_max_size) {
    vocab_max_size += 1000;
    // Fix: check the realloc result; assigning it straight to 'vocab' loses
    // the original pointer (leak) and dereferences NULL on failure.
    struct vocab_word *grown =
        (struct vocab_word *)realloc(vocab, vocab_max_size * sizeof(struct vocab_word));
    if (grown == NULL) {
      printf("Memory allocation failed\n");
      exit(1);
    }
    vocab = grown;
  }
  // Add the word to the 'vocab_hash' table so that we can map quickly from
  // the string to its vocab_word structure.
  hash = GetWordHash(word);
  // Linear probing: if the slot is taken, find the next empty one.
  while (vocab_hash[hash] != -1)
    hash = (hash + 1) % vocab_hash_size;
  // Map the hash code to the index of the word in the 'vocab' array.
  vocab_hash[hash] = vocab_size - 1;
  // Return the index of the word in the 'vocab' array.
  return vocab_size - 1;
}
// Used later for sorting by word counts
int VocabCompare(const void *a, const void *b) {
return ((struct vocab_word *)b)->cn - ((struct vocab_word *)a)->cn;
}
/**
* ======== SortVocab ========
* Sorts the vocabulary by frequency using word counts, and removes words that
* occur fewer than 'min_count' times in the training text.
*
* Removing words from the vocabulary requires recomputing the hash table.
*/
void SortVocab() {
int a, size;
unsigned int hash;
/*
* Sort the vocabulary by number of occurrences, in descending order.
*
* Keep </s> at the first position by sorting starting from index 1.
*
* Sorting the vocabulary this way causes the words with the fewest
* occurrences to be at the end of the vocabulary table. This will allow us
* to free the memory associated with the words that get filtered out.
*/
qsort(&vocab[1], vocab_size - 1, sizeof(struct vocab_word), VocabCompare);
// Clear the vocabulary hash table.
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
// Store the initial vocab size to use in the for loop condition.
size = vocab_size;
// Recompute the number of training words.
train_words = 0;
// For every word currently in the vocab...
for (a = 0; a < size; a++) {
// If it occurs fewer than 'min_count' times, remove it from the vocabulary.
if ((vocab[a].cn < min_count) && (a != 0)) {
// Decrease the size of the new vocabulary.
vocab_size--;
// Free the memory associated with the word string.
free(vocab[a].word);
} else {
// Hash will be re-computed, as after the sorting it is not actual
hash=GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
train_words += vocab[a].cn;
}
}
// Reallocate the vocab array, chopping off all of the low-frequency words at
// the end of the table.
vocab = (struct vocab_word *)realloc(vocab, (vocab_size + 1) * sizeof(struct vocab_word));
// Allocate memory for the binary tree construction
for (a = 0; a < vocab_size; a++) {
vocab[a].code = (char *)calloc(MAX_CODE_LENGTH, sizeof(char));
vocab[a].point = (int *)calloc(MAX_CODE_LENGTH, sizeof(int));
}
}
// Reduces the vocabulary by removing infrequent tokens
void ReduceVocab() {
int a, b = 0;
unsigned int hash;
for (a = 0; a < vocab_size; a++) if (vocab[a].cn > min_reduce) {
vocab[b].cn = vocab[a].cn;
vocab[b].word = vocab[a].word;
b++;
} else free(vocab[a].word);
vocab_size = b;
for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
for (a = 0; a < vocab_size; a++) {
// Hash will be re-computed, as it is not actual
hash = GetWordHash(vocab[a].word);
while (vocab_hash[hash] != -1) hash = (hash + 1) % vocab_hash_size;
vocab_hash[hash] = a;
}
fflush(stdout);
min_reduce++;
}
// Main execution for cbow model
__global__ void cbow_exec(int window, int layer1_size, int negative, int hs, int table_size,
int vocab_size, float alpha, const float* expTable, const int* table,
const int* vocab_codelen, const int* vocab_point, const char* vocab_code,
const int* sen, const int* sentence_length, float* syn1, float* syn0)
{
__shared__ float f, g;
// init for sentence index using blockIdx
int sent_idx_s = sentence_length[blockIdx.x];
int sent_idx_e = sentence_length[blockIdx.x + 1];
unsigned long next_random = blockIdx.x;
if(threadIdx.x < layer1_size) for(int sentence_position = sent_idx_s; sentence_position < sent_idx_e; sentence_position++){
int word = sen[sentence_position];
if(word == -1) continue;
float neu1 = 0;
float neu1e = 0;
next_random = next_random * (unsigned long)2514903917 + 11;
int b = next_random % window;
int cw = 0;
for(int a = b; a < window * 2 + 1 - b; a++) if (a != window) {
int c = sentence_position - window + a;
// Verify c isn't outisde the bounds of the sentence.
if (c < sent_idx_s) continue;
if (c >= sent_idx_e) continue;
int last_word = sen[c];
if (last_word == -1) continue;
// TODO Not sure here
neu1 += syn0[threadIdx.x + last_word * layer1_size];
cw++;
}
if(cw){
// neu1 was the sum of the context word vectors, and now becomes
// their average.
neu1 /= cw;
// HS Phase
if(hs) for(int d = vocab_codelen[word]; d < vocab_codelen[word+1]; d++){
int l2 = vocab_point[d] * layer1_size;
f = 0;
f += neu1 * syn1[threadIdx.x + l2];
__syncthreads();
// Apply the sigmoid activation to the current output neuron.
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
g = (1 - vocab_code[d] - f) * alpha;
neu1e += g * syn1[threadIdx.x + l2];
// Atomic addition for threads
atomicAdd(&syn1[threadIdx.x + l2], g * neu1);
}
if(negative > 0) for (int d = 0; d < negative + 1; d++) {
int target , label;
if(d == 0){
target = word;
label = 1;
}else{
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
int l2 = target * layer1_size;
f = 0; // not sure
f += neu1 * syn1[threadIdx.x + l2]; // should split with thead or not?
__syncthreads();
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
neu1e += g * syn1[l2 + threadIdx.x];
// Atomic addition for threads
atomicAdd(&syn1[threadIdx.x + l2], g * neu1);
}
for(int a = b; a < window * 2 + 1 - b; a++) if (a != window) {
int c = sentence_position - window + a;
if(c < sent_idx_s) continue;
if(c >= sent_idx_e) continue;
int last_word = sen[c];
if (last_word == -1) continue;
// Atomic addition for threads
atomicAdd(&syn0[threadIdx.x + last_word * layer1_size], g * neu1e);
}
}
}
}
// Skip gram execution, not sure if should create f array for each thread or not
__global__ void skipgram_exec(int window, int layer1_size, int negative, int hs, int table_size,
int vocab_size, float alpha, const float* expTable, const int* table,
const int* vocab_codelen, const int* vocab_point, const char* vocab_code,
const int* sen, const int* sentence_length, float* syn1, float* syn0)
{
__shared__ float f, g;
// init for sentence index using blockIdx
int sent_idx_s = sentence_length[blockIdx.x];
int sent_idx_e = sentence_length[blockIdx.x + 1];
unsigned long next_random = blockIdx.x;
if(threadIdx.x < layer1_size) for(int sentence_position = sent_idx_s; sentence_position < sent_idx_e; sentence_position++){
int word = sen[sentence_position];
if(word == -1) continue;
float neu1e = 0;
next_random = next_random * (unsigned long)2514903917 + 11;
int b = next_random % window;
for (int a = b; a < window * 2 + 1 - b; a++) if (a != window) {
int c = sentence_position - window + a;
if (c < sent_idx_s) continue;
if (c >= sent_idx_e) continue;
int last_word = sen[c];
if (last_word == -1) continue;
// TODO Not sure here
int l1 = last_word * layer1_size;
neu1e = 0;
//HS
if(hs) for(int d = vocab_codelen[word]; d < vocab_codelen[word+1]; d++){
f = 0;
int l2 = vocab_point[d] * layer1_size;
f = syn0[threadIdx.x + l1] * syn1[threadIdx.x + l2];
__syncthreads();
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
neu1e += g * syn1[threadIdx.x + l2];
atomicAdd(&syn1[threadIdx.x + l2], g * syn0[threadIdx.x + l1]);
}
// Negative Sampling
if(negative > 0) for (int d = 0; d < negative + 1; d++) {
int target, label;
if(d == 0){
target = word;
label = 1;
}else{
next_random = next_random * (unsigned long long)25214903917 + 11;
target = table[(next_random >> 16) % table_size];
if (target == 0) target = next_random % (vocab_size - 1) + 1;
if (target == word) continue;
label = 0;
}
int l2 = target * layer1_size;
f = 0;
f = syn0[threadIdx.x +l1] * syn1[threadIdx.x + l2];//should use syn1neg?
__syncthreads();
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
neu1e += g * syn1[threadIdx.x + l2];
atomicAdd(&syn1[threadIdx.x + l2], g * syn0[threadIdx.x + l1]);
}
atomicAdd(&syn0[threadIdx.x + l1], neu1e);
}
}
}
// CUDA Structure allocation
void initVocabCuda(){
vocab_codelen = (int*)malloc((vocab_size+1) * sizeof(int));
vocab_codelen[0] = 0;
for(int i = 1; i <= vocab_size; i++){
vocab_codelen[i] = vocab_codelen[i-1]+vocab[i-1].codelen;
// global codelen struct getting vocab information for cuda sync
}
vocab_point = (int*)malloc(vocab_codelen[vocab_size] * sizeof(int));
vocab_code = (char*)malloc(vocab_codelen[vocab_size] * sizeof(char));
cudaMalloc((void **)&d_vocab_codelen, (vocab_size+1) * sizeof(int));
cudaMalloc((void **)&d_vocab_point, vocab_codelen[vocab_size] * sizeof(int));
cudaMalloc((void **)&d_vocab_code, vocab_codelen[vocab_size] * sizeof(char));
for(int i=0; i < vocab_size; i++){
for(int j = 0; j < vocab_size; j++){
vocab_point[vocab_codelen[i]+j] = vocab[i].point[j];
vocab_code[vocab_codelen[i]+j] = vocab[i].code[j];
}
}
cudaMemcpy(d_vocab_codelen, vocab_codelen, (vocab_size+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_vocab_point, vocab_point, vocab_codelen[vocab_size] * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_vocab_code, vocab_code, vocab_codelen[vocab_size] * sizeof(char), cudaMemcpyHostToDevice);
}
/**
* ======== CreateBinaryTree ========
* Create binary Huffman tree using the word counts.
* Frequent words will have short unique binary codes.
* Huffman encoding is used for lossless compression.
* For each vocabulary word, the vocab_word structure includes a `point` array,
* which is the list of internal tree nodes which:
* 1. Define the path from the root to the leaf node for the word.
* 2. Each correspond to a row of the output matrix.
* The `code` array is a list of 0s and 1s which specifies whether each output
* in `point` should be trained to output 0 or 1.
*/
void CreateBinaryTree() {
  long long a, b, i, min1i, min2i, pos1, pos2, point[MAX_CODE_LENGTH];
  char code[MAX_CODE_LENGTH]; // Default is 40
  // Note that calloc initializes these arrays to 0.
  long long *count = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
  long long *binary = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
  long long *parent_node = (long long *)calloc(vocab_size * 2 + 1, sizeof(long long));
  // The count array is twice the size of the vocabulary, plus one.
  //   - The first half of `count` becomes a list of the word counts
  //     for each word in the vocabulary. We do not modify this part of the
  //     list.
  //   - The second half of `count` is set to a large positive integer (1
  //     quadrillion). When we combine two trees under a word (e.g., word_id
  //     13), then we place the total weight of those subtrees into the word's
  //     position in the second half (e.g., count[vocab_size + 13]).
  //
  for (a = 0; a < vocab_size; a++) count[a] = vocab[a].cn;
  for (a = vocab_size; a < vocab_size * 2; a++) count[a] = 1e15;
  // `pos1` and `pos2` are indices into the `count` array.
  //   - `pos1` starts at the middle of `count` (the end of the list of word
  //     counts, which is sorted descending) and moves left.
  //   - `pos2` starts at the beginning of the list of large integers and moves
  //     right.
  pos1 = vocab_size - 1;
  pos2 = vocab_size;
  /* ===============================
   *   Step 1: Create Huffman Tree
   * ===============================
   * [Original Comment] Following algorithm constructs the Huffman tree by
   * adding one node at a time
   *
   * The Huffman coding algorithm starts with every node as its own tree, and
   * then combines the two smallest trees on each step. The weight of a tree is
   * the sum of the word counts for the words it contains.
   *
   * Once the tree is constructed, you can use the `parent_node` array to
   * navigate it. For the word at index 13, for example, you would look at
   * parent_node[13], and then parent_node[parent_node[13]], and so on, till
   * you reach the root.
   *
   * A Huffman tree stores all of the words in the vocabulary at the leaves.
   * Frequent words have short paths, and infrequent words have long paths.
   * Here, we are also associating each internal node of the tree with a
   * row of the output matrix. Every time we combine two trees and create a
   * new node, we give it a row in the output matrix.
   */
  // The number of tree combinations needed is equal to the size of the vocab,
  // minus 1.
  for (a = 0; a < vocab_size - 1; a++) {
    // First, find two smallest nodes 'min1, min2'
    // Find min1 (at index `min1i`). Because the word counts are sorted
    // descending, the smallest unused leaf is always at pos1.
    if (pos1 >= 0) {
      if (count[pos1] < count[pos2]) {
        min1i = pos1;
        pos1--;
      } else {
        min1i = pos2;
        pos2++;
      }
    } else {
      min1i = pos2;
      pos2++;
    }
    // Find min2 (at index `min2i`).
    if (pos1 >= 0) {
      if (count[pos1] < count[pos2]) {
        min2i = pos1;
        pos1--;
      } else {
        min2i = pos2;
        pos2++;
      }
    } else {
      min2i = pos2;
      pos2++;
    }
    // Calculate the combined weight. We could be combining two words, a word
    // and a tree, or two trees.
    count[vocab_size + a] = count[min1i] + count[min2i];
    // Store the path for working back up the tree.
    parent_node[min1i] = vocab_size + a;
    parent_node[min2i] = vocab_size + a;
    // binary[min1i] = 0;  // This is implied (calloc zeroed the array).
    // min1 is the (left?) node and is labeled '0', min2 is the (right?) node
    // and is labeled '1'.
    binary[min2i] = 1;
  }
  /* ==========================================
   *    Step 2: Define Samples for Each Word
   * ==========================================
   * [Original Comment] Now assign binary code to each vocabulary word
   *
   * vocab[word]
   *   .code - A variable-length string of 0s and 1s.
   *   .point - A variable-length array of output row indices.
   *   .codelen - The length of the `code` array.
   *              The point array has length `codelen + 1`.
   *
   */
  // For each word in the vocabulary...
  for (a = 0; a < vocab_size; a++) {
    b = a;
    i = 0; // `i` stores the code length.
    // Construct the binary code...
    //   `code` stores 1s and 0s.
    //   `point` stores indices.
    // This loop works backwards from the leaf, so the `code` and `point`
    // lists end up in reverse order.
    while (1) {
      // Lookup whether this is on the left or right of its parent node.
      code[i] = binary[b];
      // Note: point[0] always holds the word itself...
      point[i] = b;
      // Increment the code length.
      i++;
      // This will always return an index in the second half of the array.
      b = parent_node[b];
      // We've reached the root when... (the root is the last internal node
      // created, at index vocab_size * 2 - 2).
      if (b == vocab_size * 2 - 2) break;
    }
    // Record the code length (the length of the `point` list).
    vocab[a].codelen = i;
    // The root node is at row `vocab_size - 2` of the output matrix.
    vocab[a].point[0] = vocab_size - 2;
    // For each bit in this word's code...
    for (b = 0; b < i; b++) {
      // Reverse the code in `code` and store it in `vocab[a].code`
      vocab[a].code[i - b - 1] = code[b];
      // Store the row indices of the internal nodes leading to this word.
      // These are the set of outputs which will be trained every time
      // this word is encountered in the training data as an output word.
      // (Subtracting vocab_size converts the tree index into an output-matrix
      // row index.)
      vocab[a].point[i - b] = point[b] - vocab_size;
    }
  }
  free(count);
  free(binary);
  free(parent_node);
}
/**
* ======== LearnVocabFromTrainFile ========
* Builds a vocabulary from the words found in the training file.
*
* This function will also build a hash table which allows for fast lookup
* from the word string to the corresponding vocab_word object.
*
* Words that occur fewer than 'min_count' times will be filtered out of
* vocabulary.
*/
/**
 * ======== LearnVocabFromTrainFile ========
 * Builds the vocabulary (and its hash table) from the words in the training
 * file, counting occurrences as it goes. Words seen fewer than 'min_count'
 * times are filtered out by the final SortVocab() pass.
 */
void LearnVocabFromTrainFile() {
  char token[MAX_STRING];
  FILE *fp;
  long long idx, pos;
  // Reset the hash table to "empty".
  for (pos = 0; pos < vocab_hash_size; pos++) vocab_hash[pos] = -1;
  // Open the training file.
  fp = fopen(train_file, "rb");
  if (fp == NULL) {
    printf("ERROR: training data file not found!\n");
    exit(1);
  }
  vocab_size = 0;
  // The special token </s> marks the end of a sentence; add it first so it
  // sits at index 0 in the vocab.
  AddWordToVocab((char *)"</s>");
  for (;;) {
    // Pull the next token; stop at end of file.
    ReadWord(token, fp);
    if (feof(fp)) break;
    // Count every token in the training text.
    train_words++;
    // Progress report every 100,000 words.
    if ((debug_mode > 1) && (train_words % 100000 == 0)) {
      printf("%lldK%c", train_words / 1000, 13);
      fflush(stdout);
    }
    // New word: add it with count 1. Known word: bump its count.
    idx = SearchVocab(token);
    if (idx == -1) {
      pos = AddWordToVocab(token);
      vocab[pos].cn = 1;
    } else {
      vocab[idx].cn++;
    }
    // Keep the hash table under ~70% load to limit collisions.
    if (vocab_size > vocab_hash_size * 0.7) ReduceVocab();
  }
  // Sort by descending frequency and drop words below 'min_count'.
  SortVocab();
  if (debug_mode > 0) {
    printf("Vocab size: %lld\n", vocab_size);
    printf("Words in train file: %lld\n", train_words);
  }
  file_size = ftell(fp);
  fclose(fp);
}
// Writes the vocabulary ("word count" per line) to save_vocab_file.
void SaveVocab() {
  long long i;
  FILE *fo = fopen(save_vocab_file, "wb");
  // Fix: guard against fopen failure — fprintf(NULL, ...) is undefined
  // behavior. exit(1) matches the error style of the other file openers.
  if (fo == NULL) {
    printf("ERROR: cannot open the vocab output file!\n");
    exit(1);
  }
  for (i = 0; i < vocab_size; i++) fprintf(fo, "%s %lld\n", vocab[i].word, vocab[i].cn);
  fclose(fo);
}
// Loads a previously saved vocabulary ("word count" per line), rebuilds the
// hash table, then records the training file's size for thread partitioning.
void ReadVocab() {
  long long a, i = 0;
  char c;
  char word[MAX_STRING];
  FILE *fin = fopen(read_vocab_file, "rb");
  if (fin == NULL) {
    printf("Vocabulary file not found\n");
    exit(1);
  }
  for (a = 0; a < vocab_hash_size; a++) vocab_hash[a] = -1;
  vocab_size = 0;
  while (1) {
    ReadWord(word, fin);
    if (feof(fin)) break;
    a = AddWordToVocab(word);
    // Fix: check the fscanf result instead of ignoring it; on a malformed
    // line, stop reading (the word keeps the count 0 set by AddWordToVocab).
    if (fscanf(fin, "%lld%c", &vocab[a].cn, &c) != 2) break;
    i++;
  }
  // Fix: close the vocab file before reusing 'fin' — the old code reassigned
  // the pointer below and leaked the open FILE handle.
  fclose(fin);
  SortVocab();
  if (debug_mode > 0) {
    printf("Vocab size: %lld\n", vocab_size);
    printf("Words in train file: %lld\n", train_words);
  }
  // Measure the training file size (used to split work across threads).
  fin = fopen(train_file, "rb");
  if (fin == NULL) {
    printf("ERROR: training data file not found!\n");
    exit(1);
  }
  fseek(fin, 0, SEEK_END);
  file_size = ftell(fin);
  fclose(fin);
}
/**
* ======== InitNet ========
*
*/
void InitNet() {
long long a, b;
unsigned long long next_random = 1;
// Allocate the hidden layer of the network, which is what becomes the word vectors.
// The variable for this layer is 'syn0'.
a = posix_memalign((void **)&syn0, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn0 == NULL) {printf("Memory allocation failed\n"); exit(1);}
// If we're using hierarchical softmax for training...
if (hs) {
a = posix_memalign((void **)&syn1, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn1 == NULL) {printf("Memory allocation failed\n"); exit(1);}
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1[a * layer1_size + b] = 0;
}
// If we're using negative sampling for training...
if (negative>0) {
// Allocate the output layer of the network.
// The variable for this layer is 'syn1neg'.
// This layer has the same size as the hidden layer, but is the transpose.
a = posix_memalign((void **)&syn1neg, 128, (long long)vocab_size * layer1_size * sizeof(real));
if (syn1neg == NULL) {printf("Memory allocation failed\n"); exit(1);}
// Set all of the weights in the output layer to 0.
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++)
syn1neg[a * layer1_size + b] = 0;
}
// Randomly initialize the weights for the hidden layer (word vector layer).
// TODO - What's the equation here?
for (a = 0; a < vocab_size; a++) for (b = 0; b < layer1_size; b++) {
next_random = next_random * (unsigned long long)25214903917 + 11;
syn0[a * layer1_size + b] = (((next_random & 0xFFFF) / (real)65536) - 0.5) / layer1_size;
}
// Create a binary tree for Huffman coding.
// TODO - As best I can tell, this is only used for hierarchical softmax training...
CreateBinaryTree();
}
/**
* ======== TrainModelThread ========
* This function performs the training of the model.
*/
void *TrainModelThread(void *id) {
/*
* word - Stores the index of a word in the vocab table.
* word_count - Stores the total number of training words processed.
*/
long long a, b, d, cw, word, last_word, sentence_length = 0, sentence_position = 0;
long long word_count = 0, last_word_count = 0, sen[MAX_SENTENCE_LENGTH + 1];
long long l1, l2, c, target, label, local_iter = iter;
unsigned long long next_random = (long long)id;
real f, g;
clock_t now;
// neu1 is only used by the CBOW architecture.
real *neu1 = (real *)calloc(layer1_size, sizeof(real));
// neu1e is used by both architectures.
real *neu1e = (real *)calloc(layer1_size, sizeof(real));
// Open the training file and seek to the portion of the file that this
// thread is responsible for.
FILE *fi = fopen(train_file, "rb");
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
// This loop covers the whole training operation...
while (1) {
/*
* ======== Variables ========
* iter - This is the number of training epochs to run; default is 5.
* word_count - The number of input words processed.
* train_words - The total number of words in the training text (not
* including words removed from the vocabuly by ReduceVocab).
*/
// This block prints a progress update, and also adjusts the training
// 'alpha' parameter.
if (word_count - last_word_count > 10000) {
word_count_actual += word_count - last_word_count;
last_word_count = word_count;
// The percentage complete is based on the total number of passes we are
// doing and not just the current pass.
if ((debug_mode > 1)) {
now=clock();
printf("%cAlpha: %f Progress: %.2f%% Words/thread/sec: %.2fk ", 13, alpha,
// Percent complete = [# of input words processed] /
// ([# of passes] * [# of words in a pass])
word_count_actual / (real)(iter * train_words + 1) * 100,
word_count_actual / ((real)(now - start + 1) / (real)CLOCKS_PER_SEC * 1000));
fflush(stdout);
}
// Update alpha to: [initial alpha] * [percent of training remaining]
// This means that alpha will gradually decrease as we progress through
// the training text.
alpha = starting_alpha * (1 - word_count_actual / (real)(iter * train_words + 1));
// Don't let alpha go below [initial alpha] * 0.0001.
if (alpha < starting_alpha * 0.0001) alpha = starting_alpha * 0.0001;
}
// This 'if' block retrieves the next sentence from the training text and
// stores it in 'sen'.
// TODO - Under what condition would sentence_length not be zero?
if (sentence_length == 0) {
while (1) {
// Read the next word from the training data and lookup its index in
// the vocab table. 'word' is the word's vocab index.
word = ReadWordIndex(fi);
if (feof(fi)) break;
// If the word doesn't exist in the vocabulary, skip it.
if (word == -1) continue;
// Track the total number of training words processed.
word_count++;
// 'vocab' word 0 is a special token "</s>" which indicates the end of
// a sentence.
if (word == 0) break;
/*
* =================================
* Subsampling of Frequent Words
* =================================
* This code randomly discards training words, but is designed to
* keep the relative frequencies the same. That is, less frequent
* words will be discarded less often.
*
* We first calculate the probability that we want to *keep* the word;
* this is the value 'ran'. Then, to decide whether to keep the word,
* we generate a random fraction (0.0 - 1.0), and if 'ran' is smaller
* than this number, we discard the word. This means that the smaller
* 'ran' is, the more likely it is that we'll discard this word.
*
* The quantity (vocab[word].cn / train_words) is the fraction of all
* the training words which are 'word'. Let's represent this fraction
* by x.
*
* Using the default 'sample' value of 0.001, the equation for ran is:
* ran = (sqrt(x / 0.001) + 1) * (0.001 / x)
*
* You can plot this function to see it's behavior; it has a curved
* L shape.
*
* Here are some interesting points in this function (again this is
* using the default sample value of 0.001).
* - ran = 1 (100% chance of being kept) when x <= 0.0026.
* - That is, any word which is 0.0026 of the words *or fewer*
* will be kept 100% of the time. Only words which represent
* more than 0.26% of the total words will be subsampled.
* - ran = 0.5 (50% chance of being kept) when x = 0.00746.
* - ran = 0.033 (3.3% chance of being kept) when x = 1.
* - That is, if a word represented 100% of the training set
* (which of course would never happen), it would only be
* kept 3.3% of the time.
*
* NOTE: Seems like it would be more efficient to pre-calculate this
* probability for each word and store it in the vocab table...
*
* Words that are discarded by subsampling aren't added to our training
* 'sentence'. This means the discarded word is neither used as an
* input word or a context word for other inputs.
*/
if (sample > 0) {
// Calculate the probability of keeping 'word'.
real ran = (sqrt(vocab[word].cn / (sample * train_words)) + 1) * (sample * train_words) / vocab[word].cn;
// Generate a random number.
// The multiplier is 25.xxx billion, so 'next_random' is a 64-bit integer.
next_random = next_random * (unsigned long long)25214903917 + 11;
// If the probability is less than a random fraction, discard the word.
//
// (next_random & 0xFFFF) extracts just the lower 16 bits of the
// random number. Dividing this by 65536 (2^16) gives us a fraction
// between 0 and 1. So the code is just generating a random fraction.
if (ran < (next_random & 0xFFFF) / (real)65536) continue;
}
// If we kept the word, add it to the sentence.
sen[sentence_length] = word;
sentence_length++;
// Verify the sentence isn't too long.
if (sentence_length >= MAX_SENTENCE_LENGTH) break;
}
sentence_position = 0;
}
if (feof(fi) || (word_count > train_words / num_threads)) {
word_count_actual += word_count - last_word_count;
local_iter--;
if (local_iter == 0) break;
word_count = 0;
last_word_count = 0;
sentence_length = 0;
fseek(fi, file_size / (long long)num_threads * (long long)id, SEEK_SET);
continue;
}
// Get the next word in the sentence. The word is represented by its index
// into the vocab table.
word = sen[sentence_position];
if (word == -1) continue;
for (c = 0; c < layer1_size; c++) neu1[c] = 0;
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
// This is a standard random integer generator, as seen here:
// https://en.wikipedia.org/wiki/Linear_congruential_generator
next_random = next_random * (unsigned long long)25214903917 + 11;
// 'b' becomes a random integer between 0 and 'window' - 1.
// This is the amount we will shrink the window size by.
b = next_random % window;
/*
* ====================================
* CBOW Architecture
* ====================================
* sen - This is the array of words in the sentence. Subsampling has
* already been applied. Words are represented by their ids.
*
* sentence_position - This is the index of the current input word.
*
* a - Offset into the current window, relative to the window start.
* a will range from 0 to (window * 2)
*
* b - The amount to shrink the context window by.
*
* c - 'c' is a scratch variable used in two unrelated ways:
* 1. It's first used as the index of the current context word
* within the sentence (the `sen` array).
* 2. It's then used as the for-loop variable for calculating
* vector dot-products and other arithmetic.
*
* syn0 - The hidden layer weights. Note that the weights are stored as a
* 1D array, so word 'i' is found at (i * layer1_size).
*
* target - The output word we're working on. If it's the positive sample
* then `label` is 1. `label` is 0 for negative samples.
* Note: `target` and `label` are only used in negative sampling,
* and not HS.
*
* neu1 - This vector will hold the *average* of all of the context word
* vectors. This is the output of the hidden layer for CBOW.
*
* neu1e - Holds the gradient for updating the hidden layer weights.
* It's a vector of length 300, not a matrix.
* This same gradient update is applied to all context word
* vectors.
*/
if (cbow) { //train the cbow architecture
// in -> hidden
cw = 0;
// This loop will sum together the word vectors for all of the context
// words.
//
// Loop over the positions in the context window (skipping the word at
// the center). 'a' is just the offset within the window, it's not the
// index relative to the beginning of the sentence.
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
// Convert the window offset 'a' into an index 'c' into the sentence
// array.
c = sentence_position - window + a;
// Verify c isn't outside the bounds of the sentence.
if (c < 0) continue;
if (c >= sentence_length) continue;
// Get the context word. That is, get the id of the word (its index in
// the vocab table).
last_word = sen[c];
// At this point we have two words identified:
// 'word' - The word (word ID) at our current position in the
// sentence (in the center of a context window).
// 'last_word' - The word (word ID) at a position within the context
// window.
// Verify that the word exists in the vocab
if (last_word == -1) continue;
// Add the word vector for this context word to the running sum in
// neu1.
// `layer1_size` is 300, `neu1` is length 300
for (c = 0; c < layer1_size; c++) neu1[c] += syn0[c + last_word * layer1_size];
// Count the number of context words.
cw++;
}
// Skip if there were somehow no context words.
if (cw) {
// neu1 was the sum of the context word vectors, and now becomes
// their average.
for (c = 0; c < layer1_size; c++) neu1[c] /= cw;
// // HIERARCHICAL SOFTMAX
// vocab[word]
// .point - A variable-length list of row ids, which are the output
// rows to train on.
// .code - A variable-length list of 0s and 1s, which are the desired
// labels for the outputs in `point`.
// .codelen - The length of the `code` array for this
// word.
//
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
// point[d] is the index of a row of the output matrix.
// l2 is the index of that word in the output layer weights (syn1).
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
// neu1 is the average of the context words from the hidden layer.
// This loop computes the dot product between neu1 and the output
// weights for the output word at point[d].
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1[c + l2];
// Apply the sigmoid activation to the current output neuron.
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the error multiplied by the learning rate.
// The error is (label - f), so label = (1 - code), meaning if
// code is 0, then this is a positive sample and vice versa.
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * neu1[c];
}
// NEGATIVE SAMPLING
// Rather than performing backpropagation for every word in our
// vocabulary, we only perform it for the positive sample and a few
// negative samples (the number of words is given by 'negative').
// These negative words are selected using a "unigram" distribution,
// which is generated in the function InitUnigramTable.
if (negative > 0) for (d = 0; d < negative + 1; d++) {
// On the first iteration, we're going to train the positive sample.
if (d == 0) {
target = word;
label = 1;
// On the other iterations, we'll train the negative samples.
} else {
// Pick a random word to use as a 'negative sample'; do this using
// the unigram table.
// Get a random integer.
next_random = next_random * (unsigned long long)25214903917 + 11;
// 'target' becomes the index of the word in the vocab to use as
// the negative sample.
target = table[(next_random >> 16) % table_size];
// If the target is the special end of sentence token, then just
// pick a random word from the vocabulary instead.
if (target == 0) target = next_random % (vocab_size - 1) + 1;
// Don't use the positive sample as a negative sample!
if (target == word) continue;
// Mark this as a negative example.
label = 0;
}
// At this point, target might either be the positive sample or a
// negative sample, depending on the value of `label`.
// Get the index of the target word in the output layer.
l2 = target * layer1_size;
// Calculate the dot product between:
// neu1 - The average of the context word vectors.
// syn1neg[l2] - The output weights for the target word.
f = 0;
for (c = 0; c < layer1_size; c++) f += neu1[c] * syn1neg[c + l2];
// This block does two things:
// 1. Calculates the output of the network for this training
// pair, using the expTable to evaluate the output layer
// activation function.
// 2. Calculate the error at the output, stored in 'g', by
// subtracting the network output from the desired output,
// and finally multiply this by the learning rate.
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
// Multiply the error by the output layer weights.
// (I think this is the gradient calculation?)
// Accumulate these gradients over all of the negative samples.
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
// Update the output layer weights by multiplying the output error
// by the average of the context word vectors.
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * neu1[c];
}
// hidden -> in
// Backpropagate the error to the hidden layer (the word vectors).
// This code is used both for hierarchical softmax and for negative
// sampling.
//
// Loop over the positions in the context window (skipping the word at
// the center). 'a' is just the offset within the window, it's not
// the index relative to the beginning of the sentence.
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
// Convert the window offset 'a' into an index 'c' into the sentence
// array.
c = sentence_position - window + a;
// Verify c isn't outside the bounds of the sentence.
if (c < 0) continue;
if (c >= sentence_length) continue;
// Get the context word. That is, get the id of the word (its index in
// the vocab table).
last_word = sen[c];
// Verify word exists in vocab.
if (last_word == -1) continue;
// Note that `c` is no longer the sentence position, it's just a
// for-loop index.
// Add the gradient in the vector `neu1e` to the word vector for
// the current context word.
// syn0[last_word * layer1_size] <-- Accesses the word vector.
for (c = 0; c < layer1_size; c++) syn0[c + last_word * layer1_size] += neu1e[c];
}
}
}
/*
* ====================================
* Skip-gram Architecture
* ====================================
* sen - This is the array of words in the sentence. Subsampling has
* already been applied. Words are represented by their ids.
*
* sentence_position - This is the index of the current input word.
*
* a - Offset into the current window, relative to the window start.
* a will range from 0 to (window * 2)
*
* b - The amount to shrink the context window by.
*
* c - 'c' is a scratch variable used in two unrelated ways:
* 1. It's first used as the index of the current context word
* within the sentence (the `sen` array).
* 2. It's then used as the for-loop variable for calculating
* vector dot-products and other arithmetic.
*
* syn0 - The hidden layer weights. Note that the weights are stored as a
* 1D array, so word 'i' is found at (i * layer1_size).
*
* l1 - Index into the hidden layer (syn0). Index of the start of the
* weights for the current input word.
*
* target - The output word we're working on. If it's the positive sample
* then `label` is 1. `label` is 0 for negative samples.
* Note: `target` and `label` are only used in negative sampling,
* and not HS.
*/
else {
// Loop over the positions in the context window (skipping the word at
// the center). 'a' is just the offset within the window, it's not
// the index relative to the beginning of the sentence.
for (a = b; a < window * 2 + 1 - b; a++) if (a != window) {
// Convert the window offset 'a' into an index 'c' into the sentence
// array.
c = sentence_position - window + a;
// Verify c isn't outside the bounds of the sentence.
if (c < 0) continue;
if (c >= sentence_length) continue;
// Get the context word. That is, get the id of the word (its index in
// the vocab table).
last_word = sen[c];
// At this point we have two words identified:
// 'word' - The word at our current position in the sentence (in the
// center of a context window).
// 'last_word' - The word at a position within the context window.
// Verify that the word exists in the vocab (I don't think this should
// ever be the case?)
if (last_word == -1) continue;
// Calculate the index of the start of the weights for 'last_word'.
l1 = last_word * layer1_size;
for (c = 0; c < layer1_size; c++) neu1e[c] = 0;
// HIERARCHICAL SOFTMAX
if (hs) for (d = 0; d < vocab[word].codelen; d++) {
f = 0;
l2 = vocab[word].point[d] * layer1_size;
// Propagate hidden -> output
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1[c + l2];
if (f <= -MAX_EXP) continue;
else if (f >= MAX_EXP) continue;
else f = expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))];
// 'g' is the gradient multiplied by the learning rate
g = (1 - vocab[word].code[d] - f) * alpha;
// Propagate errors output -> hidden
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1[c + l2];
// Learn weights hidden -> output
for (c = 0; c < layer1_size; c++) syn1[c + l2] += g * syn0[c + l1];
}
// NEGATIVE SAMPLING
// Rather than performing backpropagation for every word in our
// vocabulary, we only perform it for a few words (the number of words
// is given by 'negative').
// These words are selected using a "unigram" distribution, which is generated
// in the function InitUnigramTable
if (negative > 0) for (d = 0; d < negative + 1; d++) {
// On the first iteration, we're going to train the positive sample.
if (d == 0) {
target = word;
label = 1;
// On the other iterations, we'll train the negative samples.
} else {
// Pick a random word to use as a 'negative sample'; do this using
// the unigram table.
// Get a random integer.
next_random = next_random * (unsigned long long)25214903917 + 11;
// 'target' becomes the index of the word in the vocab to use as
// the negative sample.
target = table[(next_random >> 16) % table_size];
// If the target is the special end of sentence token, then just
// pick a random word from the vocabulary instead.
if (target == 0) target = next_random % (vocab_size - 1) + 1;
// Don't use the positive sample as a negative sample!
if (target == word) continue;
// Mark this as a negative example.
label = 0;
}
// Get the index of the target word in the output layer.
l2 = target * layer1_size;
// At this point, our two words are represented by their index into
// the layer weights.
// l1 - The index of our input word within the hidden layer weights.
// l2 - The index of our output word within the output layer weights.
// label - Whether this is a positive (1) or negative (0) example.
// Calculate the dot-product between the input words weights (in
// syn0) and the output word's weights (in syn1neg).
// Note that this calculates the dot-product manually using a for
// loop over the vector elements!
f = 0;
for (c = 0; c < layer1_size; c++) f += syn0[c + l1] * syn1neg[c + l2];
// This block does two things:
// 1. Calculates the output of the network for this training
// pair, using the expTable to evaluate the output layer
// activation function.
// 2. Calculate the error at the output, stored in 'g', by
// subtracting the network output from the desired output,
// and finally multiply this by the learning rate.
if (f > MAX_EXP) g = (label - 1) * alpha;
else if (f < -MAX_EXP) g = (label - 0) * alpha;
else g = (label - expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]) * alpha;
// Multiply the error by the output layer weights.
// Accumulate these gradients over the negative samples and the one
// positive sample.
for (c = 0; c < layer1_size; c++) neu1e[c] += g * syn1neg[c + l2];
// Update the output layer weights by multiplying the output error
// by the hidden layer weights.
for (c = 0; c < layer1_size; c++) syn1neg[c + l2] += g * syn0[c + l1];
}
// Once the hidden layer gradients for the negative samples plus the
// one positive sample have been accumulated, update the hidden layer
// weights.
// Note that we do not average the gradient before applying it.
for (c = 0; c < layer1_size; c++) syn0[c + l1] += neu1e[c];
}
}
// Advance to the next word in the sentence.
sentence_position++;
// Check if we've reached the end of the sentence.
// If so, set sentence_length to 0 and we'll read a new sentence at the
// beginning of this loop.
if (sentence_position >= sentence_length) {
sentence_length = 0;
continue;
}
}
fclose(fi);
free(neu1);
free(neu1e);
pthread_exit(NULL);
}
/**
 * ======== TrainModel ========
 * Main entry point to the training process.
 *
 * Steps:
 *   1. Learn the vocabulary from the training file (or load a saved one).
 *   2. Allocate and initialize the network weights; build the unigram
 *      table if negative sampling is enabled.
 *   3. Spawn `num_threads` worker threads (TrainModelThread) and wait
 *      for them to finish.
 *   4. Write either the raw word vectors, or K-means word classes, to
 *      `output_file`.
 *
 * Reads/writes many file-level globals (vocab, syn0, alpha, ...).
 */
void TrainModel() {
  long a, b, c, d;
  FILE *fo;
  // One handle per worker thread.
  pthread_t *pt = (pthread_t *)malloc(num_threads * sizeof(pthread_t));
  if (pt == NULL) {
    // Fix: the allocation was previously unchecked.
    fprintf(stderr, "Memory allocation failed for thread handles\n");
    exit(1);
  }
  printf("Starting training using file %s\n", train_file);
  starting_alpha = alpha;
  // Either load a pre-existing vocabulary, or learn the vocabulary from
  // the training file.
  if (read_vocab_file[0] != 0) ReadVocab(); else LearnVocabFromTrainFile();
  // Save the vocabulary.
  if (save_vocab_file[0] != 0) SaveVocab();
  // Stop here if no output_file was specified.
  if (output_file[0] == 0) {
    free(pt);
    return;
  }
  // Allocate the weight matrices and initialize them.
  InitNet();
  // If we're using negative sampling, initialize the unigram table, which
  // is used to pick words to use as "negative samples" (with more frequent
  // words being picked more often).
  if (negative > 0) InitUnigramTable();
  // Record the start time of training.
  start = clock();
  // Run training, which occurs in the 'TrainModelThread' function.
  for (a = 0; a < num_threads; a++) pthread_create(&pt[a], NULL, TrainModelThread, (void *)a);
  for (a = 0; a < num_threads; a++) pthread_join(pt[a], NULL);
  // Fix: the thread-handle array was previously leaked.
  free(pt);
  fo = fopen(output_file, "wb");
  if (fo == NULL) {
    // Fix: an unchecked fopen failure would previously crash on the first
    // fprintf to a NULL stream.
    fprintf(stderr, "Unable to open %s for writing\n", output_file);
    exit(1);
  }
  if (classes == 0) {
    // Save the word vectors: a header line with the vocabulary size and
    // vector length, then one row per word (binary or text format).
    fprintf(fo, "%lld %lld\n", vocab_size, layer1_size);
    for (a = 0; a < vocab_size; a++) {
      fprintf(fo, "%s ", vocab[a].word);
      if (binary) for (b = 0; b < layer1_size; b++) fwrite(&syn0[a * layer1_size + b], sizeof(real), 1, fo);
      else for (b = 0; b < layer1_size; b++) fprintf(fo, "%lf ", syn0[a * layer1_size + b]);
      fprintf(fo, "\n");
    }
  } else {
    // Run K-means on the word vectors, then write each word's class id.
    int clcn = classes, iter = 10, closeid;
    // centcn[b] counts the members of cluster b; it is seeded at 1 so an
    // empty cluster can't cause a divide-by-zero when averaging.
    int *centcn = (int *)malloc(classes * sizeof(int));
    // cl[w] is the current cluster assignment of word w.
    int *cl = (int *)calloc(vocab_size, sizeof(int));
    real closev, x;
    // cent holds one centroid vector per cluster (normalized each pass).
    real *cent = (real *)calloc(classes * layer1_size, sizeof(real));
    // Initial assignment: round-robin the words into the clusters.
    for (a = 0; a < vocab_size; a++) cl[a] = a % clcn;
    for (a = 0; a < iter; a++) {
      // Reset the centroid accumulators and member counts.
      for (b = 0; b < clcn * layer1_size; b++) cent[b] = 0;
      for (b = 0; b < clcn; b++) centcn[b] = 1;
      // Accumulate every word vector into its assigned cluster's centroid.
      for (c = 0; c < vocab_size; c++) {
        for (d = 0; d < layer1_size; d++) cent[layer1_size * cl[c] + d] += syn0[c * layer1_size + d];
        centcn[cl[c]]++;
      }
      // Average, then L2-normalize, each centroid.
      for (b = 0; b < clcn; b++) {
        closev = 0;
        for (c = 0; c < layer1_size; c++) {
          cent[layer1_size * b + c] /= centcn[b];
          closev += cent[layer1_size * b + c] * cent[layer1_size * b + c];
        }
        closev = sqrt(closev);
        for (c = 0; c < layer1_size; c++) cent[layer1_size * b + c] /= closev;
      }
      // Re-assign each word to the centroid with the largest dot product.
      for (c = 0; c < vocab_size; c++) {
        closev = -10;
        closeid = 0;
        for (d = 0; d < clcn; d++) {
          x = 0;
          for (b = 0; b < layer1_size; b++) x += cent[layer1_size * d + b] * syn0[c * layer1_size + b];
          if (x > closev) {
            closev = x;
            closeid = d;
          }
        }
        cl[c] = closeid;
      }
    }
    // Save the K-means classes
    for (a = 0; a < vocab_size; a++) fprintf(fo, "%s %d\n", vocab[a].word, cl[a]);
    free(centcn);
    free(cent);
    free(cl);
  }
  fclose(fo);
}
/**
 * ======== ArgPos ========
 * Scan the command line for the flag `str` (e.g. "-size").
 *
 * Returns the index of the flag within argv when found, or -1 when the
 * flag is absent. A flag found as the very last token has no value after
 * it, so an error is printed and the process terminates.
 */
int ArgPos(char *str, int argc, char **argv) {
  int idx;
  for (idx = 1; idx < argc; idx++) {
    if (strcmp(argv[idx], str) != 0) continue;
    // Found the flag; it must be followed by a value argument.
    if (idx == argc - 1) {
      printf("Argument missing for %s\n", str);
      exit(1);
    }
    return idx;
  }
  // Flag never appeared on the command line.
  return -1;
}
/**
 * ======== main ========
 * Parse the command-line options into the file-level globals, allocate
 * the vocabulary and hash tables, precompute the sigmoid lookup table,
 * and run training via TrainModel().
 *
 * With no arguments, prints usage help and exits with status 0.
 */
int main(int argc, char **argv) {
  int i;
  if (argc == 1) {
    printf("WORD VECTOR estimation toolkit v 0.1c\n\n");
    printf("Options:\n");
    printf("Parameters for training:\n");
    printf("\t-train <file>\n");
    printf("\t\tUse text data from <file> to train the model\n");
    printf("\t-output <file>\n");
    printf("\t\tUse <file> to save the resulting word vectors / word clusters\n");
    printf("\t-size <int>\n");
    printf("\t\tSet size of word vectors; default is 100\n");
    printf("\t-window <int>\n");
    printf("\t\tSet max skip length between words; default is 5\n");
    printf("\t-sample <float>\n");
    printf("\t\tSet threshold for occurrence of words. Those that appear with higher frequency in the training data\n");
    printf("\t\twill be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)\n");
    printf("\t-hs <int>\n");
    printf("\t\tUse Hierarchical Softmax; default is 0 (not used)\n");
    printf("\t-negative <int>\n");
    printf("\t\tNumber of negative examples; default is 5, common values are 3 - 10 (0 = not used)\n");
    printf("\t-threads <int>\n");
    printf("\t\tUse <int> threads (default 12)\n");
    printf("\t-iter <int>\n");
    printf("\t\tRun more training iterations (default 5)\n");
    printf("\t-min-count <int>\n");
    printf("\t\tThis will discard words that appear less than <int> times; default is 5\n");
    printf("\t-alpha <float>\n");
    printf("\t\tSet the starting learning rate; default is 0.025 for skip-gram and 0.05 for CBOW\n");
    printf("\t-classes <int>\n");
    printf("\t\tOutput word classes rather than word vectors; default number of classes is 0 (vectors are written)\n");
    printf("\t-debug <int>\n");
    printf("\t\tSet the debug mode (default = 2 = more info during training)\n");
    printf("\t-binary <int>\n");
    // Fix: the usage text previously read "binary moded".
    printf("\t\tSave the resulting vectors in binary mode; default is 0 (off)\n");
    printf("\t-save-vocab <file>\n");
    printf("\t\tThe vocabulary will be saved to <file>\n");
    printf("\t-read-vocab <file>\n");
    printf("\t\tThe vocabulary will be read from <file>, not constructed from the training data\n");
    printf("\t-cbow <int>\n");
    printf("\t\tUse the continuous bag of words model; default is 1 (use 0 for skip-gram model)\n");
    printf("\nExamples:\n");
    printf("./word2vec -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3\n\n");
    return 0;
  }
  // Empty strings mean "option not supplied".
  output_file[0] = 0;
  save_vocab_file[0] = 0;
  read_vocab_file[0] = 0;
  // For each option, find its position on the command line (if present)
  // and read the value that follows it.
  if ((i = ArgPos((char *)"-size", argc, argv)) > 0) layer1_size = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-train", argc, argv)) > 0) strcpy(train_file, argv[i + 1]);
  if ((i = ArgPos((char *)"-save-vocab", argc, argv)) > 0) strcpy(save_vocab_file, argv[i + 1]);
  if ((i = ArgPos((char *)"-read-vocab", argc, argv)) > 0) strcpy(read_vocab_file, argv[i + 1]);
  if ((i = ArgPos((char *)"-debug", argc, argv)) > 0) debug_mode = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-binary", argc, argv)) > 0) binary = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-cbow", argc, argv)) > 0) cbow = atoi(argv[i + 1]);
  // CBOW uses a higher default learning rate; this must be set before
  // -alpha is parsed so an explicit -alpha still wins.
  if (cbow) alpha = 0.05;
  if ((i = ArgPos((char *)"-alpha", argc, argv)) > 0) alpha = atof(argv[i + 1]);
  if ((i = ArgPos((char *)"-output", argc, argv)) > 0) strcpy(output_file, argv[i + 1]);
  if ((i = ArgPos((char *)"-window", argc, argv)) > 0) window = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-sample", argc, argv)) > 0) sample = atof(argv[i + 1]);
  if ((i = ArgPos((char *)"-hs", argc, argv)) > 0) hs = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-negative", argc, argv)) > 0) negative = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-threads", argc, argv)) > 0) num_threads = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-iter", argc, argv)) > 0) iter = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-min-count", argc, argv)) > 0) min_count = atoi(argv[i + 1]);
  if ((i = ArgPos((char *)"-classes", argc, argv)) > 0) classes = atoi(argv[i + 1]);
  // Allocate the vocabulary table.
  vocab = (struct vocab_word *)calloc(vocab_max_size, sizeof(struct vocab_word));
  // Allocate the hash table for mapping word strings to word entries.
  vocab_hash = (int *)calloc(vocab_hash_size, sizeof(int));
  if (vocab == NULL || vocab_hash == NULL) {
    // Fix: these allocations were previously unchecked.
    fprintf(stderr, "out of memory\n");
    exit(1);
  }
  /*
   * ======== Precomputed Exp Table ========
   * To calculate the softmax output, they use a table of values which are
   * pre-computed here.
   *
   * From the top of this file:
   *   #define EXP_TABLE_SIZE 1000
   *   #define MAX_EXP 6
   *
   * First, look at this inner term:
   *   i / (real)EXP_TABLE_SIZE * 2 - 1
   * This is a straight line going from -1 to +1 as i goes 0..1000.
   *
   * Multiplying by MAX_EXP = 6 stretches the range to [-6, +6].
   *
   * So the total input range of the table is
   *   Range = MAX_EXP * 2 = 12
   * and the increment on the inputs is
   *   Increment = Range / EXP_TABLE_SIZE = 0.012
   *
   * To compute the table index for a value x:
   *   index = (x - -MAX_EXP) / increment
   *         = (x + MAX_EXP) / ((2 * MAX_EXP) / EXP_TABLE_SIZE)
   *         = (x + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2)
   *
   * The last form is what appears elsewhere in the code:
   *   expTable[(int)((f + MAX_EXP) * (EXP_TABLE_SIZE / MAX_EXP / 2))]
   */
  // Allocate the table (one extra slot guards the upper boundary index).
  expTable = (real *)malloc((EXP_TABLE_SIZE + 1) * sizeof(real));
  if (expTable == NULL) {
    // Fix: this allocation was previously unchecked.
    fprintf(stderr, "out of memory\n");
    exit(1);
  }
  // For each position in the table...
  for (i = 0; i < EXP_TABLE_SIZE; i++) {
    // Calculate the output of e^x for values in the range -6.0 to +6.0.
    expTable[i] = exp((i / (real)EXP_TABLE_SIZE * 2 - 1) * MAX_EXP); // Precompute the exp() table
    // Replace exp(x) with exp(x) / (exp(x) + 1), which is equivalent to
    // the sigmoid 1 / (1 + exp(-x)).
    expTable[i] = expTable[i] / (expTable[i] + 1); // Precompute f(x) = x / (x + 1)
  }
  TrainModel();
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.