hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
2c6a2548b3a26c8896a49a1b8bbff5776931e32d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "array2d_psng.h"
__global__ void setup_kernel ( hiprandState_t * state, int width, int height, int pitch )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
int id = UniqueBlockIndex * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; // unique id
/* Each thread gets same seed, a different sequence number, no offset */
if( x < width && y < height)
{
hiprandState_t *address = (hiprandState_t *)((char*)state + y * pitch) + x;
hiprand_init(id, 0, 0, address);
}
}
void Array2D_psng::randStateGen()
{
/* Allocate space for prng states on device */
if( _array2D == NULL)
{
size_t pitchData;
CUDA_SAFE_CALL( hipMallocPitch( ( void **)& _array2D , &pitchData, _width * sizeof(hiprandState_t), _height));
_pitchData = static_cast<int>(pitchData);
}
/* Setup prng states */
hipLaunchKernelGGL(( setup_kernel) , dim3(_gridSize), dim3(_blockSize), 0, 0, _array2D, _width, _height, _pitchData );
}
| 2c6a2548b3a26c8896a49a1b8bbff5776931e32d.cu | #include "array2d_psng.h"
__global__ void setup_kernel ( curandState * state, int width, int height, int pitch )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int UniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
int id = UniqueBlockIndex * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; // unique id
/* Each thread gets same seed, a different sequence number, no offset */
if( x < width && y < height)
{
curandState *address = (curandState *)((char*)state + y * pitch) + x;
curand_init(id, 0, 0, address);
}
}
void Array2D_psng::randStateGen()
{
/* Allocate space for prng states on device */
if( _array2D == NULL)
{
size_t pitchData;
CUDA_SAFE_CALL( cudaMallocPitch( ( void **)& _array2D , &pitchData, _width * sizeof(curandState), _height));
_pitchData = static_cast<int>(pitchData);
}
/* Setup prng states */
setup_kernel <<<_gridSize, _blockSize>>>( _array2D, _width, _height, _pitchData );
}
|
f900a6b2aec952d70bba0da2d37a436a698fa1dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
float THCudaTensor_dot(THCState *state, THCudaTensor *self, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THArgCheck(THCudaTensor_nElement(state, self) == THCudaTensor_nElement(state, src), 2, "sizes do not match");
{
self = THCudaTensor_newContiguous(state, self);
src = THCudaTensor_newContiguous(state, src);
float result = THCudaBlas_dot(state,
THCudaTensor_nElement(state, self),
THCudaTensor_data(state, self), 1,
THCudaTensor_data(state, src), 1);
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return result;
}
}
void THCudaTensor_addmv(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *mat, THCudaTensor *vec)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, mat, vec));
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected");
if( mat->size[1] != vec->size[0] )
THError("size mismatch");
if(t->nDimension != 1)
THError("size mismatch");
if(t->size[0] != mat->size[0])
THError("size mismatch");
if(r_ != t)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
if(mat->stride[0] == 1)
{
THCudaBlas_gemv(state, 'n', mat->size[0], mat->size[1],
alpha, THCudaTensor_data(state, mat), mat->stride[1],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
THCudaBlas_gemv(state, 't', mat->size[1], mat->size[0],
alpha, THCudaTensor_data(state, mat), mat->stride[0],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
}
else
{
THCudaTensor *cmat = THCudaTensor_newContiguous(state, mat);
THCudaBlas_gemv(state, 't', mat->size[1], mat->size[0],
alpha, THCudaTensor_data(state, cmat), cmat->stride[0],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
THCudaTensor_free(state, cmat);
}
}
void THCudaTensor_addmm(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *m1, THCudaTensor *m2)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, m1, m2));
char transpose_r, transpose_m1, transpose_m2;
THCudaTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2) )
THError("matrix and matrix expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
THError("size mismatch");
if(t != r_)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
THCudaTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
r__ = THCudaTensor_newWithSize2d(state, r_->size[1], r_->size[0]);
THCudaTensor_copy(state, r__, r_);
THCudaTensor_transpose(state, r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THCudaTensor_newContiguous(state, m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THCudaTensor_newContiguous(state, m2);
}
/* do the operation */
THCudaBlas_gemm(state,
transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THCudaTensor_data(state, m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THCudaTensor_data(state, m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THCudaTensor_data(state, r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THCudaTensor_free(state, m1_);
if(m2_ != m2)
THCudaTensor_free(state, m2_);
if(r__ != r_)
THCudaTensor_freeCopyTo(state, r__, r_);
}
void THCudaTensor_addr(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *vec1, THCudaTensor *vec2)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, vec1, vec2));
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
THError("size mismatch");
if(r_ != t)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
if(beta != 1)
THCudaTensor_mul(state, r_, r_, beta);
if(r_->stride[0] == 1)
{
THCudaBlas_ger(state, vec1->size[0], vec2->size[0],
alpha, THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THCudaBlas_ger(state, vec2->size[0], vec1->size[0],
alpha, THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, r_), r_->stride[0]);
}
else
{
THCudaTensor *cr = THCudaTensor_newClone(state, r_);
THCudaBlas_ger(state, vec2->size[0], vec1->size[0],
alpha, THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, cr), cr->stride[0]);
THCudaTensor_freeCopyTo(state, cr, r_);
}
}
void THCudaTensor_baddbmm(THCState *state, THCudaTensor *result, float beta, THCudaTensor *t,
float alpha, THCudaTensor *batch1, THCudaTensor *batch2) {
THAssert(THCudaTensor_checkGPU(state, 4, result, t, batch1, batch2));
THArgCheck(THCudaTensor_nDimension(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCudaTensor_nDimension(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCudaTensor_nDimension(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCudaTensor_size(state, t, 0) == THCudaTensor_size(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCudaTensor_size(state, t, 0) == THCudaTensor_size(state, batch2, 0), 7,
"equal number of batches expected");
THArgCheck(THCudaTensor_size(state, t, 1) == THCudaTensor_size(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCudaTensor_size(state, t, 2) == THCudaTensor_size(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCudaTensor_size(state, batch1, 2) == THCudaTensor_size(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCudaTensor_resizeAs(state, result, t);
THCudaTensor_copy(state, result, t);
}
bool transpose_result;
char transpose_batch1, transpose_batch2;
long lda, ldb, ldc;
THCudaTensor *result_, *batch1_, *batch2_;
if (result->stride[1] == 1)
{
transpose_result = false;
result_ = result;
ldc = result_->stride[2];
}
else if (result->stride[2] == 1)
{
transpose_result = true;
THCudaTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride[1];
}
else
{
transpose_result = false;
result_ = THCudaTensor_newWithSize3d(state, result->size[0], result->size[2], result->size[1]);
THCudaTensor_copy(state, result_, result);
THCudaTensor_transpose(state, result_, NULL, 1, 2);
ldc = result_->stride[2];
}
if (batch1->stride[transpose_result ? 2 : 1] == 1)
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 1 : 2];
}
else if (batch1->stride[transpose_result ? 1 : 2] == 1)
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
batch1_ = THCudaTensor_newContiguous(state, batch1);
lda = batch1_->stride[1];
}
if (batch2->stride[transpose_result ? 2 : 1] == 1)
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 1 : 2];
}
else if (batch2->stride[transpose_result ? 1 : 2] == 1)
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
batch2_ = THCudaTensor_newContiguous(state, batch2);
ldb = batch2_->stride[1];
}
// Compute pointers to matrices in each batch.
long num_batches = result_->size[0];
size_t matrices_size = num_batches * sizeof(float*);
const float **matrices1 = (const float **)THAlloc(matrices_size);
const float **matrices2 = (const float **)THAlloc(matrices_size);
float **result_matrices = (float **)THAlloc(matrices_size);
for (int i = 0; i < num_batches; ++i)
{
matrices1[i] = THCudaTensor_data(state, batch1_) + i * batch1_->stride[0];
matrices2[i] = THCudaTensor_data(state, batch2_) + i * batch2_->stride[0];
result_matrices[i] = THCudaTensor_data(state, result_) + i * result_->stride[0];
}
// Copy pointers to device.
const float **d_matrices1, **d_matrices2;
float **d_result_matrices;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size));
THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_result_matrices, result_matrices, matrices_size,
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaBlas_gemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
THFree(matrices1);
THFree(matrices2);
THFree(result_matrices);
if (batch1_ != batch1)
THCudaTensor_free(state, batch1_);
if (batch2_ != batch2)
THCudaTensor_free(state, batch2_);
if (result_ != result)
THCudaTensor_freeCopyTo(state, result_, result);
}
| f900a6b2aec952d70bba0da2d37a436a698fa1dc.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
float THCudaTensor_dot(THCState *state, THCudaTensor *self, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THArgCheck(THCudaTensor_nElement(state, self) == THCudaTensor_nElement(state, src), 2, "sizes do not match");
{
self = THCudaTensor_newContiguous(state, self);
src = THCudaTensor_newContiguous(state, src);
float result = THCudaBlas_dot(state,
THCudaTensor_nElement(state, self),
THCudaTensor_data(state, self), 1,
THCudaTensor_data(state, src), 1);
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return result;
}
}
void THCudaTensor_addmv(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *mat, THCudaTensor *vec)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, mat, vec));
if( (mat->nDimension != 2) || (vec->nDimension != 1) )
THError("matrix and vector expected");
if( mat->size[1] != vec->size[0] )
THError("size mismatch");
if(t->nDimension != 1)
THError("size mismatch");
if(t->size[0] != mat->size[0])
THError("size mismatch");
if(r_ != t)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
if(mat->stride[0] == 1)
{
THCudaBlas_gemv(state, 'n', mat->size[0], mat->size[1],
alpha, THCudaTensor_data(state, mat), mat->stride[1],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
}
else if(mat->stride[1] == 1)
{
THCudaBlas_gemv(state, 't', mat->size[1], mat->size[0],
alpha, THCudaTensor_data(state, mat), mat->stride[0],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
}
else
{
THCudaTensor *cmat = THCudaTensor_newContiguous(state, mat);
THCudaBlas_gemv(state, 't', mat->size[1], mat->size[0],
alpha, THCudaTensor_data(state, cmat), cmat->stride[0],
THCudaTensor_data(state, vec), vec->stride[0],
beta, THCudaTensor_data(state, r_), r_->stride[0]);
THCudaTensor_free(state, cmat);
}
}
void THCudaTensor_addmm(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *m1, THCudaTensor *m2)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, m1, m2));
char transpose_r, transpose_m1, transpose_m2;
THCudaTensor *r__, *m1_, *m2_;
if( (m1->nDimension != 2) || (m2->nDimension != 2) )
THError("matrix and matrix expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
THError("size mismatch");
if(t != r_)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
/* r_ */
if(r_->stride[0] == 1 &&
r_->stride[1] != 0)
{
transpose_r = 'n';
r__ = r_;
}
else if(r_->stride[1] == 1 &&
r_->stride[0] != 0)
{
THCudaTensor *swap = m2;
m2 = m1;
m1 = swap;
transpose_r = 't';
r__ = r_;
}
else
{
transpose_r = 'n';
r__ = THCudaTensor_newWithSize2d(state, r_->size[1], r_->size[0]);
THCudaTensor_copy(state, r__, r_);
THCudaTensor_transpose(state, r__, NULL, 0, 1);
}
/* m1 */
if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m1 = 'n';
m1_ = m1;
}
else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m1 = 't';
m1_ = m1;
}
else
{
transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
m1_ = THCudaTensor_newContiguous(state, m1);
}
/* m2 */
if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 &&
m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0)
{
transpose_m2 = 'n';
m2_ = m2;
}
else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 &&
m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0)
{
transpose_m2 = 't';
m2_ = m2;
}
else
{
transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
m2_ = THCudaTensor_newContiguous(state, m2);
}
/* do the operation */
THCudaBlas_gemm(state,
transpose_m1,
transpose_m2,
r__->size[(transpose_r == 'n' ? 0 : 1)],
r__->size[(transpose_r == 'n' ? 1 : 0)],
m1_->size[(transpose_r == 'n' ? 1 : 0)],
alpha,
THCudaTensor_data(state, m1_),
(transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
THCudaTensor_data(state, m2_),
(transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
beta,
THCudaTensor_data(state, r__),
r__->stride[(transpose_r == 'n' ? 1 : 0)]);
/* free intermediate variables */
if(m1_ != m1)
THCudaTensor_free(state, m1_);
if(m2_ != m2)
THCudaTensor_free(state, m2_);
if(r__ != r_)
THCudaTensor_freeCopyTo(state, r__, r_);
}
void THCudaTensor_addr(THCState *state, THCudaTensor *r_, float beta, THCudaTensor *t, float alpha, THCudaTensor *vec1, THCudaTensor *vec2)
{
THAssert(THCudaTensor_checkGPU(state, 4, r_, t, vec1, vec2));
if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
THError("vector and vector expected");
if(t->nDimension != 2)
THError("size mismatch");
if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
THError("size mismatch");
if(r_ != t)
{
THCudaTensor_resizeAs(state, r_, t);
THCudaTensor_copy(state, r_, t);
}
if(beta != 1)
THCudaTensor_mul(state, r_, r_, beta);
if(r_->stride[0] == 1)
{
THCudaBlas_ger(state, vec1->size[0], vec2->size[0],
alpha, THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, r_), r_->stride[1]);
}
else if(r_->stride[1] == 1)
{
THCudaBlas_ger(state, vec2->size[0], vec1->size[0],
alpha, THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, r_), r_->stride[0]);
}
else
{
THCudaTensor *cr = THCudaTensor_newClone(state, r_);
THCudaBlas_ger(state, vec2->size[0], vec1->size[0],
alpha, THCudaTensor_data(state, vec2), vec2->stride[0],
THCudaTensor_data(state, vec1), vec1->stride[0],
THCudaTensor_data(state, cr), cr->stride[0]);
THCudaTensor_freeCopyTo(state, cr, r_);
}
}
void THCudaTensor_baddbmm(THCState *state, THCudaTensor *result, float beta, THCudaTensor *t,
float alpha, THCudaTensor *batch1, THCudaTensor *batch2) {
THAssert(THCudaTensor_checkGPU(state, 4, result, t, batch1, batch2));
THArgCheck(THCudaTensor_nDimension(state, t) == 3, 4, "expected 3D tensor");
THArgCheck(THCudaTensor_nDimension(state, batch1) == 3, 6, "expected 3D tensor");
THArgCheck(THCudaTensor_nDimension(state, batch2) == 3, 7, "expected 3D tensor");
THArgCheck(THCudaTensor_size(state, t, 0) == THCudaTensor_size(state, batch1, 0), 6,
"equal number of batches expected");
THArgCheck(THCudaTensor_size(state, t, 0) == THCudaTensor_size(state, batch2, 0), 7,
"equal number of batches expected");
THArgCheck(THCudaTensor_size(state, t, 1) == THCudaTensor_size(state, batch1, 1), 6,
"wrong matrix size");
THArgCheck(THCudaTensor_size(state, t, 2) == THCudaTensor_size(state, batch2, 2), 7,
"wrong matrix size");
THArgCheck(THCudaTensor_size(state, batch1, 2) == THCudaTensor_size(state, batch2, 1), 6,
"wrong matrix size");
if (t != result) {
THCudaTensor_resizeAs(state, result, t);
THCudaTensor_copy(state, result, t);
}
bool transpose_result;
char transpose_batch1, transpose_batch2;
long lda, ldb, ldc;
THCudaTensor *result_, *batch1_, *batch2_;
if (result->stride[1] == 1)
{
transpose_result = false;
result_ = result;
ldc = result_->stride[2];
}
else if (result->stride[2] == 1)
{
transpose_result = true;
THCudaTensor *swap = batch2;
batch2 = batch1;
batch1 = swap;
result_ = result;
ldc = result_->stride[1];
}
else
{
transpose_result = false;
result_ = THCudaTensor_newWithSize3d(state, result->size[0], result->size[2], result->size[1]);
THCudaTensor_copy(state, result_, result);
THCudaTensor_transpose(state, result_, NULL, 1, 2);
ldc = result_->stride[2];
}
if (batch1->stride[transpose_result ? 2 : 1] == 1)
{
transpose_batch1 = 'n';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 1 : 2];
}
else if (batch1->stride[transpose_result ? 1 : 2] == 1)
{
transpose_batch1 = 't';
batch1_ = batch1;
lda = batch1_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch1 = transpose_result ? 'n' : 't';
batch1_ = THCudaTensor_newContiguous(state, batch1);
lda = batch1_->stride[1];
}
if (batch2->stride[transpose_result ? 2 : 1] == 1)
{
transpose_batch2 = 'n';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 1 : 2];
}
else if (batch2->stride[transpose_result ? 1 : 2] == 1)
{
transpose_batch2 = 't';
batch2_ = batch2;
ldb = batch2_->stride[transpose_result ? 2 : 1];
}
else
{
transpose_batch2 = transpose_result ? 'n' : 't';
batch2_ = THCudaTensor_newContiguous(state, batch2);
ldb = batch2_->stride[1];
}
// Compute pointers to matrices in each batch.
long num_batches = result_->size[0];
size_t matrices_size = num_batches * sizeof(float*);
const float **matrices1 = (const float **)THAlloc(matrices_size);
const float **matrices2 = (const float **)THAlloc(matrices_size);
float **result_matrices = (float **)THAlloc(matrices_size);
for (int i = 0; i < num_batches; ++i)
{
matrices1[i] = THCudaTensor_data(state, batch1_) + i * batch1_->stride[0];
matrices2[i] = THCudaTensor_data(state, batch2_) + i * batch2_->stride[0];
result_matrices[i] = THCudaTensor_data(state, result_) + i * result_->stride[0];
}
// Copy pointers to device.
const float **d_matrices1, **d_matrices2;
float **d_result_matrices;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, matrices_size));
THCudaCheck(THCudaMalloc(state, (void**)&d_result_matrices, matrices_size));
THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_result_matrices, result_matrices, matrices_size,
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaBlas_gemmBatched(
state,
transpose_batch1,
transpose_batch2,
result_->size[transpose_result ? 2 : 1],
result_->size[transpose_result ? 1 : 2],
batch1_->size[transpose_result ? 1 : 2],
alpha,
d_matrices1, lda,
d_matrices2, ldb,
beta,
d_result_matrices, ldc,
num_batches);
THCudaFree(state, d_matrices1);
THCudaFree(state, d_matrices2);
THCudaFree(state, d_result_matrices);
THFree(matrices1);
THFree(matrices2);
THFree(result_matrices);
if (batch1_ != batch1)
THCudaTensor_free(state, batch1_);
if (batch2_ != batch2)
THCudaTensor_free(state, batch2_);
if (result_ != result)
THCudaTensor_freeCopyTo(state, result_, result);
}
|
6c2148ec6838d657134c109c8c13d660648f0e19.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<stdlib.h>
#include<string.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 1024
#define SECTION_SIZE 2*BLOCK_SIZE
__global__ void
listScanKernel(float * input, float * output, int len)
{
__shared__ float list[SECTION_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
list[t] = ((t+start) < len ) ? input[t+start]:0.0f;
list[t+blockDim.x] = ((start+t+blockDim.x) < len) ? input[start+t+blockDim.x]:0.0f;
for(unsigned int stride =1;stride<= BLOCK_SIZE; stride*=2)
{
int index = (t+1)*stride*2-1;
if(index<SECTION_SIZE)
list[index]+=list[index-stride];
__syncthreads();
}
for(unsigned int stride = BLOCK_SIZE/2;stride>0;stride/=2)
{
__syncthreads();
int index = (t+1)*stride*2-1;
if(index+stride < SECTION_SIZE )
list[index+stride]+=list[index];
}
__syncthreads();
if(t+start < len)
output[t+start] = list[t];
}
__global__ void
loadSumArrayKernel(float *input, float *sumArray, int len)
{
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
unsigned int lastBlockId = (len-1)/BLOCK_SIZE;
unsigned int lastThreadIdx = (len%BLOCK_SIZE-1);
if(t+start<len)
{
if(blockIdx.x == lastBlockId)
sumArray[blockIdx.x] = input[lastThreadIdx+start];
else
sumArray[blockIdx.x] = input[start+blockDim.x-1];
}
}
__global__ void
listScanSumKernel(float *input, float *output,int len)
{
__shared__ float sumArray[SECTION_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
if(t+start<len && blockIdx.x>0)
{
output[t+start]+=input[blockIdx.x-1];
__syncthreads();
}
}
void totalCPU(float * input, float * output, int len)
{
int i=0;
output[0]=input[0];
for(i=1;i<len;i++)
output[i] = output[i-1]+input[i];
printf("\n*****CPU calculation******\n");
}
void loadValue(char *FileInput,int len,float *a,float *b)
{
FILE *file;
int i=0;
char buff[100];
memset(b,0,len);
file = fopen(FileInput,"r");
if(!file)
{
printf("\nNo file found!");
system("pause");
exit(0);
}
while(fgets(buff,len,file))
{
a[i] = atof(buff);
i++;
}
fclose(file);
}
void storeResult(char *fileOutput,float *arr,unsigned int len)
{
FILE *file;
int count=0;
file = fopen(fileOutput,"w");
if(!file)
{
printf("\nCannot create file!");
system("pause");
exit(0);
}
fprintf(file,"%d\n",len);
for(count =0 ;count<len;count++)
{
fprintf(file,"%.0f\n",arr[count]);
}
fclose(file);
}
void dispRes(float *arr,int len)
{
int i=0;
printf("result = ");
for(i=0;i<len;i++)
printf("%4.0f ",arr[i]);
system("pause");
}
int main(int argc,char*argv[])
{
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float *deviceSumArray;
float *deviceSumArrayOutput;
float * deviceOutput;
int numElements = (int) (atoi)(argv[3]); // number of elements in the input list
hostInput = (float*)malloc(numElements*sizeof(float));
hostOutput = (float*)malloc(numElements*sizeof(float));
//cuda memory allocation on the device
hipMalloc((void**)&deviceInput,numElements*sizeof(float));
hipMalloc((void**)&deviceOutput,numElements*sizeof(float));
hipMalloc((void**)&deviceSumArray,numElements*sizeof(float));
hipMalloc((void**)&deviceSumArrayOutput,numElements*sizeof(float));
printf("Loading values to the array...\n");
loadValue(argv[1],numElements,hostInput,hostOutput);
//cuda memory copy from host to device
hipMemcpy(deviceInput,hostInput,numElements*sizeof(float),hipMemcpyHostToDevice);
//CPU equivalent
totalCPU(hostInput,hostOutput,numElements);
dispRes(hostOutput,numElements);
printf("Calling CUDA kernel...\n");
dim3 DimGrid((numElements-1)/BLOCK_SIZE+1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
hipLaunchKernelGGL(( listScanKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput,deviceOutput,numElements);
hipLaunchKernelGGL(( loadSumArrayKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceOutput,deviceSumArray,numElements);
hipLaunchKernelGGL(( listScanKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceSumArray,deviceSumArrayOutput,numElements);
hipLaunchKernelGGL(( listScanSumKernel), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceSumArrayOutput,deviceOutput,numElements);
//cuda memory copy from device to host
hipMemcpy(hostOutput,deviceOutput,numElements*sizeof(float),hipMemcpyDeviceToHost);
dispRes(hostOutput,numElements);
storeResult(argv[2],hostOutput,numElements);
free(hostInput);
free(hostOutput);
hipFree(deviceInput);
hipFree(deviceOutput);
return 0;
}
| 6c2148ec6838d657134c109c8c13d660648f0e19.cu | #include <stdio.h>
#include<stdlib.h>
#include<string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 1024
#define SECTION_SIZE 2*BLOCK_SIZE
__global__ void
listScanKernel(float * input, float * output, int len)
{
__shared__ float list[SECTION_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
list[t] = ((t+start) < len ) ? input[t+start]:0.0f;
list[t+blockDim.x] = ((start+t+blockDim.x) < len) ? input[start+t+blockDim.x]:0.0f;
for(unsigned int stride =1;stride<= BLOCK_SIZE; stride*=2)
{
int index = (t+1)*stride*2-1;
if(index<SECTION_SIZE)
list[index]+=list[index-stride];
__syncthreads();
}
for(unsigned int stride = BLOCK_SIZE/2;stride>0;stride/=2)
{
__syncthreads();
int index = (t+1)*stride*2-1;
if(index+stride < SECTION_SIZE )
list[index+stride]+=list[index];
}
__syncthreads();
if(t+start < len)
output[t+start] = list[t];
}
__global__ void
loadSumArrayKernel(float *input, float *sumArray, int len)
{
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
unsigned int lastBlockId = (len-1)/BLOCK_SIZE;
unsigned int lastThreadIdx = (len%BLOCK_SIZE-1);
if(t+start<len)
{
if(blockIdx.x == lastBlockId)
sumArray[blockIdx.x] = input[lastThreadIdx+start];
else
sumArray[blockIdx.x] = input[start+blockDim.x-1];
}
}
__global__ void
listScanSumKernel(float *input, float *output,int len)
{
__shared__ float sumArray[SECTION_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = blockIdx.x*blockDim.x;
if(t+start<len && blockIdx.x>0)
{
output[t+start]+=input[blockIdx.x-1];
__syncthreads();
}
}
void totalCPU(float * input, float * output, int len)
{
int i=0;
output[0]=input[0];
for(i=1;i<len;i++)
output[i] = output[i-1]+input[i];
printf("\n*****CPU calculation******\n");
}
void loadValue(char *FileInput,int len,float *a,float *b)
{
FILE *file;
int i=0;
char buff[100];
memset(b,0,len);
file = fopen(FileInput,"r");
if(!file)
{
printf("\nNo file found!");
system("pause");
exit(0);
}
while(fgets(buff,len,file))
{
a[i] = atof(buff);
i++;
}
fclose(file);
}
void storeResult(char *fileOutput,float *arr,unsigned int len)
{
FILE *file;
int count=0;
file = fopen(fileOutput,"w");
if(!file)
{
printf("\nCannot create file!");
system("pause");
exit(0);
}
fprintf(file,"%d\n",len);
for(count =0 ;count<len;count++)
{
fprintf(file,"%.0f\n",arr[count]);
}
fclose(file);
}
void dispRes(float *arr,int len)
{
int i=0;
printf("result = ");
for(i=0;i<len;i++)
printf("%4.0f ",arr[i]);
system("pause");
}
int main(int argc,char*argv[])
{
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float *deviceSumArray;
float *deviceSumArrayOutput;
float * deviceOutput;
int numElements = (int) (atoi)(argv[3]); // number of elements in the input list
hostInput = (float*)malloc(numElements*sizeof(float));
hostOutput = (float*)malloc(numElements*sizeof(float));
//cuda memory allocation on the device
cudaMalloc((void**)&deviceInput,numElements*sizeof(float));
cudaMalloc((void**)&deviceOutput,numElements*sizeof(float));
cudaMalloc((void**)&deviceSumArray,numElements*sizeof(float));
cudaMalloc((void**)&deviceSumArrayOutput,numElements*sizeof(float));
printf("Loading values to the array...\n");
loadValue(argv[1],numElements,hostInput,hostOutput);
//cuda memory copy from host to device
cudaMemcpy(deviceInput,hostInput,numElements*sizeof(float),cudaMemcpyHostToDevice);
//CPU equivalent
totalCPU(hostInput,hostOutput,numElements);
dispRes(hostOutput,numElements);
printf("Calling CUDA kernel...\n");
dim3 DimGrid((numElements-1)/BLOCK_SIZE+1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
listScanKernel<<<DimGrid,DimBlock>>>(deviceInput,deviceOutput,numElements);
loadSumArrayKernel<<<DimGrid,DimBlock>>>(deviceOutput,deviceSumArray,numElements);
listScanKernel<<<DimGrid,DimBlock>>>(deviceSumArray,deviceSumArrayOutput,numElements);
listScanSumKernel<<<DimGrid,DimBlock>>>(deviceSumArrayOutput,deviceOutput,numElements);
//cuda memory copy from device to host
cudaMemcpy(hostOutput,deviceOutput,numElements*sizeof(float),cudaMemcpyDeviceToHost);
dispRes(hostOutput,numElements);
storeResult(argv[2],hostOutput,numElements);
free(hostInput);
free(hostOutput);
cudaFree(deviceInput);
cudaFree(deviceOutput);
return 0;
}
|
00e4e72ae4401e0e0fcf66dd8486921e54f9cce7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/softmax_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/activation_functor.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/reduce_functor.h"
#include "paddle/phi/kernels/funcs/sparse/softmax.cu.h"
#include "paddle/phi/kernels/gpu/reduce.h"
#include "paddle/phi/kernels/softmax_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void SoftmaxGpuKernel(const IntT* x_crows,
const T* x_values,
T* out_values,
int row_number,
int total_row_number) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
int non_zero_idx = threadIdx.x;
if (row >= total_row_number) return;
int cur_batch = row / row_number;
int crow_idx = cur_batch * (row_number + 1) + (row % row_number);
int cur_batch_offset = 0;
for (int i = 1; i < cur_batch + 1; ++i) {
cur_batch_offset += x_crows[i * (row_number + 1) - 1];
}
int row_first = cur_batch_offset + static_cast<int>(x_crows[crow_idx]);
int row_nnz = static_cast<int>(x_crows[crow_idx + 1] - x_crows[crow_idx]);
if (row_nnz == 0) return;
int kIteration = (row_nnz + warpSize - 1) / warpSize;
T max_val = -std::numeric_limits<T>::infinity();
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
T val = x_values[row_first + idx];
if (val > max_val) {
max_val = val;
}
}
T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF);
T exp_sum = 0;
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
auto functor = phi::funcs::CudaExpFunctor<T>();
T exp = functor(x_values[row_first + idx] - row_max_val);
exp_sum += exp;
out_values[row_first + idx] = exp;
}
T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF);
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
out_values[row_first + idx] = out_values[row_first + idx] / row_exp_sum;
}
}
template <typename T, typename Context>
void SoftmaxCsrKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
int axis,
SparseCsrTensor* out) {
PADDLE_ENFORCE_EQ(axis,
-1,
phi::errors::Unimplemented(
"SparseCsrTensor only support axis=-1 for softmax, "
"which is faster when reading data by row (axis=-1)"));
EmptyLikeCsrKernel<T, Context>(dev_ctx, x, out);
auto x_dim = x.dims();
auto x_rank = x_dim.size();
int total_row_number = 1;
int row_number = 1;
for (int i = 0; i < x_rank - 1; ++i) {
total_row_number *= x_dim[i];
if (i == x_rank - 2) {
row_number = x_dim[i];
}
}
dim3 grid((total_row_number + 3) / 4);
dim3 block(32, 4);
PD_VISIT_BASE_INTEGRAL_TYPES(x.crows().dtype(), "CsrSoftmaxKernel", ([&] {
hipLaunchKernelGGL(( SoftmaxGpuKernel<T, data_t>)
, dim3(grid), dim3(block), 0, dev_ctx.stream(),
x.crows().data<data_t>(),
x.values().data<T>(),
out->mutable_values()->data<T>(),
row_number,
total_row_number);
}));
}
template <typename T, typename IntT>
__global__ void SoftmaxCooGPURawKernel(IntT* sorted_pool_indices,
IntT* pool_sizes,
IntT* pool_offsets,
IntT nvalues,
T* input_values,
T* output_values,
int total_rows) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= total_rows) return;
int tid = threadIdx.x;
int index = row / nvalues;
int j = row % nvalues;
IntT offset = pool_offsets[index];
IntT* pool_indices = sorted_pool_indices + offset;
IntT pool_indices_size = pool_sizes[index];
int kIteration = (pool_indices_size + warpSize - 1) / warpSize;
T max_val = -std::numeric_limits<T>::infinity();
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_value = input_values + j + nvalues * i;
if (*cur_value > max_val) {
max_val = *cur_value;
}
}
T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF);
T exp_sum = 0;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_value = input_values + j + nvalues * i;
auto cur_out_value = output_values + i * nvalues + j;
auto functor = phi::funcs::CudaExpFunctor<T>();
T exp = functor(*cur_value - row_max_val);
exp_sum += exp;
*cur_out_value = exp;
}
T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF);
row_exp_sum = 1.0 / row_exp_sum;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_out_value = output_values + i * nvalues + j;
*cur_out_value *= row_exp_sum;
}
}
template <typename T, typename IntT, typename Context>
void SoftmaxCooGPUKernel(const Context& dev_ctx,
const SparseCooTensor& x,
int axis,
SparseCooTensor* out) {
auto indices = x.indices();
auto values = x.values();
const auto x_dims = x.dims();
const std::vector<IntT> sizes = phi::vectorize<IntT>(x_dims);
const auto sparse_dim = x.sparse_dim();
const IntT x_nnz = x.nnz();
DenseTensor out_indices(indices);
DenseTensor out_values = EmptyLike<T, Context>(dev_ctx, values);
out->SetMember(out_indices, out_values, x.dims(), x.coalesced());
int dim = axis < 0 ? x_dims.size() + axis : axis;
/* If dim is greater than or equal to sparse_dim, the dense softmax is used.
*/
if (dim >= sparse_dim) {
SoftmaxKernel<T, Context>(
dev_ctx, values, dim - sparse_dim + 1, &out_values);
return;
}
auto stream = dev_ctx.stream();
IntT nvalues = std::accumulate(sizes.begin() + sparse_dim,
sizes.end(),
static_cast<IntT>(1),
std::multiplies<>());
auto values_2 = values.Resize({x_nnz, nvalues});
/* Compute independent pools of indices */
DenseTensor sorted_indices;
DenseTensor pool_offsets;
DenseTensor pool_sizes;
std::tie(sorted_indices, pool_offsets, pool_sizes, std::ignore) =
phi::funcs::sparse::ComputePoolMax<T, IntT, Context, false>(
dev_ctx, indices, values_2, sizes, nvalues, static_cast<IntT>(dim));
auto pool_size = pool_offsets.dims()[0];
auto out_values_ptr = out_values.data<T>();
auto values_ptr = values.data<T>();
int total_rows = pool_size * nvalues;
dim3 grid((total_rows + 15) / 16);
dim3 block(32, 16);
hipLaunchKernelGGL(( SoftmaxCooGPURawKernel<T, IntT>)
, dim3(grid), dim3(block), 0, stream, sorted_indices.data<IntT>(),
pool_sizes.data<IntT>(),
pool_offsets.data<IntT>(),
nvalues,
values_ptr,
out_values_ptr,
total_rows);
}
template <typename T, typename Context>
void SoftmaxCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
int axis,
SparseCooTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "SoftmaxCooGPUKernel", ([&] {
SoftmaxCooGPUKernel<T, data_t, Context>(dev_ctx, x, axis, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(softmax_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCsrKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
PD_REGISTER_KERNEL(softmax_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCooKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| 00e4e72ae4401e0e0fcf66dd8486921e54f9cce7.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/softmax_kernel.h"
#include <thrust/device_ptr.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/activation_functor.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/reduce_functor.h"
#include "paddle/phi/kernels/funcs/sparse/softmax.cu.h"
#include "paddle/phi/kernels/gpu/reduce.h"
#include "paddle/phi/kernels/softmax_kernel.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT = int>
__global__ void SoftmaxGpuKernel(const IntT* x_crows,
const T* x_values,
T* out_values,
int row_number,
int total_row_number) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
int non_zero_idx = threadIdx.x;
if (row >= total_row_number) return;
int cur_batch = row / row_number;
int crow_idx = cur_batch * (row_number + 1) + (row % row_number);
int cur_batch_offset = 0;
for (int i = 1; i < cur_batch + 1; ++i) {
cur_batch_offset += x_crows[i * (row_number + 1) - 1];
}
int row_first = cur_batch_offset + static_cast<int>(x_crows[crow_idx]);
int row_nnz = static_cast<int>(x_crows[crow_idx + 1] - x_crows[crow_idx]);
if (row_nnz == 0) return;
int kIteration = (row_nnz + warpSize - 1) / warpSize;
T max_val = -std::numeric_limits<T>::infinity();
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
T val = x_values[row_first + idx];
if (val > max_val) {
max_val = val;
}
}
T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF);
T exp_sum = 0;
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
auto functor = phi::funcs::CudaExpFunctor<T>();
T exp = functor(x_values[row_first + idx] - row_max_val);
exp_sum += exp;
out_values[row_first + idx] = exp;
}
T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF);
for (int i = 0; i < kIteration; ++i) {
int idx = non_zero_idx + i * warpSize;
if (idx >= row_nnz) break;
out_values[row_first + idx] = out_values[row_first + idx] / row_exp_sum;
}
}
template <typename T, typename Context>
void SoftmaxCsrKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
int axis,
SparseCsrTensor* out) {
PADDLE_ENFORCE_EQ(axis,
-1,
phi::errors::Unimplemented(
"SparseCsrTensor only support axis=-1 for softmax, "
"which is faster when reading data by row (axis=-1)"));
EmptyLikeCsrKernel<T, Context>(dev_ctx, x, out);
auto x_dim = x.dims();
auto x_rank = x_dim.size();
int total_row_number = 1;
int row_number = 1;
for (int i = 0; i < x_rank - 1; ++i) {
total_row_number *= x_dim[i];
if (i == x_rank - 2) {
row_number = x_dim[i];
}
}
dim3 grid((total_row_number + 3) / 4);
dim3 block(32, 4);
PD_VISIT_BASE_INTEGRAL_TYPES(x.crows().dtype(), "CsrSoftmaxKernel", ([&] {
SoftmaxGpuKernel<T, data_t>
<<<grid, block, 0, dev_ctx.stream()>>>(
x.crows().data<data_t>(),
x.values().data<T>(),
out->mutable_values()->data<T>(),
row_number,
total_row_number);
}));
}
template <typename T, typename IntT>
__global__ void SoftmaxCooGPURawKernel(IntT* sorted_pool_indices,
IntT* pool_sizes,
IntT* pool_offsets,
IntT nvalues,
T* input_values,
T* output_values,
int total_rows) {
int row = blockIdx.x * blockDim.y + threadIdx.y;
if (row >= total_rows) return;
int tid = threadIdx.x;
int index = row / nvalues;
int j = row % nvalues;
IntT offset = pool_offsets[index];
IntT* pool_indices = sorted_pool_indices + offset;
IntT pool_indices_size = pool_sizes[index];
int kIteration = (pool_indices_size + warpSize - 1) / warpSize;
T max_val = -std::numeric_limits<T>::infinity();
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_value = input_values + j + nvalues * i;
if (*cur_value > max_val) {
max_val = *cur_value;
}
}
T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF);
T exp_sum = 0;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_value = input_values + j + nvalues * i;
auto cur_out_value = output_values + i * nvalues + j;
auto functor = phi::funcs::CudaExpFunctor<T>();
T exp = functor(*cur_value - row_max_val);
exp_sum += exp;
*cur_out_value = exp;
}
T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF);
row_exp_sum = 1.0 / row_exp_sum;
for (int k = 0; k < kIteration; ++k) {
int idx = tid + k * warpSize;
if (idx >= pool_indices_size) break;
auto i = pool_indices[idx];
auto cur_out_value = output_values + i * nvalues + j;
*cur_out_value *= row_exp_sum;
}
}
template <typename T, typename IntT, typename Context>
void SoftmaxCooGPUKernel(const Context& dev_ctx,
const SparseCooTensor& x,
int axis,
SparseCooTensor* out) {
auto indices = x.indices();
auto values = x.values();
const auto x_dims = x.dims();
const std::vector<IntT> sizes = phi::vectorize<IntT>(x_dims);
const auto sparse_dim = x.sparse_dim();
const IntT x_nnz = x.nnz();
DenseTensor out_indices(indices);
DenseTensor out_values = EmptyLike<T, Context>(dev_ctx, values);
out->SetMember(out_indices, out_values, x.dims(), x.coalesced());
int dim = axis < 0 ? x_dims.size() + axis : axis;
/* If dim is greater than or equal to sparse_dim, the dense softmax is used.
*/
if (dim >= sparse_dim) {
SoftmaxKernel<T, Context>(
dev_ctx, values, dim - sparse_dim + 1, &out_values);
return;
}
auto stream = dev_ctx.stream();
IntT nvalues = std::accumulate(sizes.begin() + sparse_dim,
sizes.end(),
static_cast<IntT>(1),
std::multiplies<>());
auto values_2 = values.Resize({x_nnz, nvalues});
/* Compute independent pools of indices */
DenseTensor sorted_indices;
DenseTensor pool_offsets;
DenseTensor pool_sizes;
std::tie(sorted_indices, pool_offsets, pool_sizes, std::ignore) =
phi::funcs::sparse::ComputePoolMax<T, IntT, Context, false>(
dev_ctx, indices, values_2, sizes, nvalues, static_cast<IntT>(dim));
auto pool_size = pool_offsets.dims()[0];
auto out_values_ptr = out_values.data<T>();
auto values_ptr = values.data<T>();
int total_rows = pool_size * nvalues;
dim3 grid((total_rows + 15) / 16);
dim3 block(32, 16);
SoftmaxCooGPURawKernel<T, IntT>
<<<grid, block, 0, stream>>>(sorted_indices.data<IntT>(),
pool_sizes.data<IntT>(),
pool_offsets.data<IntT>(),
nvalues,
values_ptr,
out_values_ptr,
total_rows);
}
template <typename T, typename Context>
void SoftmaxCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
int axis,
SparseCooTensor* out) {
PD_VISIT_BASE_INTEGRAL_TYPES(
x.indices().dtype(), "SoftmaxCooGPUKernel", ([&] {
SoftmaxCooGPUKernel<T, data_t, Context>(dev_ctx, x, axis, out);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(softmax_csr,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCsrKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
PD_REGISTER_KERNEL(softmax_coo,
GPU,
ALL_LAYOUT,
phi::sparse::SoftmaxCooKernel,
float,
double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
193bea60367fdcaece5b7657ae8e4c1a0e59a5eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
__global__ void vector_add(double* C, const double* A, const double* B, int N)
{
// Add the kernel code
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Do not try to access past the allocated memory
if (idx < N) {
C[idx] = A[idx] + B[idx];
}
}
int main(void)
{
const int N = 20;
const int ThreadsInBlock = 128;
double* dA, * dB, * dC;
double hA[N], hB[N], hC[N];
for (int i = 0; i < N; ++i) {
hA[i] = (double)i;
hB[i] = (double)i * i;
}
/*
Add memory allocations and copies. Wrap your runtime function
calls with CUDA_CHECK( ) macro
*/
CUDA_CHECK(hipMalloc((void**)& dA, sizeof(double) * N));
CUDA_CHECK(hipMalloc((void**)& dB, sizeof(double) * N));
CUDA_CHECK(hipMalloc((void**)& dC, sizeof(double) * N));
CUDA_CHECK(hipMemcpy(dA, hA, sizeof(double) * N, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(dB, hB, sizeof(double) * N, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(dC, hC, sizeof(double) * N, hipMemcpyHostToDevice));
//#error Add the remaining memory allocations and copies
// Note the maximum size of threads in a block
dim3 grid, threads;
// Add the kernel call here
//#error Add the CUDA kernel call
vector_add << <1, ThreadsInBlock >> > (hC, dA, dB, N);
// Here we add an explicit synchronization so that we catch errors
// as early as possible. Don't do this in production code!
hipDeviceSynchronize();
CHECK_ERROR_MSG("vector_add kernel");
// Copy back the results and free the device memory
//#error Copy back the results and free the allocated memory
CUDA_CHECK(hipMemcpy(hA, dA, sizeof(double) * N, hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(hB, dB, sizeof(double) * N, hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(hC, dC, sizeof(double) * N, hipMemcpyDeviceToHost));
CUDA_CHECK(hipFree(dA));
CUDA_CHECK(hipFree(dB));
CUDA_CHECK(hipFree(dC));
for (int i = 0; i < N; i++)
printf("%5.1f\n", hC[i]);
return 0;
} | 193bea60367fdcaece5b7657ae8e4c1a0e59a5eb.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
__global__ void vector_add(double* C, const double* A, const double* B, int N)
{
// Add the kernel code
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Do not try to access past the allocated memory
if (idx < N) {
C[idx] = A[idx] + B[idx];
}
}
int main(void)
{
const int N = 20;
const int ThreadsInBlock = 128;
double* dA, * dB, * dC;
double hA[N], hB[N], hC[N];
for (int i = 0; i < N; ++i) {
hA[i] = (double)i;
hB[i] = (double)i * i;
}
/*
Add memory allocations and copies. Wrap your runtime function
calls with CUDA_CHECK( ) macro
*/
CUDA_CHECK(cudaMalloc((void**)& dA, sizeof(double) * N));
CUDA_CHECK(cudaMalloc((void**)& dB, sizeof(double) * N));
CUDA_CHECK(cudaMalloc((void**)& dC, sizeof(double) * N));
CUDA_CHECK(cudaMemcpy(dA, hA, sizeof(double) * N, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(dB, hB, sizeof(double) * N, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(dC, hC, sizeof(double) * N, cudaMemcpyHostToDevice));
//#error Add the remaining memory allocations and copies
// Note the maximum size of threads in a block
dim3 grid, threads;
// Add the kernel call here
//#error Add the CUDA kernel call
vector_add << <1, ThreadsInBlock >> > (hC, dA, dB, N);
// Here we add an explicit synchronization so that we catch errors
// as early as possible. Don't do this in production code!
cudaDeviceSynchronize();
CHECK_ERROR_MSG("vector_add kernel");
// Copy back the results and free the device memory
//#error Copy back the results and free the allocated memory
CUDA_CHECK(cudaMemcpy(hA, dA, sizeof(double) * N, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(hB, dB, sizeof(double) * N, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(hC, dC, sizeof(double) * N, cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaFree(dA));
CUDA_CHECK(cudaFree(dB));
CUDA_CHECK(cudaFree(dC));
for (int i = 0; i < N; i++)
printf("%5.1f\n", hC[i]);
return 0;
} |
634fe55daa3a94dc7cb802b68c4d9f8de4d085b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
/* screen constants */
const int width = 1024;
const int height = 768;
/* charge constants */
const float k = 20.0f;
const float minDistance = 0.1f; // not to divide by zero
const float maxSolidColorLength = 1.0f;
/* charges on the field */
const int maxCharge = 1000;
const int minCharge = -1000;
const char maxChargeCount = 30;
char chargeCount = 0;
__constant__ char dev_chargeCount;
float3 charges[maxChargeCount]; // x, y, z == m
__constant__ float3 dev_charges[maxChargeCount]; // x, y, z == m
/* OpenGL interoperability */
dim3 blocks, threads;
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
/* charge selection */
const int detectChargeRange = 20;
int selectedChargeIndex = -1;
bool isDragging = false;
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void key(unsigned char key, int x, int y) {
switch (key) {
case 27:
printf("Exit application\n");
glutLeaveMainLoop();
break;
}
}
__device__ float length(const float2& q) {
return sqrtf(q.x * q.x + q.y * q.y);
}
__device__ float length2(const float2& q) {
return (q.x * q.x + q.y * q.y);
}
__device__ void setColor(const float2& f, uchar4& pixel) {
pixel.x = pixel.y = pixel.z = pixel.w = 0;
float l = length(f);
pixel.x = (l > maxSolidColorLength ? 255 : l * 256 / maxSolidColorLength);
}
__device__ void calculate(const float3& charge, int x, int y, float2& f) {
f.x = x - charge.x;
f.y = y - charge.y;
float l = length2(f) + minDistance;
float e = charge.z * rsqrt(l * l * l);
f.x *= e;
f.y *= e;
}
__global__ void renderFrame(uchar4* screen) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float2 force, t_force;
force.x = force.y = 0.0f;
if (x >= width || y >= height)
return;
for (char i = 0; i < dev_chargeCount; i++) {
calculate(dev_charges[i], x, y, t_force);
force.x += t_force.x;
force.y += t_force.y;
}
force.x *= k;
force.y *= k;
setColor(force, screen[x + y * width]);
}
void idle(void) {
uchar4* dev_screen;
size_t size;
HANDLE_ERROR(hipGraphicsMapResources(1, &cuda_vbo_resource, 0));
HANDLE_ERROR(
hipGraphicsResourceGetMappedPointer((void**) &dev_screen, &size, cuda_vbo_resource));
// Kernel Time measure
hipEvent_t startEvent, stopEvent;
float elapsedTime = 0.0f;
HANDLE_ERROR(hipEventCreate(&startEvent));
HANDLE_ERROR(hipEventCreate(&stopEvent));
HANDLE_ERROR(hipEventRecord(startEvent, 0));
// Render Image
hipLaunchKernelGGL(( renderFrame), dim3(blocks), dim3(threads), 0, 0, dev_screen);
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
// Kernel Time measure
HANDLE_ERROR(hipEventRecord(stopEvent, 0));
HANDLE_ERROR(hipEventSynchronize(stopEvent));
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, startEvent, stopEvent));
char fps[256];
sprintf(fps, "Electric field: %3.4f ms per frame (FPS: %3.1f)", elapsedTime,
1000 / elapsedTime);
glutSetWindowTitle(fps);
glutPostRedisplay();
}
void draw(void) {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glPointSize(3.0f);
glColor3f(0.0f, 1.0f, 1.0f);
glBegin(GL_POINTS);
glVertex2i(charges[selectedChargeIndex].x, charges[selectedChargeIndex].y);
glEnd();
glutSwapBuffers();
}
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags) {
unsigned int size = width * height * sizeof(uchar4);
glGenBuffers(1, vbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, *vbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size, NULL, GL_DYNAMIC_DRAW);
HANDLE_ERROR(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
}
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) {
HANDLE_ERROR(hipGraphicsUnregisterResource(cuda_vbo_resource));
glBindBuffer(1, *vbo);
glDeleteBuffers(1, vbo);
*vbo = 0;
}
void pushCharge(int x, int y) {
if (chargeCount < maxChargeCount)
chargeCount++;
else {
for (int i = 0; i < maxChargeCount - 1; ++i) {
charges[i] = charges[i + 1];
}
}
charges[chargeCount - 1].x = x;
charges[chargeCount - 1].y = y;
charges[chargeCount - 1].z = rand() % (maxCharge - minCharge) + minCharge;
printf("Debug: Charge #%d (%.0f, %.0f, %.0f)\n", chargeCount - 1,
charges[chargeCount - 1].x, charges[chargeCount - 1].y,
charges[chargeCount - 1].z);
HANDLE_ERROR(
hipMemcpyToSymbol(dev_charges, charges, chargeCount * sizeof(float3)));
HANDLE_ERROR(
hipMemcpyToSymbol(dev_chargeCount, &chargeCount, sizeof(chargeCount)));
printf("Charges %d\n", chargeCount);
}
void mouse(int button, int state, int x, int y) {
if (button != GLUT_LEFT_BUTTON)
return;
if (state == GLUT_DOWN) {
if (selectedChargeIndex != -1) { // Drag
printf("Drag charge #%d... ", selectedChargeIndex);
isDragging = true;
}
} else {
if (selectedChargeIndex != -1) { // Drop
printf("Drop\n");
isDragging = false;
} else {
pushCharge(x, height - y);
}
}
}
void mouseDrag(int x, int y) {
if (isDragging && selectedChargeIndex != -1) {
printf(" drag... ");
charges[selectedChargeIndex].x = x;
charges[selectedChargeIndex].y = height - y;
HANDLE_ERROR(
hipMemcpyToSymbol(dev_charges, charges, chargeCount * sizeof(float3)));
}
}
void mouseTrack(int x, int y) {
if (isDragging)
return;
// Detect selected charge
int dx = 0, dy = 0;
for (int i = 0; i < chargeCount; i++) {
dx = x - charges[i].x;
dy = (height - y) - charges[i].y;
if (dx * dx + dy * dy < detectChargeRange * detectChargeRange) {
selectedChargeIndex = i;
return;
}
}
selectedChargeIndex = -1;
}
void initCuda(int deviceId) {
int deviceCount = 0;
HANDLE_ERROR(hipGetDeviceCount(&deviceCount));
if (deviceCount <= 0) {
printf("No CUDA devices found\n");
exit(-1);
}
HANDLE_ERROR(hipGLSetGLDevice(deviceId));
hipDeviceProp_t properties;
HANDLE_ERROR(hipGetDeviceProperties(&properties, deviceId));
threads.x = 32;
threads.y = properties.maxThreadsPerBlock / threads.x - 2; // to avoid hipErrorLaunchOutOfResources error
blocks.x = (width + threads.x - 1) / threads.x;
blocks.y = (height + threads.y - 1) / threads.y;
printf(
"Debug: blocks(%d, %d), threads(%d, %d)\nCalculated Resolution: %d x %d\n",
blocks.x, blocks.y, threads.x, threads.y, blocks.x * threads.x,
blocks.y * threads.y);
}
void initGlut(int argc, char** argv) {
// Initialize freeglut
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("Electric field");
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
glutIdleFunc(idle);
glutKeyboardFunc(key);
glutMouseFunc(mouse);
glutMotionFunc(mouseDrag);
glutPassiveMotionFunc(mouseTrack);
glutDisplayFunc(draw);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, (GLdouble) width, 0.0, (GLdouble) height);
glewInit();
}
int main(int argc, char** argv) {
setbuf(stdout, NULL);
initCuda(0);
initGlut(argc, argv);
createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard);
glutMainLoop();
deleteVBO(&vbo, cuda_vbo_resource);
return 0;
}
| 634fe55daa3a94dc7cb802b68c4d9f8de4d085b3.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
/* screen constants */
const int width = 1024;
const int height = 768;
/* charge constants */
const float k = 20.0f;
const float minDistance = 0.1f; // not to divide by zero
const float maxSolidColorLength = 1.0f;
/* charges on the field */
const int maxCharge = 1000;
const int minCharge = -1000;
const char maxChargeCount = 30;
char chargeCount = 0;
__constant__ char dev_chargeCount;
float3 charges[maxChargeCount]; // x, y, z == m
__constant__ float3 dev_charges[maxChargeCount]; // x, y, z == m
/* OpenGL interoperability */
dim3 blocks, threads;
GLuint vbo;
struct cudaGraphicsResource *cuda_vbo_resource;
/* charge selection */
const int detectChargeRange = 20;
int selectedChargeIndex = -1;
bool isDragging = false;
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void key(unsigned char key, int x, int y) {
switch (key) {
case 27:
printf("Exit application\n");
glutLeaveMainLoop();
break;
}
}
__device__ float length(const float2& q) {
return sqrtf(q.x * q.x + q.y * q.y);
}
__device__ float length2(const float2& q) {
return (q.x * q.x + q.y * q.y);
}
__device__ void setColor(const float2& f, uchar4& pixel) {
pixel.x = pixel.y = pixel.z = pixel.w = 0;
float l = length(f);
pixel.x = (l > maxSolidColorLength ? 255 : l * 256 / maxSolidColorLength);
}
__device__ void calculate(const float3& charge, int x, int y, float2& f) {
f.x = x - charge.x;
f.y = y - charge.y;
float l = length2(f) + minDistance;
float e = charge.z * rsqrt(l * l * l);
f.x *= e;
f.y *= e;
}
__global__ void renderFrame(uchar4* screen) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
float2 force, t_force;
force.x = force.y = 0.0f;
if (x >= width || y >= height)
return;
for (char i = 0; i < dev_chargeCount; i++) {
calculate(dev_charges[i], x, y, t_force);
force.x += t_force.x;
force.y += t_force.y;
}
force.x *= k;
force.y *= k;
setColor(force, screen[x + y * width]);
}
void idle(void) {
uchar4* dev_screen;
size_t size;
HANDLE_ERROR(cudaGraphicsMapResources(1, &cuda_vbo_resource, 0));
HANDLE_ERROR(
cudaGraphicsResourceGetMappedPointer((void**) &dev_screen, &size, cuda_vbo_resource));
// Kernel Time measure
cudaEvent_t startEvent, stopEvent;
float elapsedTime = 0.0f;
HANDLE_ERROR(cudaEventCreate(&startEvent));
HANDLE_ERROR(cudaEventCreate(&stopEvent));
HANDLE_ERROR(cudaEventRecord(startEvent, 0));
// Render Image
renderFrame<<<blocks, threads>>>(dev_screen);
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
// Kernel Time measure
HANDLE_ERROR(cudaEventRecord(stopEvent, 0));
HANDLE_ERROR(cudaEventSynchronize(stopEvent));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent));
char fps[256];
sprintf(fps, "Electric field: %3.4f ms per frame (FPS: %3.1f)", elapsedTime,
1000 / elapsedTime);
glutSetWindowTitle(fps);
glutPostRedisplay();
}
void draw(void) {
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glPointSize(3.0f);
glColor3f(0.0f, 1.0f, 1.0f);
glBegin(GL_POINTS);
glVertex2i(charges[selectedChargeIndex].x, charges[selectedChargeIndex].y);
glEnd();
glutSwapBuffers();
}
void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res,
unsigned int vbo_res_flags) {
unsigned int size = width * height * sizeof(uchar4);
glGenBuffers(1, vbo);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, *vbo);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, size, NULL, GL_DYNAMIC_DRAW);
HANDLE_ERROR(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags));
}
void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) {
HANDLE_ERROR(cudaGraphicsUnregisterResource(cuda_vbo_resource));
glBindBuffer(1, *vbo);
glDeleteBuffers(1, vbo);
*vbo = 0;
}
void pushCharge(int x, int y) {
if (chargeCount < maxChargeCount)
chargeCount++;
else {
for (int i = 0; i < maxChargeCount - 1; ++i) {
charges[i] = charges[i + 1];
}
}
charges[chargeCount - 1].x = x;
charges[chargeCount - 1].y = y;
charges[chargeCount - 1].z = rand() % (maxCharge - minCharge) + minCharge;
printf("Debug: Charge #%d (%.0f, %.0f, %.0f)\n", chargeCount - 1,
charges[chargeCount - 1].x, charges[chargeCount - 1].y,
charges[chargeCount - 1].z);
HANDLE_ERROR(
cudaMemcpyToSymbol(dev_charges, charges, chargeCount * sizeof(float3)));
HANDLE_ERROR(
cudaMemcpyToSymbol(dev_chargeCount, &chargeCount, sizeof(chargeCount)));
printf("Charges %d\n", chargeCount);
}
void mouse(int button, int state, int x, int y) {
if (button != GLUT_LEFT_BUTTON)
return;
if (state == GLUT_DOWN) {
if (selectedChargeIndex != -1) { // Drag
printf("Drag charge #%d... ", selectedChargeIndex);
isDragging = true;
}
} else {
if (selectedChargeIndex != -1) { // Drop
printf("Drop\n");
isDragging = false;
} else {
pushCharge(x, height - y);
}
}
}
void mouseDrag(int x, int y) {
if (isDragging && selectedChargeIndex != -1) {
printf(" drag... ");
charges[selectedChargeIndex].x = x;
charges[selectedChargeIndex].y = height - y;
HANDLE_ERROR(
cudaMemcpyToSymbol(dev_charges, charges, chargeCount * sizeof(float3)));
}
}
void mouseTrack(int x, int y) {
if (isDragging)
return;
// Detect selected charge
int dx = 0, dy = 0;
for (int i = 0; i < chargeCount; i++) {
dx = x - charges[i].x;
dy = (height - y) - charges[i].y;
if (dx * dx + dy * dy < detectChargeRange * detectChargeRange) {
selectedChargeIndex = i;
return;
}
}
selectedChargeIndex = -1;
}
void initCuda(int deviceId) {
int deviceCount = 0;
HANDLE_ERROR(cudaGetDeviceCount(&deviceCount));
if (deviceCount <= 0) {
printf("No CUDA devices found\n");
exit(-1);
}
HANDLE_ERROR(cudaGLSetGLDevice(deviceId));
cudaDeviceProp properties;
HANDLE_ERROR(cudaGetDeviceProperties(&properties, deviceId));
threads.x = 32;
threads.y = properties.maxThreadsPerBlock / threads.x - 2; // to avoid cudaErrorLaunchOutOfResources error
blocks.x = (width + threads.x - 1) / threads.x;
blocks.y = (height + threads.y - 1) / threads.y;
printf(
"Debug: blocks(%d, %d), threads(%d, %d)\nCalculated Resolution: %d x %d\n",
blocks.x, blocks.y, threads.x, threads.y, blocks.x * threads.x,
blocks.y * threads.y);
}
void initGlut(int argc, char** argv) {
// Initialize freeglut
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("Electric field");
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION);
glutIdleFunc(idle);
glutKeyboardFunc(key);
glutMouseFunc(mouse);
glutMotionFunc(mouseDrag);
glutPassiveMotionFunc(mouseTrack);
glutDisplayFunc(draw);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, (GLdouble) width, 0.0, (GLdouble) height);
glewInit();
}
int main(int argc, char** argv) {
setbuf(stdout, NULL);
initCuda(0);
initGlut(argc, argv);
createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard);
glutMainLoop();
deleteVBO(&vbo, cuda_vbo_resource);
return 0;
}
|
dd040ddbb009e946f26257b0e015a09702db5d70.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "CUDAImageEditor.cuh"
__global__
void removeBlue(const unsigned int width, const unsigned char* const inputPixels, unsigned char* const outputPixels) {
// Set third byte to 0
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int byteIndex = 3 * (x + width * y);
// Use weighted values
int colour = (0.2125 * inputPixels[byteIndex + 0]) + (0.7154 * inputPixels[byteIndex + 1]) + (0.0721 * inputPixels[byteIndex + 2]);
outputPixels[byteIndex + 0] = colour;
outputPixels[byteIndex + 1] = colour;
outputPixels[byteIndex + 2] = colour;
}
void CUDAImageEditor::convertToMonochrome(const unsigned int height, const unsigned int width, const unsigned char* const h_inputPixels, unsigned char* const h_outputPixels) {
const unsigned int BUFFER_SIZE{ height * width * 3 };
// Put pixel buffer in device memory
unsigned char* d_inputPixels;
unsigned char* d_outputPixels;
hipMalloc(&d_inputPixels, BUFFER_SIZE);
hipMalloc(&d_outputPixels, BUFFER_SIZE);
hipMemcpy(d_inputPixels, h_inputPixels, BUFFER_SIZE, hipMemcpyHostToDevice);
// Blocks will be 8x8 threads
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
hipLaunchKernelGGL(( removeBlue), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, width, d_inputPixels, d_outputPixels);
hipMemcpy((void *)h_outputPixels, d_outputPixels, BUFFER_SIZE, hipMemcpyDeviceToHost);
hipFree(d_inputPixels);
hipFree(d_outputPixels);
} | dd040ddbb009e946f26257b0e015a09702db5d70.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "CUDAImageEditor.cuh"
__global__
void removeBlue(const unsigned int width, const unsigned char* const inputPixels, unsigned char* const outputPixels) {
// Set third byte to 0
unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y;
unsigned int byteIndex = 3 * (x + width * y);
// Use weighted values
int colour = (0.2125 * inputPixels[byteIndex + 0]) + (0.7154 * inputPixels[byteIndex + 1]) + (0.0721 * inputPixels[byteIndex + 2]);
outputPixels[byteIndex + 0] = colour;
outputPixels[byteIndex + 1] = colour;
outputPixels[byteIndex + 2] = colour;
}
void CUDAImageEditor::convertToMonochrome(const unsigned int height, const unsigned int width, const unsigned char* const h_inputPixels, unsigned char* const h_outputPixels) {
const unsigned int BUFFER_SIZE{ height * width * 3 };
// Put pixel buffer in device memory
unsigned char* d_inputPixels;
unsigned char* d_outputPixels;
cudaMalloc(&d_inputPixels, BUFFER_SIZE);
cudaMalloc(&d_outputPixels, BUFFER_SIZE);
cudaMemcpy(d_inputPixels, h_inputPixels, BUFFER_SIZE, cudaMemcpyHostToDevice);
// Blocks will be 8x8 threads
dim3 threadsPerBlock(8, 8);
dim3 numBlocks(width / threadsPerBlock.x, height / threadsPerBlock.y);
removeBlue<<< numBlocks, threadsPerBlock >>>(width, d_inputPixels, d_outputPixels);
cudaMemcpy((void *)h_outputPixels, d_outputPixels, BUFFER_SIZE, cudaMemcpyDeviceToHost);
cudaFree(d_inputPixels);
cudaFree(d_outputPixels);
} |
fc7b017eb82f9286d84265935d8b743903c281d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <nervCUDA.h>
#include <nerv_kernels.h>
#include "rocblas.h"
// Method used to compute op(A)*x using CUBLAS:
template<typename T>
void mat_vec_mult_device(hipblasHandle_t handle, hipblasOperation_t trans, unsigned int nrows, unsigned int ncols,
T *A, T *x, T *y, T alpha)
{
T beta = (T)0.0;
hipblasSgemv(handle, trans, nrows, ncols, &alpha, A, nrows, x, 1, &beta, y, 1);
}
template<>
void mat_vec_mult_device<double>(hipblasHandle_t handle, hipblasOperation_t trans, unsigned int nrows, unsigned int ncols,
double *A, double *x, double *y, double alpha)
{
double beta = 0.0;
checkCublasErrors(hipblasDgemv(handle, trans, nrows, ncols, &alpha, A, nrows, x, 1, &beta, y, 1));
}
template <typename T>
void _mat_vec_mult(unsigned int nrows, unsigned int ncols, T *A, T *x, T *y, bool tpA, T alpha)
{
size_t size;
size = nrows * ncols * sizeof(T);
T *d_A = NULL;
checkCudaErrors(hipMalloc(&d_A, size));
checkCudaErrors(hipMemcpy(d_A, A, size, hipMemcpyHostToDevice));
size = (tpA ? nrows : ncols) * sizeof(T);
T *d_x = NULL;
checkCudaErrors(hipMalloc(&d_x, size));
checkCudaErrors(hipMemcpy(d_x, x, size, hipMemcpyHostToDevice));
size = (tpA ? ncols : nrows) * sizeof(T);
T *d_y = NULL;
checkCudaErrors(hipMalloc(&d_y, size));
// checkCudaErrors(hipMemcpy(d_y, vec2, size, hipMemcpyHostToDevice));
hipblasHandle_t handle;
checkCublasErrors(hipblasCreate(&handle));
// hipStream_t stream;
// checkCublasErrors(hipblasSetStream(handle, stream));
mat_vec_mult_device(handle, tpA ? HIPBLAS_OP_T : HIPBLAS_OP_N, nrows, ncols, d_A, d_x, d_y, alpha);
checkCublasErrors(hipblasDestroy(handle));
copyFromDevice(y, d_y, (tpA ? ncols : nrows));
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
}
template <typename T>
void _mat_vec_mult_cpu(unsigned int nrows, unsigned int ncols, T *A, T *x, T *y, bool tpA, T alpha)
{
// perform the matrix multiplication:
unsigned int nr = tpA ? ncols : nrows;
unsigned int nc = tpA ? nrows : ncols;
for (unsigned int r = 0; r < nr; ++r)
{
T val = 0.0;
for (unsigned int c = 0; c < nc; ++c)
{
// We retrieve the element A(r,c) and x(c)
val += (tpA ? A[nrows * r + c] : A[nrows * c + r]) * x[c];
}
y[r] = val*alpha;
}
}
extern "C" {
void mat_vec_mult(unsigned int nrows, unsigned int ncols, double *A, double *x, double *y, bool tpA, double alpha)
{
_mat_vec_mult(nrows, ncols, A, x, y, tpA, alpha);
}
void mat_vec_mult_f(unsigned int nrows, unsigned int ncols, float *A, float *x, float *y, bool tpA, float alpha)
{
_mat_vec_mult(nrows, ncols, A, x, y, tpA, alpha);
}
void mat_vec_mult_cpu(unsigned int nrows, unsigned int ncols, double *A, double *x, double *y, bool tpA, double alpha)
{
_mat_vec_mult_cpu(nrows, ncols, A, x, y, tpA, alpha);
}
}
| fc7b017eb82f9286d84265935d8b743903c281d5.cu | #include <nervCUDA.h>
#include <nerv_kernels.h>
#include "cublas_v2.h"
// Method used to compute op(A)*x using CUBLAS:
template<typename T>
void mat_vec_mult_device(cublasHandle_t handle, cublasOperation_t trans, unsigned int nrows, unsigned int ncols,
T *A, T *x, T *y, T alpha)
{
T beta = (T)0.0;
cublasSgemv(handle, trans, nrows, ncols, &alpha, A, nrows, x, 1, &beta, y, 1);
}
template<>
void mat_vec_mult_device<double>(cublasHandle_t handle, cublasOperation_t trans, unsigned int nrows, unsigned int ncols,
double *A, double *x, double *y, double alpha)
{
double beta = 0.0;
checkCublasErrors(cublasDgemv(handle, trans, nrows, ncols, &alpha, A, nrows, x, 1, &beta, y, 1));
}
template <typename T>
void _mat_vec_mult(unsigned int nrows, unsigned int ncols, T *A, T *x, T *y, bool tpA, T alpha)
{
size_t size;
size = nrows * ncols * sizeof(T);
T *d_A = NULL;
checkCudaErrors(cudaMalloc(&d_A, size));
checkCudaErrors(cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice));
size = (tpA ? nrows : ncols) * sizeof(T);
T *d_x = NULL;
checkCudaErrors(cudaMalloc(&d_x, size));
checkCudaErrors(cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice));
size = (tpA ? ncols : nrows) * sizeof(T);
T *d_y = NULL;
checkCudaErrors(cudaMalloc(&d_y, size));
// checkCudaErrors(cudaMemcpy(d_y, vec2, size, cudaMemcpyHostToDevice));
cublasHandle_t handle;
checkCublasErrors(cublasCreate(&handle));
// cudaStream_t stream;
// checkCublasErrors(cublasSetStream(handle, stream));
mat_vec_mult_device(handle, tpA ? CUBLAS_OP_T : CUBLAS_OP_N, nrows, ncols, d_A, d_x, d_y, alpha);
checkCublasErrors(cublasDestroy(handle));
copyFromDevice(y, d_y, (tpA ? ncols : nrows));
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
}
template <typename T>
void _mat_vec_mult_cpu(unsigned int nrows, unsigned int ncols, T *A, T *x, T *y, bool tpA, T alpha)
{
// perform the matrix multiplication:
unsigned int nr = tpA ? ncols : nrows;
unsigned int nc = tpA ? nrows : ncols;
for (unsigned int r = 0; r < nr; ++r)
{
T val = 0.0;
for (unsigned int c = 0; c < nc; ++c)
{
// We retrieve the element A(r,c) and x(c)
val += (tpA ? A[nrows * r + c] : A[nrows * c + r]) * x[c];
}
y[r] = val*alpha;
}
}
extern "C" {
void mat_vec_mult(unsigned int nrows, unsigned int ncols, double *A, double *x, double *y, bool tpA, double alpha)
{
_mat_vec_mult(nrows, ncols, A, x, y, tpA, alpha);
}
void mat_vec_mult_f(unsigned int nrows, unsigned int ncols, float *A, float *x, float *y, bool tpA, float alpha)
{
_mat_vec_mult(nrows, ncols, A, x, y, tpA, alpha);
}
void mat_vec_mult_cpu(unsigned int nrows, unsigned int ncols, double *A, double *x, double *y, bool tpA, double alpha)
{
_mat_vec_mult_cpu(nrows, ncols, A, x, y, tpA, alpha);
}
}
|
test_trt.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <chrono>
#include <stdexcept>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>
#define MS_PER_SEC 1000.0
using namespace std;
using namespace nvinfer1;
class TestConfig;
typedef void (*preprocess_fn_t)(float *input, size_t channels, size_t height, size_t width);
float * imageToTensor(const cv::Mat & image);
void preprocessVgg(float *input, size_t channels, size_t height, size_t width);
void preprocessInception(float *input, size_t channels, size_t height, size_t width);
size_t argmax(float *input, size_t numel);
void test(const TestConfig &testConfig);
class TestConfig
{
public:
string imagePath;
string planPath;
string inputNodeName;
string outputNodeName;
string preprocessFnName;
string inputHeight;
string inputWidth;
string numOutputCategories;
string dataType;
string maxBatchSize;
string workspaceSize;
string numRuns;
string useMappedMemory;
string statsPath;
TestConfig(int argc, char * argv[])
{
imagePath = argv[1];
planPath = argv[2];
inputNodeName = argv[3];
inputHeight = argv[4];
inputWidth = argv[5];
outputNodeName = argv[6];
numOutputCategories = argv[7];
preprocessFnName = argv[8];
numRuns = argv[9];
dataType = argv[10];
maxBatchSize = argv[11];
workspaceSize = argv[12];
useMappedMemory = argv[13];
statsPath = argv[14];
}
static string UsageString()
{
string s = "";
s += "imagePath: \n";
s += "planPath: \n";
s += "inputNodeName: \n";
s += "inputHeight: \n";
s += "inputWidth: \n";
s += "outputNodeName: \n";
s += "numOutputCategories: \n";
s += "preprocessFnName: \n";
s += "numRuns: \n";
s += "dataType: \n";
s += "maxBatchSize: \n";
s += "workspaceSize: \n";
s += "useMappedMemory: \n";
s += "statsPath: \n";
return s;
}
string ToString()
{
string s = "";
s += "imagePath: " + imagePath + "\n";
s += "planPath: " + planPath + "\n";
s += "inputNodeName: " + inputNodeName + "\n";
s += "inputHeight: " + inputHeight + "\n";
s += "inputWidth: " + inputWidth + "\n";
s += "outputNodeName: " + outputNodeName + "\n";
s += "numOutputCategories: " + numOutputCategories + "\n";
s += "preprocessFnName: " + preprocessFnName + "\n";
s += "numRuns: " + numRuns + "\n";
s += "dataType: " + dataType + "\n";
s += "maxBatchSize: " + maxBatchSize + "\n";
s += "workspaceSize: " + workspaceSize + "\n";
s += "useMappedMemory: " + useMappedMemory + "\n";
s += "statsPath: " + statsPath + "\n";
return s;
}
static int ToInteger(string value)
{
int valueInt;
stringstream ss;
ss << value;
ss >> valueInt;
return valueInt;
}
preprocess_fn_t PreprocessFn() const {
if (preprocessFnName == "preprocess_vgg")
return preprocessVgg;
else if (preprocessFnName == "preprocess_inception")
return preprocessInception;
else
throw runtime_error("Invalid preprocessing function name.");
}
int InputWidth() const { return ToInteger(inputWidth); }
int InputHeight() const { return ToInteger(inputHeight); }
int NumOutputCategories() const { return ToInteger(numOutputCategories); }
nvinfer1::DataType DataType() const {
if (dataType == "float")
return nvinfer1::DataType::kFLOAT;
else if (dataType == "half")
return nvinfer1::DataType::kHALF;
else
throw runtime_error("Invalid data type.");
}
int MaxBatchSize() const { return ToInteger(maxBatchSize); }
int WorkspaceSize() const { return ToInteger(workspaceSize); }
int NumRuns() const { return ToInteger(numRuns); }
int UseMappedMemory() const { return ToInteger(useMappedMemory); }
};
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
cout << msg << endl;
}
} gLogger;
int main(int argc, char * argv[])
{
if (argc != 15)
{
cout << TestConfig::UsageString() << endl;
return 0;
}
TestConfig testConfig(argc, argv);
cout << "\ntestConfig: \n" << testConfig.ToString() << endl;
test(testConfig);
return 0;
}
float *imageToTensor(const cv::Mat & image)
{
const size_t height = image.rows;
const size_t width = image.cols;
const size_t channels = image.channels();
const size_t numel = height * width * channels;
const size_t stridesCv[3] = { width * channels, channels, 1 };
const size_t strides[3] = { height * width, width, 1 };
float * tensor;
hipHostMalloc((void**)&tensor, numel * sizeof(float), hipHostMallocMapped);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] = (float) image.data[offsetCv];
}
}
}
return tensor;
}
void preprocessVgg(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t strides[3] = { height * width, width, 1 };
const float mean[3] = { 123.68, 116.78, 103.94 };
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] -= mean[k];
}
}
}
}
void preprocessInception(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t numel = channels * height * width;
for (int i = 0; i < numel; i++)
tensor[i] = 2.0 * (tensor[i] / 255.0 - 0.5);
}
size_t argmax(float * tensor, size_t numel)
{
if (numel <= 0)
return 0;
size_t maxIndex = 0;
float max = tensor[0];
for (int i = 0; i < numel; i++)
{
if (tensor[i] > max)
{
maxIndex = i;
max = tensor[i];
}
}
return maxIndex;
}
void test(const TestConfig &testConfig)
{
ifstream planFile(testConfig.planPath);
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(),
plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(testConfig.inputNodeName.c_str());
outputBindingIndex = engine->getBindingIndex(testConfig.outputNodeName.c_str());
// load and preprocess image
cv::Mat image = cv::imread(testConfig.imagePath, cv::IMREAD_COLOR);
cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3);
cv::resize(image, image, cv::Size(testConfig.InputWidth(), testConfig.InputHeight()));
float *input = imageToTensor(image);
testConfig.PreprocessFn()(input, 3, testConfig.InputHeight(), testConfig.InputWidth());
// allocate memory on host / device for input / output
float *output;
float *inputDevice;
float *outputDevice;
size_t inputSize = testConfig.InputHeight() * testConfig.InputWidth() * 3 * sizeof(float);
hipHostMalloc(&output, testConfig.NumOutputCategories() * sizeof(float), hipHostMallocMapped);
if (testConfig.UseMappedMemory())
{
hipHostGetDevicePointer(&inputDevice, input, 0);
hipHostGetDevicePointer(&outputDevice, output, 0);
}
else
{
hipMalloc(&inputDevice, inputSize);
hipMalloc(&outputDevice, testConfig.NumOutputCategories() * sizeof(float));
}
float *bindings[2];
bindings[inputBindingIndex] = inputDevice;
bindings[outputBindingIndex] = outputDevice;
// run and compute average time over numRuns iterations
double avgTime = 0;
for (int i = 0; i < testConfig.NumRuns() + 1; i++)
{
chrono::duration<double> diff;
if (testConfig.UseMappedMemory())
{
auto t0 = chrono::steady_clock::now();
context->execute(1, (void**)bindings);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
else
{
auto t0 = chrono::steady_clock::now();
hipMemcpy(inputDevice, input, inputSize, hipMemcpyHostToDevice);
context->execute(1, (void**)bindings);
hipMemcpy(output, outputDevice, testConfig.NumOutputCategories() * sizeof(float), hipMemcpyDeviceToHost);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
if (i != 0)
avgTime += MS_PER_SEC * diff.count();
}
avgTime /= testConfig.NumRuns();
// save results to file
int maxCategoryIndex = argmax(output, testConfig.NumOutputCategories()) + 1001 - testConfig.NumOutputCategories();
cout << "Most likely category id is " << maxCategoryIndex << endl;
cout << "Average execution time in ms is " << avgTime << endl;
ofstream outfile;
outfile.open(testConfig.statsPath, ios_base::app);
outfile << "\n" << testConfig.planPath
<< " " << avgTime;
// << " " << maxCategoryIndex
// << " " << testConfig.InputWidth()
// << " " << testConfig.InputHeight()
// << " " << testConfig.MaxBatchSize()
// << " " << testConfig.WorkspaceSize()
// << " " << testConfig.dataType
// << " " << testConfig.NumRuns()
// << " " << testConfig.UseMappedMemory();
outfile.close();
hipFree(inputDevice);
hipFree(outputDevice);
hipHostFree(input);
hipHostFree(output);
engine->destroy();
context->destroy();
runtime->destroy();
}
| test_trt.cu | /**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <chrono>
#include <stdexcept>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>
#define MS_PER_SEC 1000.0
using namespace std;
using namespace nvinfer1;
class TestConfig;
typedef void (*preprocess_fn_t)(float *input, size_t channels, size_t height, size_t width);
float * imageToTensor(const cv::Mat & image);
void preprocessVgg(float *input, size_t channels, size_t height, size_t width);
void preprocessInception(float *input, size_t channels, size_t height, size_t width);
size_t argmax(float *input, size_t numel);
void test(const TestConfig &testConfig);
class TestConfig
{
public:
string imagePath;
string planPath;
string inputNodeName;
string outputNodeName;
string preprocessFnName;
string inputHeight;
string inputWidth;
string numOutputCategories;
string dataType;
string maxBatchSize;
string workspaceSize;
string numRuns;
string useMappedMemory;
string statsPath;
TestConfig(int argc, char * argv[])
{
imagePath = argv[1];
planPath = argv[2];
inputNodeName = argv[3];
inputHeight = argv[4];
inputWidth = argv[5];
outputNodeName = argv[6];
numOutputCategories = argv[7];
preprocessFnName = argv[8];
numRuns = argv[9];
dataType = argv[10];
maxBatchSize = argv[11];
workspaceSize = argv[12];
useMappedMemory = argv[13];
statsPath = argv[14];
}
static string UsageString()
{
string s = "";
s += "imagePath: \n";
s += "planPath: \n";
s += "inputNodeName: \n";
s += "inputHeight: \n";
s += "inputWidth: \n";
s += "outputNodeName: \n";
s += "numOutputCategories: \n";
s += "preprocessFnName: \n";
s += "numRuns: \n";
s += "dataType: \n";
s += "maxBatchSize: \n";
s += "workspaceSize: \n";
s += "useMappedMemory: \n";
s += "statsPath: \n";
return s;
}
string ToString()
{
string s = "";
s += "imagePath: " + imagePath + "\n";
s += "planPath: " + planPath + "\n";
s += "inputNodeName: " + inputNodeName + "\n";
s += "inputHeight: " + inputHeight + "\n";
s += "inputWidth: " + inputWidth + "\n";
s += "outputNodeName: " + outputNodeName + "\n";
s += "numOutputCategories: " + numOutputCategories + "\n";
s += "preprocessFnName: " + preprocessFnName + "\n";
s += "numRuns: " + numRuns + "\n";
s += "dataType: " + dataType + "\n";
s += "maxBatchSize: " + maxBatchSize + "\n";
s += "workspaceSize: " + workspaceSize + "\n";
s += "useMappedMemory: " + useMappedMemory + "\n";
s += "statsPath: " + statsPath + "\n";
return s;
}
static int ToInteger(string value)
{
int valueInt;
stringstream ss;
ss << value;
ss >> valueInt;
return valueInt;
}
preprocess_fn_t PreprocessFn() const {
if (preprocessFnName == "preprocess_vgg")
return preprocessVgg;
else if (preprocessFnName == "preprocess_inception")
return preprocessInception;
else
throw runtime_error("Invalid preprocessing function name.");
}
int InputWidth() const { return ToInteger(inputWidth); }
int InputHeight() const { return ToInteger(inputHeight); }
int NumOutputCategories() const { return ToInteger(numOutputCategories); }
nvinfer1::DataType DataType() const {
if (dataType == "float")
return nvinfer1::DataType::kFLOAT;
else if (dataType == "half")
return nvinfer1::DataType::kHALF;
else
throw runtime_error("Invalid data type.");
}
int MaxBatchSize() const { return ToInteger(maxBatchSize); }
int WorkspaceSize() const { return ToInteger(workspaceSize); }
int NumRuns() const { return ToInteger(numRuns); }
int UseMappedMemory() const { return ToInteger(useMappedMemory); }
};
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
cout << msg << endl;
}
} gLogger;
int main(int argc, char * argv[])
{
if (argc != 15)
{
cout << TestConfig::UsageString() << endl;
return 0;
}
TestConfig testConfig(argc, argv);
cout << "\ntestConfig: \n" << testConfig.ToString() << endl;
test(testConfig);
return 0;
}
float *imageToTensor(const cv::Mat & image)
{
const size_t height = image.rows;
const size_t width = image.cols;
const size_t channels = image.channels();
const size_t numel = height * width * channels;
const size_t stridesCv[3] = { width * channels, channels, 1 };
const size_t strides[3] = { height * width, width, 1 };
float * tensor;
cudaHostAlloc((void**)&tensor, numel * sizeof(float), cudaHostAllocMapped);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] = (float) image.data[offsetCv];
}
}
}
return tensor;
}
void preprocessVgg(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t strides[3] = { height * width, width, 1 };
const float mean[3] = { 123.68, 116.78, 103.94 };
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] -= mean[k];
}
}
}
}
void preprocessInception(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t numel = channels * height * width;
for (int i = 0; i < numel; i++)
tensor[i] = 2.0 * (tensor[i] / 255.0 - 0.5);
}
size_t argmax(float * tensor, size_t numel)
{
if (numel <= 0)
return 0;
size_t maxIndex = 0;
float max = tensor[0];
for (int i = 0; i < numel; i++)
{
if (tensor[i] > max)
{
maxIndex = i;
max = tensor[i];
}
}
return maxIndex;
}
void test(const TestConfig &testConfig)
{
ifstream planFile(testConfig.planPath);
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(),
plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(testConfig.inputNodeName.c_str());
outputBindingIndex = engine->getBindingIndex(testConfig.outputNodeName.c_str());
// load and preprocess image
cv::Mat image = cv::imread(testConfig.imagePath, cv::IMREAD_COLOR);
cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3);
cv::resize(image, image, cv::Size(testConfig.InputWidth(), testConfig.InputHeight()));
float *input = imageToTensor(image);
testConfig.PreprocessFn()(input, 3, testConfig.InputHeight(), testConfig.InputWidth());
// allocate memory on host / device for input / output
float *output;
float *inputDevice;
float *outputDevice;
size_t inputSize = testConfig.InputHeight() * testConfig.InputWidth() * 3 * sizeof(float);
cudaHostAlloc(&output, testConfig.NumOutputCategories() * sizeof(float), cudaHostAllocMapped);
if (testConfig.UseMappedMemory())
{
cudaHostGetDevicePointer(&inputDevice, input, 0);
cudaHostGetDevicePointer(&outputDevice, output, 0);
}
else
{
cudaMalloc(&inputDevice, inputSize);
cudaMalloc(&outputDevice, testConfig.NumOutputCategories() * sizeof(float));
}
float *bindings[2];
bindings[inputBindingIndex] = inputDevice;
bindings[outputBindingIndex] = outputDevice;
// run and compute average time over numRuns iterations
double avgTime = 0;
for (int i = 0; i < testConfig.NumRuns() + 1; i++)
{
chrono::duration<double> diff;
if (testConfig.UseMappedMemory())
{
auto t0 = chrono::steady_clock::now();
context->execute(1, (void**)bindings);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
else
{
auto t0 = chrono::steady_clock::now();
cudaMemcpy(inputDevice, input, inputSize, cudaMemcpyHostToDevice);
context->execute(1, (void**)bindings);
cudaMemcpy(output, outputDevice, testConfig.NumOutputCategories() * sizeof(float), cudaMemcpyDeviceToHost);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
if (i != 0)
avgTime += MS_PER_SEC * diff.count();
}
avgTime /= testConfig.NumRuns();
// save results to file
int maxCategoryIndex = argmax(output, testConfig.NumOutputCategories()) + 1001 - testConfig.NumOutputCategories();
cout << "Most likely category id is " << maxCategoryIndex << endl;
cout << "Average execution time in ms is " << avgTime << endl;
ofstream outfile;
outfile.open(testConfig.statsPath, ios_base::app);
outfile << "\n" << testConfig.planPath
<< " " << avgTime;
// << " " << maxCategoryIndex
// << " " << testConfig.InputWidth()
// << " " << testConfig.InputHeight()
// << " " << testConfig.MaxBatchSize()
// << " " << testConfig.WorkspaceSize()
// << " " << testConfig.dataType
// << " " << testConfig.NumRuns()
// << " " << testConfig.UseMappedMemory();
outfile.close();
cudaFree(inputDevice);
cudaFree(outputDevice);
cudaFreeHost(input);
cudaFreeHost(output);
engine->destroy();
context->destroy();
runtime->destroy();
}
|
44ed48c0e07ab45b2b5c8b4aed69a38b57906ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_volume;
int xdim0_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_volume;
int ydim0_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_volume;
int xdim1_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_volume;
int ydim1_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_volume;
int xdim2_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_volume;
int ydim2_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim3_initialise_chunk_kernel_volume;
int xdim3_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim3_initialise_chunk_kernel_volume;
int ydim3_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim4_initialise_chunk_kernel_volume;
int xdim4_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim4_initialise_chunk_kernel_volume;
int ydim4_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim5_initialise_chunk_kernel_volume;
int xdim5_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim5_initialise_chunk_kernel_volume;
int ydim5_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim6_initialise_chunk_kernel_volume;
int xdim6_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim6_initialise_chunk_kernel_volume;
int ydim6_initialise_chunk_kernel_volume_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_volume * (y) + \
xdim0_initialise_chunk_kernel_volume * \
ydim0_initialise_chunk_kernel_volume * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_volume * (y) + \
xdim1_initialise_chunk_kernel_volume * \
ydim1_initialise_chunk_kernel_volume * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_volume * (y) + \
xdim2_initialise_chunk_kernel_volume * \
ydim2_initialise_chunk_kernel_volume * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_initialise_chunk_kernel_volume * (y) + \
xdim3_initialise_chunk_kernel_volume * \
ydim3_initialise_chunk_kernel_volume * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_initialise_chunk_kernel_volume * (y) + \
xdim4_initialise_chunk_kernel_volume * \
ydim4_initialise_chunk_kernel_volume * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_initialise_chunk_kernel_volume * (y) + \
xdim5_initialise_chunk_kernel_volume * \
ydim5_initialise_chunk_kernel_volume * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_initialise_chunk_kernel_volume * (y) + \
xdim6_initialise_chunk_kernel_volume * \
ydim6_initialise_chunk_kernel_volume * (z))
// user function
__device__
void
initialise_chunk_kernel_volume_gpu(double *volume, const double *celldy,
double *xarea, const double *celldx,
double *yarea, const double *celldz,
double *zarea) {
double d_x, d_y, d_z;
d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells;
d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells;
d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
volume[OPS_ACC0(0, 0, 0)] = d_x * d_y * d_z;
xarea[OPS_ACC2(0, 0, 0)] =
celldy[OPS_ACC1(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)];
yarea[OPS_ACC4(0, 0, 0)] =
celldx[OPS_ACC3(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)];
zarea[OPS_ACC6(0, 0, 0)] =
celldx[OPS_ACC3(0, 0, 0)] * celldy[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_initialise_chunk_kernel_volume(
double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
double *__restrict arg6, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_volume *
ydim0_initialise_chunk_kernel_volume;
arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_volume +
idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_volume *
ydim1_initialise_chunk_kernel_volume;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_volume *
ydim2_initialise_chunk_kernel_volume;
arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_initialise_chunk_kernel_volume +
idx_z * 0 * 1 * xdim3_initialise_chunk_kernel_volume *
ydim3_initialise_chunk_kernel_volume;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim4_initialise_chunk_kernel_volume *
ydim4_initialise_chunk_kernel_volume;
arg5 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim5_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim5_initialise_chunk_kernel_volume *
ydim5_initialise_chunk_kernel_volume;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim6_initialise_chunk_kernel_volume *
ydim6_initialise_chunk_kernel_volume;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_volume_gpu(arg0, arg1, arg2, arg3, arg4, arg5,
arg6);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_volume(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 7, range, 55))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(55, "initialise_chunk_kernel_volume");
OPS_kernels[55].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_volume_h ||
ydim0 != ydim0_initialise_chunk_kernel_volume_h ||
xdim1 != xdim1_initialise_chunk_kernel_volume_h ||
ydim1 != ydim1_initialise_chunk_kernel_volume_h ||
xdim2 != xdim2_initialise_chunk_kernel_volume_h ||
ydim2 != ydim2_initialise_chunk_kernel_volume_h ||
xdim3 != xdim3_initialise_chunk_kernel_volume_h ||
ydim3 != ydim3_initialise_chunk_kernel_volume_h ||
xdim4 != xdim4_initialise_chunk_kernel_volume_h ||
ydim4 != ydim4_initialise_chunk_kernel_volume_h ||
xdim5 != xdim5_initialise_chunk_kernel_volume_h ||
ydim5 != ydim5_initialise_chunk_kernel_volume_h ||
xdim6 != xdim6_initialise_chunk_kernel_volume_h ||
ydim6 != ydim6_initialise_chunk_kernel_volume_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_volume, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_volume_h = xdim0;
hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_volume, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_volume_h = ydim0;
hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_volume, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_volume_h = xdim1;
hipMemcpyToSymbol(ydim1_initialise_chunk_kernel_volume, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_volume_h = ydim1;
hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_volume, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_volume_h = xdim2;
hipMemcpyToSymbol(ydim2_initialise_chunk_kernel_volume, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_volume_h = ydim2;
hipMemcpyToSymbol(xdim3_initialise_chunk_kernel_volume, &xdim3,
sizeof(int));
xdim3_initialise_chunk_kernel_volume_h = xdim3;
hipMemcpyToSymbol(ydim3_initialise_chunk_kernel_volume, &ydim3,
sizeof(int));
ydim3_initialise_chunk_kernel_volume_h = ydim3;
hipMemcpyToSymbol(xdim4_initialise_chunk_kernel_volume, &xdim4,
sizeof(int));
xdim4_initialise_chunk_kernel_volume_h = xdim4;
hipMemcpyToSymbol(ydim4_initialise_chunk_kernel_volume, &ydim4,
sizeof(int));
ydim4_initialise_chunk_kernel_volume_h = ydim4;
hipMemcpyToSymbol(xdim5_initialise_chunk_kernel_volume, &xdim5,
sizeof(int));
xdim5_initialise_chunk_kernel_volume_h = xdim5;
hipMemcpyToSymbol(ydim5_initialise_chunk_kernel_volume, &ydim5,
sizeof(int));
ydim5_initialise_chunk_kernel_volume_h = ydim5;
hipMemcpyToSymbol(xdim6_initialise_chunk_kernel_volume, &xdim6,
sizeof(int));
xdim6_initialise_chunk_kernel_volume_h = xdim6;
hipMemcpyToSymbol(ydim6_initialise_chunk_kernel_volume, &ydim6,
sizeof(int));
ydim6_initialise_chunk_kernel_volume_h = ydim6;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[7];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[55].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_volume), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[55].time += t1 - t2;
}
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[55].mpi_time += t2 - t1;
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
| 44ed48c0e07ab45b2b5c8b4aed69a38b57906ffb.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_volume;
int xdim0_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim0_initialise_chunk_kernel_volume;
int ydim0_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim1_initialise_chunk_kernel_volume;
int xdim1_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim1_initialise_chunk_kernel_volume;
int ydim1_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim2_initialise_chunk_kernel_volume;
int xdim2_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim2_initialise_chunk_kernel_volume;
int ydim2_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim3_initialise_chunk_kernel_volume;
int xdim3_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim3_initialise_chunk_kernel_volume;
int ydim3_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim4_initialise_chunk_kernel_volume;
int xdim4_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim4_initialise_chunk_kernel_volume;
int ydim4_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim5_initialise_chunk_kernel_volume;
int xdim5_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim5_initialise_chunk_kernel_volume;
int ydim5_initialise_chunk_kernel_volume_h = -1;
__constant__ int xdim6_initialise_chunk_kernel_volume;
int xdim6_initialise_chunk_kernel_volume_h = -1;
__constant__ int ydim6_initialise_chunk_kernel_volume;
int ydim6_initialise_chunk_kernel_volume_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_initialise_chunk_kernel_volume * (y) + \
xdim0_initialise_chunk_kernel_volume * \
ydim0_initialise_chunk_kernel_volume * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_initialise_chunk_kernel_volume * (y) + \
xdim1_initialise_chunk_kernel_volume * \
ydim1_initialise_chunk_kernel_volume * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_initialise_chunk_kernel_volume * (y) + \
xdim2_initialise_chunk_kernel_volume * \
ydim2_initialise_chunk_kernel_volume * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_initialise_chunk_kernel_volume * (y) + \
xdim3_initialise_chunk_kernel_volume * \
ydim3_initialise_chunk_kernel_volume * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_initialise_chunk_kernel_volume * (y) + \
xdim4_initialise_chunk_kernel_volume * \
ydim4_initialise_chunk_kernel_volume * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_initialise_chunk_kernel_volume * (y) + \
xdim5_initialise_chunk_kernel_volume * \
ydim5_initialise_chunk_kernel_volume * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_initialise_chunk_kernel_volume * (y) + \
xdim6_initialise_chunk_kernel_volume * \
ydim6_initialise_chunk_kernel_volume * (z))
// user function
__device__
void
initialise_chunk_kernel_volume_gpu(double *volume, const double *celldy,
double *xarea, const double *celldx,
double *yarea, const double *celldz,
double *zarea) {
double d_x, d_y, d_z;
d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells;
d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells;
d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells;
volume[OPS_ACC0(0, 0, 0)] = d_x * d_y * d_z;
xarea[OPS_ACC2(0, 0, 0)] =
celldy[OPS_ACC1(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)];
yarea[OPS_ACC4(0, 0, 0)] =
celldx[OPS_ACC3(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)];
zarea[OPS_ACC6(0, 0, 0)] =
celldx[OPS_ACC3(0, 0, 0)] * celldy[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_initialise_chunk_kernel_volume(
double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
double *__restrict arg6, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_volume *
ydim0_initialise_chunk_kernel_volume;
arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_volume +
idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_volume *
ydim1_initialise_chunk_kernel_volume;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_volume *
ydim2_initialise_chunk_kernel_volume;
arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_initialise_chunk_kernel_volume +
idx_z * 0 * 1 * xdim3_initialise_chunk_kernel_volume *
ydim3_initialise_chunk_kernel_volume;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim4_initialise_chunk_kernel_volume *
ydim4_initialise_chunk_kernel_volume;
arg5 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim5_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim5_initialise_chunk_kernel_volume *
ydim5_initialise_chunk_kernel_volume;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_initialise_chunk_kernel_volume +
idx_z * 1 * 1 * xdim6_initialise_chunk_kernel_volume *
ydim6_initialise_chunk_kernel_volume;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
initialise_chunk_kernel_volume_gpu(arg0, arg1, arg2, arg3, arg4, arg5,
arg6);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_volume(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6) {
// Timing
double t1, t2, c1, c2;
ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 7, range, 55))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(55, "initialise_chunk_kernel_volume");
OPS_kernels[55].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_initialise_chunk_kernel_volume_h ||
ydim0 != ydim0_initialise_chunk_kernel_volume_h ||
xdim1 != xdim1_initialise_chunk_kernel_volume_h ||
ydim1 != ydim1_initialise_chunk_kernel_volume_h ||
xdim2 != xdim2_initialise_chunk_kernel_volume_h ||
ydim2 != ydim2_initialise_chunk_kernel_volume_h ||
xdim3 != xdim3_initialise_chunk_kernel_volume_h ||
ydim3 != ydim3_initialise_chunk_kernel_volume_h ||
xdim4 != xdim4_initialise_chunk_kernel_volume_h ||
ydim4 != ydim4_initialise_chunk_kernel_volume_h ||
xdim5 != xdim5_initialise_chunk_kernel_volume_h ||
ydim5 != ydim5_initialise_chunk_kernel_volume_h ||
xdim6 != xdim6_initialise_chunk_kernel_volume_h ||
ydim6 != ydim6_initialise_chunk_kernel_volume_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_volume, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_volume_h = xdim0;
cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_volume, &ydim0,
sizeof(int));
ydim0_initialise_chunk_kernel_volume_h = ydim0;
cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_volume, &xdim1,
sizeof(int));
xdim1_initialise_chunk_kernel_volume_h = xdim1;
cudaMemcpyToSymbol(ydim1_initialise_chunk_kernel_volume, &ydim1,
sizeof(int));
ydim1_initialise_chunk_kernel_volume_h = ydim1;
cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_volume, &xdim2,
sizeof(int));
xdim2_initialise_chunk_kernel_volume_h = xdim2;
cudaMemcpyToSymbol(ydim2_initialise_chunk_kernel_volume, &ydim2,
sizeof(int));
ydim2_initialise_chunk_kernel_volume_h = ydim2;
cudaMemcpyToSymbol(xdim3_initialise_chunk_kernel_volume, &xdim3,
sizeof(int));
xdim3_initialise_chunk_kernel_volume_h = xdim3;
cudaMemcpyToSymbol(ydim3_initialise_chunk_kernel_volume, &ydim3,
sizeof(int));
ydim3_initialise_chunk_kernel_volume_h = ydim3;
cudaMemcpyToSymbol(xdim4_initialise_chunk_kernel_volume, &xdim4,
sizeof(int));
xdim4_initialise_chunk_kernel_volume_h = xdim4;
cudaMemcpyToSymbol(ydim4_initialise_chunk_kernel_volume, &ydim4,
sizeof(int));
ydim4_initialise_chunk_kernel_volume_h = ydim4;
cudaMemcpyToSymbol(xdim5_initialise_chunk_kernel_volume, &xdim5,
sizeof(int));
xdim5_initialise_chunk_kernel_volume_h = xdim5;
cudaMemcpyToSymbol(ydim5_initialise_chunk_kernel_volume, &ydim5,
sizeof(int));
ydim5_initialise_chunk_kernel_volume_h = ydim5;
cudaMemcpyToSymbol(xdim6_initialise_chunk_kernel_volume, &xdim6,
sizeof(int));
xdim6_initialise_chunk_kernel_volume_h = xdim6;
cudaMemcpyToSymbol(ydim6_initialise_chunk_kernel_volume, &ydim6,
sizeof(int));
ydim6_initialise_chunk_kernel_volume_h = ydim6;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[7];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args, 7, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[55].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_volume<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[55].time += t1 - t2;
}
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[55].mpi_time += t2 - t1;
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
|
3e86ef39d409d8e5d844fe237e7483e20bcb8031.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int GPUKernel_Position(int i,int j) {
if (i<j){
return j*(j+1)/2+i;
}
return i*(i+1)/2+j;
}
__global__ void GPUKernel_Vp(int a, int v,double * in,double * out) {
int blockid = blockIdx.x*gridDim.y + blockIdx.y;
int id = blockid*blockDim.x + threadIdx.x;
if ( id >= v*v*v ) return;
int d = id%v;
int b = (id-d)%(v*v)/v;
int c = (id-d-b*v)/(v*v);
if ( b < a ) return;
if ( d > c ) return;
int cd = GPUKernel_Position(c,d);
int vtri = v*(v+1)/2;
out[(b-a)*vtri+cd] = in[(b-a)*v*v+d*v+c] + in[(b-a)*v*v+c*v+d];
} | 3e86ef39d409d8e5d844fe237e7483e20bcb8031.cu | #include "includes.h"
__device__ int GPUKernel_Position(int i,int j) {
if (i<j){
return j*(j+1)/2+i;
}
return i*(i+1)/2+j;
}
__global__ void GPUKernel_Vp(int a, int v,double * in,double * out) {
int blockid = blockIdx.x*gridDim.y + blockIdx.y;
int id = blockid*blockDim.x + threadIdx.x;
if ( id >= v*v*v ) return;
int d = id%v;
int b = (id-d)%(v*v)/v;
int c = (id-d-b*v)/(v*v);
if ( b < a ) return;
if ( d > c ) return;
int cd = GPUKernel_Position(c,d);
int vtri = v*(v+1)/2;
out[(b-a)*vtri+cd] = in[(b-a)*v*v+d*v+c] + in[(b-a)*v*v+c*v+d];
} |
4157861f7fd37c4dcaa70d3818cf9f7000fbc304.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Row-wise convolution filtering
* conv_filter_row.h
*
* Copyright (c) 2019-2020 Balazs Nagy,
* Robotics and Perception Group, University of Zurich
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vilib/cuda_common.h"
#include "vilib/preprocess/conv_filter_row.h"
namespace vilib {
#define BLOCKDIM_X 32
#define BLOCKDIM_Y 4
#define RESULT_STEPS 8
#define HALO_STEPS 1
#define INSTANTIATE_1D_ROW(I, O) \
template __host__ void conv_filter_row_gpu<I,O>(const I * d_image_in, \
const int input_pitch, \
O * d_image_out, \
const int output_pitch, \
const int width_px, \
const int height_px, \
const conv_filter_type_t filter_type, \
const conv_filter_border_type_t border_type, \
const bool skip_first_and_last_row, \
const float scale, \
hipStream_t stream)
template<typename I, typename O, int RADIUS, conv_filter_border_type BORDER>
__global__ void conv_filter_row_gpu_shm_kernel(O * __restrict__ output,
const int output_pitch,
const I * __restrict__ input,
const int input_pitch,
const int input_width,
const int output_height,
const filter1x3_t filter,
const float scale) {
__shared__ float s_Data[BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X];
// Offset to the left halo edge
const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y;
if(baseY >= output_height) return;
input += baseY * input_pitch;
output += baseY * output_pitch + baseX;
// Load main data AND right halo
#pragma unroll
for (int i = HALO_STEPS, i_x = HALO_STEPS * BLOCKDIM_X + baseX; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; ++i, i_x+= BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO:
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[input_width - 1];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 1 - i_x];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[i_x - input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 2 - i_x];
break;
}
}
// Load left halo
#pragma unroll
for (int i = 0, i_x = baseX; i < HALO_STEPS; ++i, i_x += BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[0];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x - 1];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[i_x + input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x];
break;
}
}
// Compute and store results
__syncthreads();
#pragma unroll
for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++)
{
float sum = 0.0f;
#pragma unroll
for (int j = -RADIUS; j <= RADIUS; j++)
{
sum += filter.d[RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X + j];
}
sum *= scale;
// Saturate if non-float
if(sizeof(O) < sizeof(float)) {
sum = max(min(sum,255.0f),0.f);
}
if(input_width > i*BLOCKDIM_X + baseX) {
output[i * BLOCKDIM_X] = sum;
}
}
}
template <typename I, typename O>
__host__ void conv_filter_row_gpu(const I * d_image_in,
const int input_pitch,
O * d_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale,
hipStream_t stream) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
int height_px_out = height_px - (skip_first_and_last_row?2:0);
dim3 threads_per_block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 blocks_per_grid((width_px + RESULT_STEPS * BLOCKDIM_X -1) / (RESULT_STEPS * BLOCKDIM_X),
(height_px_out + BLOCKDIM_Y -1) / BLOCKDIM_Y);
// Note: we actually support radiuses up to BLOCKDIM_X * HALO_STEPS, but the filter itself
// is not defined beyond 1
decltype(&conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>) kernel;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REPLICATE>;
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT>;
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_WRAP>;
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT_101>;
break;
default:
assert(0);
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
}
hipLaunchKernelGGL(( kernel), dim3(blocks_per_grid),dim3(threads_per_block),0,stream,
d_image_out + (skip_first_and_last_row?output_pitch:0),
output_pitch,
d_image_in + (skip_first_and_last_row?input_pitch:0),
input_pitch,
width_px,
height_px_out,
filter,
scale);
CUDA_KERNEL_CHECK();
}
__host__ void conv_filter_row_cpu(const unsigned char * h_image_in,
const int input_pitch,
unsigned char * h_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
const int x_min = 0 + (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int x_max = (width_px-1) - (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int y_min = 0 + (skip_first_and_last_row?1:0);
const int y_max = (height_px-1) - (skip_first_and_last_row?1:0);
for(int y=y_min;y<=y_max;++y) {
for(int x=x_min;x<=x_max;++x) {
float accu = 0.0f;
for(int f_x=-1;f_x<=1;++f_x) {
int i_x = x+f_x;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
// nothing to do
break;
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
// nothing to do
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
i_x = min(max(i_x,0),x_max);
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
if(i_x < 0) {
i_x = -1*i_x - 1;
} else if(i_x > x_max) {
i_x = x_max - (i_x-width_px);
}
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
if(i_x < 0) {
i_x += width_px;
} else if(i_x > x_max) {
i_x -= width_px;
}
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
if(i_x < 0) {
i_x *= -1;
} else if(i_x > x_max) {
i_x = 2*x_max - i_x;
}
break;
}
// Handling of BORDER_ZERO
accu += ((i_x < 0 || i_x >= width_px) ? 0.0f : h_image_in[y*input_pitch+i_x])*filter.d[f_x+1];
}
accu *= scale;
h_image_out[y*output_pitch + x] = static_cast<unsigned char>(min(max(accu,0.0f),255.0f));
}
}
}
// Explicit instantiations
INSTANTIATE_1D_ROW(unsigned char, unsigned char);
INSTANTIATE_1D_ROW(unsigned char, float);
INSTANTIATE_1D_ROW(float, unsigned char);
INSTANTIATE_1D_ROW(float, float);
} // namespace vilib | 4157861f7fd37c4dcaa70d3818cf9f7000fbc304.cu | /*
* Row-wise convolution filtering
* conv_filter_row.h
*
* Copyright (c) 2019-2020 Balazs Nagy,
* Robotics and Perception Group, University of Zurich
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vilib/cuda_common.h"
#include "vilib/preprocess/conv_filter_row.h"
namespace vilib {
#define BLOCKDIM_X 32
#define BLOCKDIM_Y 4
#define RESULT_STEPS 8
#define HALO_STEPS 1
#define INSTANTIATE_1D_ROW(I, O) \
template __host__ void conv_filter_row_gpu<I,O>(const I * d_image_in, \
const int input_pitch, \
O * d_image_out, \
const int output_pitch, \
const int width_px, \
const int height_px, \
const conv_filter_type_t filter_type, \
const conv_filter_border_type_t border_type, \
const bool skip_first_and_last_row, \
const float scale, \
cudaStream_t stream)
template<typename I, typename O, int RADIUS, conv_filter_border_type BORDER>
__global__ void conv_filter_row_gpu_shm_kernel(O * __restrict__ output,
const int output_pitch,
const I * __restrict__ input,
const int input_pitch,
const int input_width,
const int output_height,
const filter1x3_t filter,
const float scale) {
__shared__ float s_Data[BLOCKDIM_Y][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_X];
// Offset to the left halo edge
const int baseX = (blockIdx.x * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * BLOCKDIM_Y + threadIdx.y;
if(baseY >= output_height) return;
input += baseY * input_pitch;
output += baseY * output_pitch + baseX;
// Load main data AND right halo
#pragma unroll
for (int i = HALO_STEPS, i_x = HALO_STEPS * BLOCKDIM_X + baseX; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; ++i, i_x+= BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO:
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[input_width - 1];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 1 - i_x];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[i_x - input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (input_width > i_x) ? input[i_x]: input[(input_width<<1) - 2 - i_x];
break;
}
}
// Load left halo
#pragma unroll
for (int i = 0, i_x = baseX; i < HALO_STEPS; ++i, i_x += BLOCKDIM_X) {
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[0];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x - 1];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[i_x + input_width];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X] = (i_x >= 0) ? input[i_x] : input[-i_x];
break;
}
}
// Compute and store results
__syncthreads();
#pragma unroll
for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++)
{
float sum = 0.0f;
#pragma unroll
for (int j = -RADIUS; j <= RADIUS; j++)
{
sum += filter.d[RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * BLOCKDIM_X + j];
}
sum *= scale;
// Saturate if non-float
if(sizeof(O) < sizeof(float)) {
sum = max(min(sum,255.0f),0.f);
}
if(input_width > i*BLOCKDIM_X + baseX) {
output[i * BLOCKDIM_X] = sum;
}
}
}
template <typename I, typename O>
__host__ void conv_filter_row_gpu(const I * d_image_in,
const int input_pitch,
O * d_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale,
cudaStream_t stream) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
int height_px_out = height_px - (skip_first_and_last_row?2:0);
dim3 threads_per_block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 blocks_per_grid((width_px + RESULT_STEPS * BLOCKDIM_X -1) / (RESULT_STEPS * BLOCKDIM_X),
(height_px_out + BLOCKDIM_Y -1) / BLOCKDIM_Y);
// Note: we actually support radiuses up to BLOCKDIM_X * HALO_STEPS, but the filter itself
// is not defined beyond 1
decltype(&conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>) kernel;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REPLICATE>;
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT>;
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_WRAP>;
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT_101>;
break;
default:
assert(0);
kernel = conv_filter_row_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
}
kernel<<<blocks_per_grid,threads_per_block,0,stream>>>(
d_image_out + (skip_first_and_last_row?output_pitch:0),
output_pitch,
d_image_in + (skip_first_and_last_row?input_pitch:0),
input_pitch,
width_px,
height_px_out,
filter,
scale);
CUDA_KERNEL_CHECK();
}
__host__ void conv_filter_row_cpu(const unsigned char * h_image_in,
const int input_pitch,
unsigned char * h_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_row,
const float scale) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
const int x_min = 0 + (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int x_max = (width_px-1) - (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int y_min = 0 + (skip_first_and_last_row?1:0);
const int y_max = (height_px-1) - (skip_first_and_last_row?1:0);
for(int y=y_min;y<=y_max;++y) {
for(int x=x_min;x<=x_max;++x) {
float accu = 0.0f;
for(int f_x=-1;f_x<=1;++f_x) {
int i_x = x+f_x;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
// nothing to do
break;
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
// nothing to do
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
i_x = min(max(i_x,0),x_max);
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
if(i_x < 0) {
i_x = -1*i_x - 1;
} else if(i_x > x_max) {
i_x = x_max - (i_x-width_px);
}
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
if(i_x < 0) {
i_x += width_px;
} else if(i_x > x_max) {
i_x -= width_px;
}
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
if(i_x < 0) {
i_x *= -1;
} else if(i_x > x_max) {
i_x = 2*x_max - i_x;
}
break;
}
// Handling of BORDER_ZERO
accu += ((i_x < 0 || i_x >= width_px) ? 0.0f : h_image_in[y*input_pitch+i_x])*filter.d[f_x+1];
}
accu *= scale;
h_image_out[y*output_pitch + x] = static_cast<unsigned char>(min(max(accu,0.0f),255.0f));
}
}
}
// Explicit instantiations
INSTANTIATE_1D_ROW(unsigned char, unsigned char);
INSTANTIATE_1D_ROW(unsigned char, float);
INSTANTIATE_1D_ROW(float, unsigned char);
INSTANTIATE_1D_ROW(float, float);
} // namespace vilib |
7834bbb415a710e6fa39fb1dffeb5d6eb582a789.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixAddKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixAddKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixAddKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7834bbb415a710e6fa39fb1dffeb5d6eb582a789.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixAddKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixAddKernel<<<gridBlock,threadBlock>>>(c,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixAddKernel<<<gridBlock,threadBlock>>>(c,a,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixAddKernel<<<gridBlock,threadBlock>>>(c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c33d431bbb65e471febd731dd0ef6207042704c8.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright
holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD.
*
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the
applicable *
* underlying intellectual property rights related to the third party
technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited
to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections
730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further,
pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of
Industry *
* and Security or as otherwise permitted pursuant to a License Exception under
*
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export,
*
* re-export or release to a national of a country in Country Groups D:1, E:1 or
*
* E:2 any restricted technology, software, or source code you receive
hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to
*
* national security controls as identified on the Commerce Control List
(currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country
Group *
* listings, or for additional information about the EAR or your obligations
under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's
*
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.h"
__managed__ GraphChiContext *context_zz;
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
//obj_alloc my_obj_alloc(&shared_mem);
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
obj_alloc my_obj_alloc(&shared_mem,atoll(argv[3]));
if (argc == 4) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = hipMallocManaged(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMallocManaged(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMallocManaged(&inrow_d, num_nodes * sizeof(int));
// hipMallocManaged(&context_zz
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMallocManaged(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
// Create buffers for index
err = hipMallocManaged(&index_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc index_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
// err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
vertex = (VirtVertex<int, int> **)my_obj_alloc.calloc<ChiVertex<int, int>*>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// printf("Start initObj\n");
// initObject(vertex, context, row_d, col_d, inrow_d, incol_d, &my_obj_alloc);
// printf("Start initOutEdge\n");
// initOutEdge(vertex, context, row_d, col_d);
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,&my_obj_alloc);
hipLaunchKernelGGL(( part_kern0_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,inrow_d, incol_d);
hipDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,&my_obj_alloc);
hipLaunchKernelGGL(( part_kern1_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,inrow_d, incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n", hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( kern_initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
printf("Fixing pointers \n");
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
// vptrPatch<GraphChiContext><<<1, 1>>>(context,1);
// hipDeviceSynchronize();
// long ***mVtable = (long ***)&vertex[1000];
// printf("Derived VTABLE before: %p %p\n",&vertex[0] ,*mVtable);
// //printf("First entry of Derived VTABLE: %p\n", (void *)mVtable[0][0]);
// vptrPatch<ChiVertex<int, int>><<<grid, threads>>>((ChiVertex<int, int>
// *)vertex,num_nodes);
// hipDeviceSynchronize();
// printf("Derived VTABLE after: %p %p\n",&vertex[0] ,*mVtable);
// //printf("First entry of Derived VTABLE: %p\n", (void *)mVtable[0][0][0]);
// vptrPatch_Edge<<<grid, threads>>>((ChiVertex<int, int> *)vertex,num_nodes);
// hipDeviceSynchronize();
// Run BFS for some iter. TO: convergence determination
// ChiVertex<int, int> *vertex2=(ChiVertex<int, int> *)vertex;
// for(int mm=0;mm<num_nodes;mm++){
// long ***mVtable = (long ***)&vertex2[mm];
// printf("Derived VTABLE before: %d %p %p\n",mm,&vertex2[mm] ,*mVtable);
// }
// my_obj_alloc.create_tree();
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start BFS :)\n");
//BFS<<<grid, threads>>>(vertex, context, i);
hipLaunchKernelGGL(( BFS), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
// BFS_cpu(vertex, context);
printf("Finish BFS\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
// vfunCheck<<<1, 1>>>(vertex);
// hipDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
// vptrPatch<ChiVertex<int, int>><<<grid, threads>>>((ChiVertex<int, int>
// *)vertex,num_nodes);
// hipDeviceSynchronize();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, index_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, index_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result_BFSV.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
| c33d431bbb65e471febd731dd0ef6207042704c8.cu | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright
holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD.
*
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the
applicable *
* underlying intellectual property rights related to the third party
technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited
to *
* the U.S. Export Administration Regulations ("EAR"�) (15 C.F.R Sections
730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further,
pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of
Industry *
* and Security or as otherwise permitted pursuant to a License Exception under
*
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export,
*
* re-export or release to a national of a country in Country Groups D:1, E:1 or
*
* E:2 any restricted technology, software, or source code you receive
hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to
*
* national security controls as identified on the Commerce Control List
(currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country
Group *
* listings, or for additional information about the EAR or your obligations
under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's
*
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.h"
__managed__ GraphChiContext *context_zz;
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
//obj_alloc my_obj_alloc(&shared_mem);
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
obj_alloc my_obj_alloc(&shared_mem,atoll(argv[3]));
if (argc == 4) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = cudaMallocManaged(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMallocManaged(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMallocManaged(&inrow_d, num_nodes * sizeof(int));
// cudaMallocManaged(&context_zz
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMallocManaged(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
// Create buffers for index
err = cudaMallocManaged(&index_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc index_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
// err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
vertex = (VirtVertex<int, int> **)my_obj_alloc.calloc<ChiVertex<int, int>*>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// printf("Start initObj\n");
// initObject(vertex, context, row_d, col_d, inrow_d, incol_d, &my_obj_alloc);
// printf("Start initOutEdge\n");
// initOutEdge(vertex, context, row_d, col_d);
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,&my_obj_alloc);
part_kern0_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,inrow_d, incol_d);
cudaDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,&my_obj_alloc);
part_kern1_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,inrow_d, incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n", cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
kern_initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
printf("Fixing pointers \n");
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
// vptrPatch<GraphChiContext><<<1, 1>>>(context,1);
// cudaDeviceSynchronize();
// long ***mVtable = (long ***)&vertex[1000];
// printf("Derived VTABLE before: %p %p\n",&vertex[0] ,*mVtable);
// //printf("First entry of Derived VTABLE: %p\n", (void *)mVtable[0][0]);
// vptrPatch<ChiVertex<int, int>><<<grid, threads>>>((ChiVertex<int, int>
// *)vertex,num_nodes);
// cudaDeviceSynchronize();
// printf("Derived VTABLE after: %p %p\n",&vertex[0] ,*mVtable);
// //printf("First entry of Derived VTABLE: %p\n", (void *)mVtable[0][0][0]);
// vptrPatch_Edge<<<grid, threads>>>((ChiVertex<int, int> *)vertex,num_nodes);
// cudaDeviceSynchronize();
// Run BFS for some iter. TO: convergence determination
// ChiVertex<int, int> *vertex2=(ChiVertex<int, int> *)vertex;
// for(int mm=0;mm<num_nodes;mm++){
// long ***mVtable = (long ***)&vertex2[mm];
// printf("Derived VTABLE before: %d %p %p\n",mm,&vertex2[mm] ,*mVtable);
// }
// my_obj_alloc.create_tree();
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start BFS :)\n");
//BFS<<<grid, threads>>>(vertex, context, i);
BFS<<<grid, threads>>>(vertex, context, i);
// BFS_cpu(vertex, context);
printf("Finish BFS\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
// vfunCheck<<<1, 1>>>(vertex);
// cudaDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
// vptrPatch<ChiVertex<int, int>><<<grid, threads>>>((ChiVertex<int, int>
// *)vertex,num_nodes);
// cudaDeviceSynchronize();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, index_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, index_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result_BFSV.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
d1aa5bb8b9bf58d10fb403a6a545b0734047df0c.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <iostream>
#include <pthread.h>
#include "../common/simpleTime.cuh"
#include "csr_format.cuh"
#include "../common/cuda_error_check.cuh"
#include "../common/cuda_utilities.cuh"
#include "vwc_process.cuh"
#include "../common/user_specified_structures.h"
#include "../common/user_specified_pre_and_post_processing_functions.hpp"
void csr_format::process(
const int vwsize_or_threads,
std::vector<initial_vertex>* initGraph,
const uint nEdges,
std::ofstream& outputFile,
bool EdgesOnHost ) {
const uint nVertices = initGraph->size();
// Variables collecting timing info.
float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// Allocate host buffers.
host_pinned_buffer<Vertex> vertexValue( nVertices );
host_pinned_buffer<uint> edgesIndices( nVertices + 1 );
edgesIndices.at(0) = 0;
host_pinned_buffer<uint> vertexIndices( nEdges );
host_pinned_buffer<Edge> EdgeValue;
if( sizeof(Edge) > 1 ) EdgeValue.alloc( nEdges );
host_pinned_buffer<Vertex_static> VertexValueStatic;
if( sizeof(Vertex_static) > 1 ) VertexValueStatic.alloc( nVertices );
// Put vertices into host buffer CSR form.
for( uint vIdx = 0; vIdx < nVertices; ++vIdx ) {
initial_vertex& vvv = initGraph->at(vIdx);
vertexValue[ vIdx ] = vvv.vertexValue;
if( sizeof(Vertex_static) > 1 ) VertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
uint nNbrs = vvv.nbrs.size();
uint edgeIdxOffset = edgesIndices[ vIdx ];
for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
neighbor& nbr = vvv.nbrs.at( nbrIdx );
vertexIndices[ edgeIdxOffset + nbrIdx ] = nbr.srcIndex;
if( sizeof(Edge) > 1 ) EdgeValue[ edgeIdxOffset + nbrIdx ] = nbr.edgeValue;
}
edgesIndices[ vIdx + 1 ] = edgeIdxOffset + nNbrs;
}
// Define device buffers.
device_buffer<Vertex> dev_vertexValue;
device_buffer<uint> dev_edgesIndices;
device_buffer<uint> dev_vertexIndices;
device_buffer<Edge> dev_EdgeValue;
device_buffer<Vertex_static> dev_VertexValueStatic;
device_buffer<int> devFinished;
uint vwcGridDimen = 0;
vwcGridDimen = ::ceil( static_cast<float>( nVertices ) / ( VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE / vwsize_or_threads ) );
// Allocate device buffers.
dev_vertexValue.alloc( nVertices );
dev_edgesIndices.alloc( nVertices + 1 );
if( !EdgesOnHost ) dev_vertexIndices.alloc( nEdges );
if( !EdgesOnHost ) if( sizeof(Edge) > 1 ) dev_EdgeValue.alloc( nEdges );
if( sizeof(Vertex_static) > 1 ) dev_VertexValueStatic.alloc( nVertices );
devFinished.alloc( 1 );
// Copy data to device buffers.
setTime();
dev_vertexValue = vertexValue;
dev_edgesIndices = edgesIndices;
if( !EdgesOnHost ) dev_vertexIndices = vertexIndices;
if( !EdgesOnHost ) if( sizeof(Edge) > 1 ) dev_EdgeValue = EdgeValue;
if( sizeof(Vertex_static) > 1 ) dev_VertexValueStatic = VertexValueStatic;
CUDAErrorCheck( hipDeviceSynchronize() );
H2D_copy_time = getTime();
std::cout << "Copying data to device took " << H2D_copy_time << " (ms)." << std::endl;
int finished;
// Iteratively process the graph.
unsigned int IterationCounter = 0;
setTime();
do {
finished = 0;
CUDAErrorCheck( hipMemcpyAsync( devFinished.get_ptr(), &finished, sizeof(int), hipMemcpyHostToDevice ) );
vwc_process(
vwsize_or_threads,
vwcGridDimen,
nVertices,
( !EdgesOnHost ) ? dev_vertexIndices.get_ptr() : vertexIndices.get_ptr(),
dev_edgesIndices.get_ptr(),
dev_vertexValue.get_ptr(),
( !EdgesOnHost ) ? dev_EdgeValue.get_ptr() : EdgeValue.get_ptr(),
dev_VertexValueStatic.get_ptr(),
devFinished.get_ptr() );
CUDAErrorCheck( hipPeekAtLastError() );
CUDAErrorCheck( hipMemcpyAsync( &finished, devFinished.get_ptr(), sizeof(int), hipMemcpyDeviceToHost ) );
CUDAErrorCheck( hipDeviceSynchronize() );
++IterationCounter;
} while( finished == 1 );
processing_time = getTime();
std::cout << "Processing finished in " << processing_time << " (ms).\n";
std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// Copy resulted vertex values back from the device to the host.
setTime();
CUDAErrorCheck( hipMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), vertexValue.sizeInBytes(), hipMemcpyDeviceToHost ) );
D2H_copy_time = getTime();
std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
//std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms)." << std::endl;
//std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// Print the output vertex values to the file.
for( uint vvv = 0; vvv < nVertices; ++vvv ){
// outputFile.write(reinterpret_cast<const char*>(&vvv), sizeof(uint));
outputFile.write(reinterpret_cast<const char*>(&vertexValue[vvv]), sizeof(float));
}
// for( uint vvv = 0; vvv < nVertices; ++vvv )
// print_vertex_output(
// vvv,
// vertexValue[ vvv ],
// outputFile );
//
}
| d1aa5bb8b9bf58d10fb403a6a545b0734047df0c.cu | #include <cmath>
#include <iostream>
#include <pthread.h>
#include "../common/simpleTime.cuh"
#include "csr_format.cuh"
#include "../common/cuda_error_check.cuh"
#include "../common/cuda_utilities.cuh"
#include "vwc_process.cuh"
#include "../common/user_specified_structures.h"
#include "../common/user_specified_pre_and_post_processing_functions.hpp"
void csr_format::process(
const int vwsize_or_threads,
std::vector<initial_vertex>* initGraph,
const uint nEdges,
std::ofstream& outputFile,
bool EdgesOnHost ) {
const uint nVertices = initGraph->size();
// Variables collecting timing info.
float H2D_copy_time = 0, processing_time = 0, D2H_copy_time = 0;
// Allocate host buffers.
host_pinned_buffer<Vertex> vertexValue( nVertices );
host_pinned_buffer<uint> edgesIndices( nVertices + 1 );
edgesIndices.at(0) = 0;
host_pinned_buffer<uint> vertexIndices( nEdges );
host_pinned_buffer<Edge> EdgeValue;
if( sizeof(Edge) > 1 ) EdgeValue.alloc( nEdges );
host_pinned_buffer<Vertex_static> VertexValueStatic;
if( sizeof(Vertex_static) > 1 ) VertexValueStatic.alloc( nVertices );
// Put vertices into host buffer CSR form.
for( uint vIdx = 0; vIdx < nVertices; ++vIdx ) {
initial_vertex& vvv = initGraph->at(vIdx);
vertexValue[ vIdx ] = vvv.vertexValue;
if( sizeof(Vertex_static) > 1 ) VertexValueStatic[ vIdx ] = vvv.VertexValueStatic;
uint nNbrs = vvv.nbrs.size();
uint edgeIdxOffset = edgesIndices[ vIdx ];
for( uint nbrIdx = 0; nbrIdx < nNbrs; ++nbrIdx ) {
neighbor& nbr = vvv.nbrs.at( nbrIdx );
vertexIndices[ edgeIdxOffset + nbrIdx ] = nbr.srcIndex;
if( sizeof(Edge) > 1 ) EdgeValue[ edgeIdxOffset + nbrIdx ] = nbr.edgeValue;
}
edgesIndices[ vIdx + 1 ] = edgeIdxOffset + nNbrs;
}
// Define device buffers.
device_buffer<Vertex> dev_vertexValue;
device_buffer<uint> dev_edgesIndices;
device_buffer<uint> dev_vertexIndices;
device_buffer<Edge> dev_EdgeValue;
device_buffer<Vertex_static> dev_VertexValueStatic;
device_buffer<int> devFinished;
uint vwcGridDimen = 0;
vwcGridDimen = std::ceil( static_cast<float>( nVertices ) / ( VWC_COMPILE_TIME_DEFINED_BLOCK_SIZE / vwsize_or_threads ) );
// Allocate device buffers.
dev_vertexValue.alloc( nVertices );
dev_edgesIndices.alloc( nVertices + 1 );
if( !EdgesOnHost ) dev_vertexIndices.alloc( nEdges );
if( !EdgesOnHost ) if( sizeof(Edge) > 1 ) dev_EdgeValue.alloc( nEdges );
if( sizeof(Vertex_static) > 1 ) dev_VertexValueStatic.alloc( nVertices );
devFinished.alloc( 1 );
// Copy data to device buffers.
setTime();
dev_vertexValue = vertexValue;
dev_edgesIndices = edgesIndices;
if( !EdgesOnHost ) dev_vertexIndices = vertexIndices;
if( !EdgesOnHost ) if( sizeof(Edge) > 1 ) dev_EdgeValue = EdgeValue;
if( sizeof(Vertex_static) > 1 ) dev_VertexValueStatic = VertexValueStatic;
CUDAErrorCheck( cudaDeviceSynchronize() );
H2D_copy_time = getTime();
std::cout << "Copying data to device took " << H2D_copy_time << " (ms)." << std::endl;
int finished;
// Iteratively process the graph.
unsigned int IterationCounter = 0;
setTime();
do {
finished = 0;
CUDAErrorCheck( cudaMemcpyAsync( devFinished.get_ptr(), &finished, sizeof(int), cudaMemcpyHostToDevice ) );
vwc_process(
vwsize_or_threads,
vwcGridDimen,
nVertices,
( !EdgesOnHost ) ? dev_vertexIndices.get_ptr() : vertexIndices.get_ptr(),
dev_edgesIndices.get_ptr(),
dev_vertexValue.get_ptr(),
( !EdgesOnHost ) ? dev_EdgeValue.get_ptr() : EdgeValue.get_ptr(),
dev_VertexValueStatic.get_ptr(),
devFinished.get_ptr() );
CUDAErrorCheck( cudaPeekAtLastError() );
CUDAErrorCheck( cudaMemcpyAsync( &finished, devFinished.get_ptr(), sizeof(int), cudaMemcpyDeviceToHost ) );
CUDAErrorCheck( cudaDeviceSynchronize() );
++IterationCounter;
} while( finished == 1 );
processing_time = getTime();
std::cout << "Processing finished in " << processing_time << " (ms).\n";
std::cout << "Performed " << IterationCounter << " iterations in total.\n";
// Copy resulted vertex values back from the device to the host.
setTime();
CUDAErrorCheck( cudaMemcpy( vertexValue.get_ptr(), dev_vertexValue.get_ptr(), vertexValue.sizeInBytes(), cudaMemcpyDeviceToHost ) );
D2H_copy_time = getTime();
std::cout << "Copying final vertex values back to the host took " << D2H_copy_time << " (ms).\n";
//std::cout << "Total Execution time was " << H2D_copy_time + processing_time + D2H_copy_time << " (ms)." << std::endl;
//std::cout << IterationCounter <<"\t"<< H2D_copy_time <<"\t"<< processing_time <<"\t"<< D2H_copy_time << "\n";
// Print the output vertex values to the file.
for( uint vvv = 0; vvv < nVertices; ++vvv ){
// outputFile.write(reinterpret_cast<const char*>(&vvv), sizeof(uint));
outputFile.write(reinterpret_cast<const char*>(&vertexValue[vvv]), sizeof(float));
}
// for( uint vvv = 0; vvv < nVertices; ++vvv )
// print_vertex_output(
// vvv,
// vertexValue[ vvv ],
// outputFile );
//
}
|
94f5d2770a20ef2baa0716eba39c5f4ce26d3125.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include "hip/hip_runtime.h"
#include "assert.h"
#include <chrono>
#define N (1<<11)
void local_execute(int *c, int *local_a, int *local_b){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
int s =0;
for(int k=0;k<N;k++){
c[i*N+j] = s + local_a[i*N + k]*local_b[j + k*N];
}
}
}
}
//
//int verify(int *c){
// int error = 0;
// for(int i=0;i<size;i++){
// error = error + abs(4-c[i]);
// }
// return error;
//}
//
void check_error(hipError_t e){
assert(e == hipSuccess);
}
//
__global__ void matmul_gpu(int *a,int *b,int *c){
int i = blockIdx.x;
for(int j=0;j<N;j++){
int s =0;
for(int k=0;k<N;k++){
c[i*N+j] = s + a[i*N + k]*b[j + k*N];
}
}
}
//
void gpu_execute(int *local_a, int* local_b, int *local_c){
int *a,*b,*c;
check_error(hipMalloc(&a, N * N * sizeof(int)));
check_error(hipMalloc(&b, N * N * sizeof(int)));
check_error(hipMalloc(&c, N * N * sizeof(int)));
check_error(hipMemcpy(a,local_a,N * N * sizeof(int),hipMemcpyHostToDevice));
check_error(hipMemcpy(b,local_b,N * N * sizeof(int),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( matmul_gpu), dim3(N),dim3(1), 0, 0, a,b,c);
check_error(hipMemcpy(local_c,c,N * N * sizeof(int),hipMemcpyDeviceToHost));
hipDeviceSynchronize();
hipFree(a);
hipFree(b);
hipFree(c);
}
/*
* Current Speed UP:
* GPU Run time 3904ms
* Local run time 80398ms
*/
int main(){
int * local_a = (int *)malloc(sizeof(int) * N * N);
int * local_b = (int *)malloc(sizeof(int) * N * N);
int * c = (int *)malloc(sizeof(int) * N * N);
for(int i=0;i<N;i++){
local_a[i]= 1;
local_b[i]= 1;
}
std::cout << "Matrix Size" << ((N * N * 4)/(1<<20)) <<"MB\n";
auto start_time = std::chrono::high_resolution_clock::now();
gpu_execute(c,local_a,local_b);
auto end_time = std::chrono::high_resolution_clock::now();
std::cout << "GPU Run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
start_time = std::chrono::high_resolution_clock::now();
local_execute(c,local_a,local_b);
end_time = std::chrono::high_resolution_clock::now();
std::cout << "Local run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
free(local_a);
free(local_b);
free(c);
// gpu_execute(local_a,local_b,c);
// std::cout << "Max Error" << verify(c) <<"\n";
}
| 94f5d2770a20ef2baa0716eba39c5f4ce26d3125.cu | #include<iostream>
#include "cuda.h"
#include "assert.h"
#include <chrono>
#define N (1<<11)
void local_execute(int *c, int *local_a, int *local_b){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
int s =0;
for(int k=0;k<N;k++){
c[i*N+j] = s + local_a[i*N + k]*local_b[j + k*N];
}
}
}
}
//
//int verify(int *c){
// int error = 0;
// for(int i=0;i<size;i++){
// error = error + abs(4-c[i]);
// }
// return error;
//}
//
void check_error(cudaError_t e){
assert(e == cudaSuccess);
}
//
__global__ void matmul_gpu(int *a,int *b,int *c){
int i = blockIdx.x;
for(int j=0;j<N;j++){
int s =0;
for(int k=0;k<N;k++){
c[i*N+j] = s + a[i*N + k]*b[j + k*N];
}
}
}
//
void gpu_execute(int *local_a, int* local_b, int *local_c){
int *a,*b,*c;
check_error(cudaMalloc(&a, N * N * sizeof(int)));
check_error(cudaMalloc(&b, N * N * sizeof(int)));
check_error(cudaMalloc(&c, N * N * sizeof(int)));
check_error(cudaMemcpy(a,local_a,N * N * sizeof(int),cudaMemcpyHostToDevice));
check_error(cudaMemcpy(b,local_b,N * N * sizeof(int),cudaMemcpyHostToDevice));
matmul_gpu<<<N,1>>>(a,b,c);
check_error(cudaMemcpy(local_c,c,N * N * sizeof(int),cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
/*
* Current Speed UP:
* GPU Run time 3904ms
* Local run time 80398ms
*/
int main(){
int * local_a = (int *)malloc(sizeof(int) * N * N);
int * local_b = (int *)malloc(sizeof(int) * N * N);
int * c = (int *)malloc(sizeof(int) * N * N);
for(int i=0;i<N;i++){
local_a[i]= 1;
local_b[i]= 1;
}
std::cout << "Matrix Size" << ((N * N * 4)/(1<<20)) <<"MB\n";
auto start_time = std::chrono::high_resolution_clock::now();
gpu_execute(c,local_a,local_b);
auto end_time = std::chrono::high_resolution_clock::now();
std::cout << "GPU Run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
start_time = std::chrono::high_resolution_clock::now();
local_execute(c,local_a,local_b);
end_time = std::chrono::high_resolution_clock::now();
std::cout << "Local run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
free(local_a);
free(local_b);
free(c);
// gpu_execute(local_a,local_b,c);
// std::cout << "Max Error" << verify(c) <<"\n";
}
|
agg.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
__global__ void agg(
const int n,
const int nz,
const int zone_leap,
const float *xy,
int *zone_num,
int *zone_node
){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i>=n){
return;
}
const int ii = 2*i;
const int zi = (int) floor(xy[ii]*nz);
const int zj = (int) floor(xy[ii+1]*nz);
const int z = zi*nz + zj;
const int o = atomicAdd(&zone_num[z], 1);
zone_node[z*zone_leap+o] = i;
}
| agg.cu | #define THREADS _THREADS_
__global__ void agg(
const int n,
const int nz,
const int zone_leap,
const float *xy,
int *zone_num,
int *zone_node
){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i>=n){
return;
}
const int ii = 2*i;
const int zi = (int) floor(xy[ii]*nz);
const int zj = (int) floor(xy[ii+1]*nz);
const int z = zi*nz + zj;
const int o = atomicAdd(&zone_num[z], 1);
zone_node[z*zone_leap+o] = i;
}
|
dfd740659dca29c3eba42b73767ae62ec021f8da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <util/timer.h>
#include <util/dimage.h>
#include <util/image_ops.h>
#include <iostream>
#include <sstream>
#include <cstdio> // for sprintf
#include <cerrno> // for errno
#include <cstring> // for strerror
#include "morph.h"
#include "imgio.h"
#include "pyramid.h"
#define USE_IMAGEMAGICK 1
Pyramid::~Pyramid()
{
for(size_t i=0; i<m_data.size(); ++i)
delete m_data[i];
}
PyramidLevel &Pyramid::append_new(int w, int h)
{
m_data.push_back(new PyramidLevel(w,h));
return *m_data.back();
}
PyramidLevel::PyramidLevel(int w, int h)
: width(w), height(h)
{
// align on 128 byte boundary
rowstride = (w + 31)/32 * 32;
inv_wh = 1.0f/(w*h);
hipChannelFormatDesc ccd = hipCreateChannelDesc<float>();
hipMallocArray(&img0, &ccd, w, h);
hipMallocArray(&img1, &ccd, w, h);
size_t size = rowstride*h;
v.resize(size);
ssim.cross.resize(size);
ssim.luma.resize(size);
ssim.mean.resize(size);
ssim.var.resize(size);
ssim.value.resize(size);
ssim.counter.resize(size);
tps.axy.resize(size);
tps.b.resize(size);
ui.axy.resize(size);
ui.b.resize(size);
impmask_rowstride = (w+4)/5+2;
improving_mask.resize(impmask_rowstride*((h+4)/5+2));
}
PyramidLevel::~PyramidLevel()
{
hipFreeArray(img0);
hipFreeArray(img1);
}
KernPyramidLevel::KernPyramidLevel(PyramidLevel &lvl)
{
ssim.cross = &lvl.ssim.cross;
ssim.var = &lvl.ssim.var;
ssim.mean = &lvl.ssim.mean;
ssim.luma = &lvl.ssim.luma;
ssim.value = &lvl.ssim.value;
ssim.counter = &lvl.ssim.counter;
tps.axy = &lvl.tps.axy;
tps.b = &lvl.tps.b;
ui.axy = &lvl.ui.axy;
ui.b = &lvl.ui.b;
v = &lvl.v;
improving_mask = &lvl.improving_mask;
rowstride = lvl.rowstride;
impmask_rowstride = lvl.impmask_rowstride;
pixdim = make_int2(lvl.width, lvl.height);
inv_wh = lvl.inv_wh;
}
template <class T>
T log2(const T &v)/*{{{*/
{
using std::log;
return log(v)/log(T(2));
}/*}}}*/
// VC < 10
#if defined(_MSC_VER) && _MSC_VER < 1600
inline float round(float v)
{
return (float)(int)(v+0.5);
}
#endif
void create_pyramid(Pyramid &pyr,
const rod::dimage<float3> &img0,
const rod::dimage<float3> &img1,
int start_res, bool verbose)
{
rod::base_timer &timer_total = rod::timers.gpu_add("Pyramid creation");
size_t nlevels
= (size_t)(log2((float)::min(img0.width(),img0.height())) - log2((float)start_res))+1;
rod::base_timer *timer
= &rod::timers.gpu_add("level 0",img0.width()*img0.height(),"P");
rod::dimage<float> luma0(img0.width(),img0.height()),
luma1(img1.width(),img1.height());
PyramidLevel &lvl0 = pyr.append_new(img0.width(), img0.height());
luminance(&luma0, &img0);
luminance(&luma1, &img1);
copy_to_array(lvl0.img0, &luma0);
copy_to_array(lvl0.img1, &luma1);
timer->stop();
for(size_t l=1; l<nlevels; ++l)
{
int w = (int)round(img0.width()/((float)(1<<l))),
h = (int)round(img0.height()/((float)(1<<l)));
if(verbose)
std::clog << "Level " << l << ": " << w << "x" << h << std::endl;
std::ostringstream ss;
ss << "level " << l;
rod::scoped_timer_stop sts(rod::timers.gpu_add(ss.str(), w*h,"P"));
PyramidLevel &lvl = pyr.append_new(w,h);
rod::dimage<float> luma(w,h);
::downsample(luma, luma0);
copy_to_array(lvl.img0, &luma);
::downsample(luma, luma1);
copy_to_array(lvl.img1, &luma);
}
timer_total.stop();
}
template <class T>
__global__ void internal_vector_to_image(rod::dimage_ptr<T> res,
const T *v, KernPyramidLevel lvl,
T mult)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!res.is_inside(pos))
return;
res += res.offset_at(pos);
lvl.v += mem_index(lvl, pos);
*res = *lvl.v * mult;
}
template <class T>
void internal_vector_to_image(rod::dimage<T> &dest,
const rod::dvector<T> &orig,
const PyramidLevel &lvl,
T mult)
{
dim3 bdim(32,8),
gdim((lvl.width+bdim.x-1)/bdim.x,
(lvl.height+bdim.y-1)/bdim.y);
dest.resize(lvl.width,lvl.height);
// const cast is harmless
KernPyramidLevel klvl(const_cast<PyramidLevel&>(lvl));
dest.resize(lvl.width, lvl.height);
hipLaunchKernelGGL(( internal_vector_to_image), dim3(gdim), dim3(bdim), 0, 0, &dest, &orig, klvl, mult);
}
template
void internal_vector_to_image(rod::dimage<float> &dest,
const rod::dvector<float> &orig,
const PyramidLevel &lvl,
float mult);
template
void internal_vector_to_image(rod::dimage<float2> &dest,
const rod::dvector<float2> &orig,
const PyramidLevel &lvl,
float2 mult);
template <class T>
__global__ void image_to_internal_vector(T *v,
rod::dimage_ptr<const T> in,
KernPyramidLevel lvl,
T mult)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!in.is_inside(pos))
return;
in += in.offset_at(pos);
v += mem_index(lvl, pos);
*v = *in * mult;
}
template <class T>
void image_to_internal_vector(rod::dvector<T> &dest,
const rod::dimage<T> &orig,
const PyramidLevel &lvl,
T mult)
{
assert(lvl.width == orig.width());
assert(lvl.height == orig.height());
// const cast is harmless
KernPyramidLevel klvl(const_cast<PyramidLevel&>(lvl));
dest.resize(lvl.width*lvl.height);
dim3 bdim(32,8),
gdim((lvl.width+bdim.x-1)/bdim.x,
(lvl.height+bdim.y-1)/bdim.y);
hipLaunchKernelGGL(( image_to_internal_vector<T>), dim3(gdim), dim3(bdim), 0, 0, &dest, &orig, klvl, mult);
}
template
void image_to_internal_vector(rod::dvector<float> &dest,
const rod::dimage<float> &orig,
const PyramidLevel &lvl, float mult);
template
void image_to_internal_vector(rod::dvector<float2> &dest,
const rod::dimage<float2> &orig,
const PyramidLevel &lvl, float2 mult);
| dfd740659dca29c3eba42b73767ae62ec021f8da.cu | #include <cmath>
#include <util/timer.h>
#include <util/dimage.h>
#include <util/image_ops.h>
#include <iostream>
#include <sstream>
#include <cstdio> // for sprintf
#include <cerrno> // for errno
#include <cstring> // for strerror
#include "morph.h"
#include "imgio.h"
#include "pyramid.h"
#define USE_IMAGEMAGICK 1
Pyramid::~Pyramid()
{
for(size_t i=0; i<m_data.size(); ++i)
delete m_data[i];
}
PyramidLevel &Pyramid::append_new(int w, int h)
{
m_data.push_back(new PyramidLevel(w,h));
return *m_data.back();
}
PyramidLevel::PyramidLevel(int w, int h)
: width(w), height(h)
{
// align on 128 byte boundary
rowstride = (w + 31)/32 * 32;
inv_wh = 1.0f/(w*h);
cudaChannelFormatDesc ccd = cudaCreateChannelDesc<float>();
cudaMallocArray(&img0, &ccd, w, h);
cudaMallocArray(&img1, &ccd, w, h);
size_t size = rowstride*h;
v.resize(size);
ssim.cross.resize(size);
ssim.luma.resize(size);
ssim.mean.resize(size);
ssim.var.resize(size);
ssim.value.resize(size);
ssim.counter.resize(size);
tps.axy.resize(size);
tps.b.resize(size);
ui.axy.resize(size);
ui.b.resize(size);
impmask_rowstride = (w+4)/5+2;
improving_mask.resize(impmask_rowstride*((h+4)/5+2));
}
PyramidLevel::~PyramidLevel()
{
cudaFreeArray(img0);
cudaFreeArray(img1);
}
KernPyramidLevel::KernPyramidLevel(PyramidLevel &lvl)
{
ssim.cross = &lvl.ssim.cross;
ssim.var = &lvl.ssim.var;
ssim.mean = &lvl.ssim.mean;
ssim.luma = &lvl.ssim.luma;
ssim.value = &lvl.ssim.value;
ssim.counter = &lvl.ssim.counter;
tps.axy = &lvl.tps.axy;
tps.b = &lvl.tps.b;
ui.axy = &lvl.ui.axy;
ui.b = &lvl.ui.b;
v = &lvl.v;
improving_mask = &lvl.improving_mask;
rowstride = lvl.rowstride;
impmask_rowstride = lvl.impmask_rowstride;
pixdim = make_int2(lvl.width, lvl.height);
inv_wh = lvl.inv_wh;
}
template <class T>
T log2(const T &v)/*{{{*/
{
using std::log;
return log(v)/log(T(2));
}/*}}}*/
// VC < 10
#if defined(_MSC_VER) && _MSC_VER < 1600
inline float round(float v)
{
return (float)(int)(v+0.5);
}
#endif
void create_pyramid(Pyramid &pyr,
const rod::dimage<float3> &img0,
const rod::dimage<float3> &img1,
int start_res, bool verbose)
{
rod::base_timer &timer_total = rod::timers.gpu_add("Pyramid creation");
size_t nlevels
= (size_t)(log2((float)std::min(img0.width(),img0.height())) - log2((float)start_res))+1;
rod::base_timer *timer
= &rod::timers.gpu_add("level 0",img0.width()*img0.height(),"P");
rod::dimage<float> luma0(img0.width(),img0.height()),
luma1(img1.width(),img1.height());
PyramidLevel &lvl0 = pyr.append_new(img0.width(), img0.height());
luminance(&luma0, &img0);
luminance(&luma1, &img1);
copy_to_array(lvl0.img0, &luma0);
copy_to_array(lvl0.img1, &luma1);
timer->stop();
for(size_t l=1; l<nlevels; ++l)
{
int w = (int)round(img0.width()/((float)(1<<l))),
h = (int)round(img0.height()/((float)(1<<l)));
if(verbose)
std::clog << "Level " << l << ": " << w << "x" << h << std::endl;
std::ostringstream ss;
ss << "level " << l;
rod::scoped_timer_stop sts(rod::timers.gpu_add(ss.str(), w*h,"P"));
PyramidLevel &lvl = pyr.append_new(w,h);
rod::dimage<float> luma(w,h);
::downsample(luma, luma0);
copy_to_array(lvl.img0, &luma);
::downsample(luma, luma1);
copy_to_array(lvl.img1, &luma);
}
timer_total.stop();
}
template <class T>
__global__ void internal_vector_to_image(rod::dimage_ptr<T> res,
const T *v, KernPyramidLevel lvl,
T mult)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!res.is_inside(pos))
return;
res += res.offset_at(pos);
lvl.v += mem_index(lvl, pos);
*res = *lvl.v * mult;
}
template <class T>
void internal_vector_to_image(rod::dimage<T> &dest,
const rod::dvector<T> &orig,
const PyramidLevel &lvl,
T mult)
{
dim3 bdim(32,8),
gdim((lvl.width+bdim.x-1)/bdim.x,
(lvl.height+bdim.y-1)/bdim.y);
dest.resize(lvl.width,lvl.height);
// const cast is harmless
KernPyramidLevel klvl(const_cast<PyramidLevel&>(lvl));
dest.resize(lvl.width, lvl.height);
internal_vector_to_image<<<gdim, bdim>>>(&dest, &orig, klvl, mult);
}
template
void internal_vector_to_image(rod::dimage<float> &dest,
const rod::dvector<float> &orig,
const PyramidLevel &lvl,
float mult);
template
void internal_vector_to_image(rod::dimage<float2> &dest,
const rod::dvector<float2> &orig,
const PyramidLevel &lvl,
float2 mult);
template <class T>
__global__ void image_to_internal_vector(T *v,
rod::dimage_ptr<const T> in,
KernPyramidLevel lvl,
T mult)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
if(!in.is_inside(pos))
return;
in += in.offset_at(pos);
v += mem_index(lvl, pos);
*v = *in * mult;
}
template <class T>
void image_to_internal_vector(rod::dvector<T> &dest,
const rod::dimage<T> &orig,
const PyramidLevel &lvl,
T mult)
{
assert(lvl.width == orig.width());
assert(lvl.height == orig.height());
// const cast is harmless
KernPyramidLevel klvl(const_cast<PyramidLevel&>(lvl));
dest.resize(lvl.width*lvl.height);
dim3 bdim(32,8),
gdim((lvl.width+bdim.x-1)/bdim.x,
(lvl.height+bdim.y-1)/bdim.y);
image_to_internal_vector<T><<<gdim, bdim>>>(&dest, &orig, klvl, mult);
}
template
void image_to_internal_vector(rod::dvector<float> &dest,
const rod::dimage<float> &orig,
const PyramidLevel &lvl, float mult);
template
void image_to_internal_vector(rod::dvector<float2> &dest,
const rod::dimage<float2> &orig,
const PyramidLevel &lvl, float2 mult);
|
7c5798fa36db3ae05e045c30694c0f1f7dcd4751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "math_op.h"
#include <cstring>
#include <cstdint>
#include <cmath>
#include <iostream>
#include <device_launch_parameters.h>
#include <rocblas.h>
#include "core/common.h"
namespace alchemy {
/*
* ===========================================================================
* Prototypes for level 1 BLAS routines
* ===========================================================================
*/
template <>
void vector_axpy_gpu(const int count, const float alpha, const float* X, float *Y)
{
hipblasSaxpy(Global::cublas_handle(), count, &alpha, X, 1, Y, 1);
}
template <>
void vector_axpy_gpu(const int count, const double alpha, const double* X, double *Y)
{
hipblasDaxpy(Global::cublas_handle(), count, &alpha, X, 1, Y, 1);
}
template <>
void vector_scal_gpu(const int count, const float alpha, float* X)
{
hipblasSscal(Global::cublas_handle(), count, &alpha, X, 1);
}
template <>
void vector_scal_gpu(const int count, const double alpha, double* X)
{
hipblasDscal(Global::cublas_handle(), count, &alpha, X, 1);
}
template <>
void vector_copy_gpu(const int count, const float* X, float* Y)
{
hipblasScopy(Global::cublas_handle(), count, X, 1, Y, 1);
}
template <>
void vector_copy_gpu(const int count, const double* X, double* Y)
{
hipblasDcopy(Global::cublas_handle(), count, X, 1, Y, 1);
}
template <typename T>
__global__ void set_kernel(const int count, const T value, T* X) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
X[i] = value;
}
}
template <typename T>
void vector_set_gpu(const int count, const T value, T* X)
{
if(value == (T)0) {
hipMemset(X, value, count * sizeof(T));
return ;
}
hipLaunchKernelGGL(( set_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, value, X);
}
template void vector_set_gpu<float>(const int count, const float value, float * X);
template void vector_set_gpu<double>(const int count, const double value, double * X);
template <typename T>
__global__ void sub_kernel(const int count, const T* A, const T* B,T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] - B[i];
}
}
template <typename T>
void vector_sub_gpu(const int count, const T* A, const T* B, T* C)
{
hipLaunchKernelGGL(( sub_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A, B, C);
}
template void vector_sub_gpu<float>(const int count, const float* A, const float* B, float* C);
template void vector_sub_gpu<double>(const int count, const double* A, const double* B, double* C);
template <typename T>
__global__ void add_kernel(const int count, const T* A, const T* B,T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] + B[i];
}
}
template <typename T>
void vector_add_gpu(const int count, const T* A, const T* B, T* C)
{
hipLaunchKernelGGL(( add_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A, B, C);
}
template void vector_add_gpu<float>(const int count, const float* A, const float* B, float* C);
template void vector_add_gpu<double>(const int count, const double* A, const double* B, double* C);
template <>
float vector_dot_gpu(const int count, const float* A, const float* B)
{
float result = 0;
hipblasSdot(Global::cublas_handle(), count, A, 1, B, 1, &result);
return result;
}
template <>
double vector_dot_gpu(const int count, const double* A, const double* B)
{
double result = 0;
hipblasDdot(Global::cublas_handle(), count, A, 1, B, 1, &result);
return result;
}
template <typename T>
__global__ void exp_kernel(const int count, const T* A, T* B) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
B[i] = ::exp(A[i]);
}
}
template <typename T>
void vector_exp_gpu(const int count, const T* A, T* B)
{
hipLaunchKernelGGL(( exp_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A, B);
}
template void vector_exp_gpu(const int count, const float* A, float* B);
template void vector_exp_gpu(const int count, const double* A, double* B);
template <typename T>
__global__ void div_kernel(const int count, const T* A, const T* B, T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] / B[i];
}
}
template <typename T>
void vector_div_gpu(const int count, const T* A, const T* B, T* C)
{
hipLaunchKernelGGL(( div_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A, B, C);
}
template void vector_div_gpu(const int count, const float* A, const float* B, float* C);
template void vector_div_gpu(const int count, const double* A, const double* B, double* C);
template <typename T>
__global__ void sign_kernel(const int count, const T* A, T* B) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
B[i] = (A[i] > 0) - (0 > A[i]);
}
}
template <typename T>
void vector_sign_gpu(const int count, const T* A, T*B)
{
hipLaunchKernelGGL(( sign_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A, B);
}
template void vector_sign_gpu(const int count, const float* A, float* B);
template void vector_sign_gpu(const int count, const double* A, double* B);
/*
* ===========================================================================
* Prototypes for level 2 BLAS
* ===========================================================================
*/
template <>
void matvec_mul_gpu<float>(const enum CBLAS_TRANSPOSE TransA,
const int M, const int N,
const float alpha, const float *A, const float *X,
const float beta, float *Y)
{
hipblasOperation_t transa = (TransA == CblasTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasSgemv(Global::cublas_handle(), transa, N, M, &alpha, A, N, X, 1, &beta, Y, 1);
}
template <>
void matvec_mul_gpu<double>(const enum CBLAS_TRANSPOSE TransA,
const int M, const int N,
const double alpha, const double *A, const double *X,
const double beta, double *Y)
{
hipblasOperation_t transa = (TransA == CblasTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasDgemv(Global::cublas_handle(), transa, N, M, &alpha, A, N, X, 1, &beta, Y, 1);
}
/*
* ===========================================================================
* Prototypes for level 3 BLAS
* ===========================================================================
*/
template <>
void matrix_mul_gpu<float>(const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K,
const float alpha, const float *A, const float *B,
const float beta, float *C)
{
auto lda = (TransA == CblasNoTrans) ? K : M;
auto ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t transa = (TransA == CblasTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t transb = (TransB == CblasTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasSgemm(Global::cublas_handle(), transb, transa, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N);
}
template <>
void matrix_mul_gpu<double>(const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K,
const double alpha, const double *A, const double *B,
const double beta, double *C)
{
auto lda = (TransA == CblasNoTrans) ? K : M;
auto ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t transa = (TransA == CblasTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t transb = (TransB == CblasTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasDgemm(Global::cublas_handle(), transb, transa, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N);
}
///
__global__ void print_kernel(const int count, const float* A) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
if(i == 0) printf("GPU: ");
printf("(%d, %f), ", i, A[i]);
if(i + 1 == count) printf("\n");
}
}
template <>
void print_gpu(const int count, const float* A)
{
hipLaunchKernelGGL(( print_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A);
}
__global__ void print_kernel(const int count, const double* A) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
if(i == 0) printf("GPU: ");
printf("(%d, %f), ", i, A[i]);
if(i + 1 == count) printf("\n");
}
}
template <>
void print_gpu(const int count, const double* A)
{
hipLaunchKernelGGL(( print_kernel), dim3(CUDA_BLOCK_NUM(count)), dim3(CUDA_THREAD_NUM), 0, 0, count, A);
}
} | 7c5798fa36db3ae05e045c30694c0f1f7dcd4751.cu | #include "math_op.h"
#include <cstring>
#include <cstdint>
#include <cmath>
#include <iostream>
#include <device_launch_parameters.h>
#include <cublas_v2.h>
#include "core/common.h"
namespace alchemy {
/*
* ===========================================================================
* Prototypes for level 1 BLAS routines
* ===========================================================================
*/
template <>
void vector_axpy_gpu(const int count, const float alpha, const float* X, float *Y)
{
cublasSaxpy(Global::cublas_handle(), count, &alpha, X, 1, Y, 1);
}
template <>
void vector_axpy_gpu(const int count, const double alpha, const double* X, double *Y)
{
cublasDaxpy(Global::cublas_handle(), count, &alpha, X, 1, Y, 1);
}
template <>
void vector_scal_gpu(const int count, const float alpha, float* X)
{
cublasSscal(Global::cublas_handle(), count, &alpha, X, 1);
}
template <>
void vector_scal_gpu(const int count, const double alpha, double* X)
{
cublasDscal(Global::cublas_handle(), count, &alpha, X, 1);
}
template <>
void vector_copy_gpu(const int count, const float* X, float* Y)
{
cublasScopy(Global::cublas_handle(), count, X, 1, Y, 1);
}
template <>
void vector_copy_gpu(const int count, const double* X, double* Y)
{
cublasDcopy(Global::cublas_handle(), count, X, 1, Y, 1);
}
template <typename T>
__global__ void set_kernel(const int count, const T value, T* X) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
X[i] = value;
}
}
template <typename T>
void vector_set_gpu(const int count, const T value, T* X)
{
if(value == (T)0) {
cudaMemset(X, value, count * sizeof(T));
return ;
}
set_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, value, X);
}
template void vector_set_gpu<float>(const int count, const float value, float * X);
template void vector_set_gpu<double>(const int count, const double value, double * X);
template <typename T>
__global__ void sub_kernel(const int count, const T* A, const T* B,T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] - B[i];
}
}
template <typename T>
void vector_sub_gpu(const int count, const T* A, const T* B, T* C)
{
sub_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A, B, C);
}
template void vector_sub_gpu<float>(const int count, const float* A, const float* B, float* C);
template void vector_sub_gpu<double>(const int count, const double* A, const double* B, double* C);
template <typename T>
__global__ void add_kernel(const int count, const T* A, const T* B,T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] + B[i];
}
}
template <typename T>
void vector_add_gpu(const int count, const T* A, const T* B, T* C)
{
add_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A, B, C);
}
template void vector_add_gpu<float>(const int count, const float* A, const float* B, float* C);
template void vector_add_gpu<double>(const int count, const double* A, const double* B, double* C);
template <>
float vector_dot_gpu(const int count, const float* A, const float* B)
{
float result = 0;
cublasSdot(Global::cublas_handle(), count, A, 1, B, 1, &result);
return result;
}
template <>
double vector_dot_gpu(const int count, const double* A, const double* B)
{
double result = 0;
cublasDdot(Global::cublas_handle(), count, A, 1, B, 1, &result);
return result;
}
template <typename T>
__global__ void exp_kernel(const int count, const T* A, T* B) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
B[i] = std::exp(A[i]);
}
}
template <typename T>
void vector_exp_gpu(const int count, const T* A, T* B)
{
exp_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A, B);
}
template void vector_exp_gpu(const int count, const float* A, float* B);
template void vector_exp_gpu(const int count, const double* A, double* B);
template <typename T>
__global__ void div_kernel(const int count, const T* A, const T* B, T* C) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
C[i] = A[i] / B[i];
}
}
template <typename T>
void vector_div_gpu(const int count, const T* A, const T* B, T* C)
{
div_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A, B, C);
}
template void vector_div_gpu(const int count, const float* A, const float* B, float* C);
template void vector_div_gpu(const int count, const double* A, const double* B, double* C);
template <typename T>
__global__ void sign_kernel(const int count, const T* A, T* B) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
B[i] = (A[i] > 0) - (0 > A[i]);
}
}
template <typename T>
void vector_sign_gpu(const int count, const T* A, T*B)
{
sign_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A, B);
}
template void vector_sign_gpu(const int count, const float* A, float* B);
template void vector_sign_gpu(const int count, const double* A, double* B);
/*
* ===========================================================================
* Prototypes for level 2 BLAS
* ===========================================================================
*/
template <>
void matvec_mul_gpu<float>(const enum CBLAS_TRANSPOSE TransA,
const int M, const int N,
const float alpha, const float *A, const float *X,
const float beta, float *Y)
{
cublasOperation_t transa = (TransA == CblasTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasSgemv(Global::cublas_handle(), transa, N, M, &alpha, A, N, X, 1, &beta, Y, 1);
}
template <>
void matvec_mul_gpu<double>(const enum CBLAS_TRANSPOSE TransA,
const int M, const int N,
const double alpha, const double *A, const double *X,
const double beta, double *Y)
{
cublasOperation_t transa = (TransA == CblasTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasDgemv(Global::cublas_handle(), transa, N, M, &alpha, A, N, X, 1, &beta, Y, 1);
}
/*
* ===========================================================================
* Prototypes for level 3 BLAS
* ===========================================================================
*/
template <>
void matrix_mul_gpu<float>(const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K,
const float alpha, const float *A, const float *B,
const float beta, float *C)
{
auto lda = (TransA == CblasNoTrans) ? K : M;
auto ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t transa = (TransA == CblasTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t transb = (TransB == CblasTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasSgemm(Global::cublas_handle(), transb, transa, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N);
}
template <>
void matrix_mul_gpu<double>(const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K,
const double alpha, const double *A, const double *B,
const double beta, double *C)
{
auto lda = (TransA == CblasNoTrans) ? K : M;
auto ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t transa = (TransA == CblasTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t transb = (TransB == CblasTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasDgemm(Global::cublas_handle(), transb, transa, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N);
}
///
__global__ void print_kernel(const int count, const float* A) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
if(i == 0) printf("GPU: ");
printf("(%d, %f), ", i, A[i]);
if(i + 1 == count) printf("\n");
}
}
template <>
void print_gpu(const int count, const float* A)
{
print_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A);
}
__global__ void print_kernel(const int count, const double* A) {
for(auto i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
if(i == 0) printf("GPU: ");
printf("(%d, %f), ", i, A[i]);
if(i + 1 == count) printf("\n");
}
}
template <>
void print_gpu(const int count, const double* A)
{
print_kernel<<<CUDA_BLOCK_NUM(count), CUDA_THREAD_NUM>>>(count, A);
}
} |
c751832a8ed588dba6fb83944f6a274dcd894557.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <utility>
#include <vector>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
const Dtype* permut, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
int in_n = static_cast<int>(permut[n]);
out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
bottom[1]->cpu_data());
if (top[0]->count() == 0) {
return;
}
int threads = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
}
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
const Dtype* in, const Dtype* top_indexes,
const Dtype* begins, const Dtype* counts,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
out[index] = 0;
int lower = static_cast<int>(begins[n]);
int upper = lower + static_cast<int>(counts[n]);
for (int i = lower; i < upper; ++i) {
int in_n = static_cast<int>(top_indexes[i]);
out[index] += in[in_n * (inner_dim) + index % (inner_dim)];
}
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[1]) << "Cannot backprop to index.";
if (!propagate_down[0]) {
return;
}
vector<std::pair<int, int> > mapping;
const Dtype* perm = bottom[1]->cpu_data();
for (int i = 0; i < bottom[1]->count(); ++i) {
mapping.push_back(pair<int, int>(static_cast<int>(perm[i]), i));
}
std::sort(mapping.begin(), mapping.end(), pair_sort_first());
// Each element of the bottom diff is potentially the sum of many top diffs.
// However, we'd like each CUDA thread to handle exactly one output. Hence,
// we first pre-compute a list of lists of indices that need to be summed for
// each output. `top_indexes` holds the data of this list of lists. The
// k'th element of `begins` points to the location in `top_indexes` where the
// list for the k'th example begin, and the k'th element of `counts` is the
// length of that list.
vector<int> shape;
shape.push_back(bottom[1]->count());
Blob<Dtype> top_indexes(shape);
shape[0] = bottom[0]->shape(0);
Blob<Dtype> counts(shape);
Blob<Dtype> begins(shape);
Dtype* t_i_data = top_indexes.mutable_cpu_data();
Dtype* c_data = counts.mutable_cpu_data();
Dtype* b_data = begins.mutable_cpu_data();
caffe_set(begins.count(), Dtype(-1), b_data);
caffe_set(counts.count(), Dtype(0), c_data);
for (int i = 0; i < mapping.size(); ++i) {
t_i_data[i] = mapping[i].second;
if (b_data[mapping[i].first] == -1) {
b_data[mapping[i].first] = i;
}
c_data[mapping[i].first] += 1;
}
int threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BRBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(),
counts.gpu_data(), bottom[0]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer);
} // namespace caffe
| c751832a8ed588dba6fb83944f6a274dcd894557.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <utility>
#include <vector>
#include "caffe/layers/batch_reindex_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
__global__ void BRForward(const int count, const int inner_dim, const Dtype* in,
const Dtype* permut, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
int in_n = static_cast<int>(permut[n]);
out[index] = in[in_n * (inner_dim) + index % (inner_dim)];
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
check_batch_reindex(bottom[0]->shape(0), bottom[1]->count(),
bottom[1]->cpu_data());
if (top[0]->count() == 0) {
return;
}
int threads = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BRForward<Dtype> <<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS>>>(
top[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
bottom[0]->gpu_data(), bottom[1]->gpu_data(), top[0]->mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
}
template<typename Dtype>
__global__ void BRBackward(const int count, const int inner_dim,
const Dtype* in, const Dtype* top_indexes,
const Dtype* begins, const Dtype* counts,
Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / (inner_dim);
out[index] = 0;
int lower = static_cast<int>(begins[n]);
int upper = lower + static_cast<int>(counts[n]);
for (int i = lower; i < upper; ++i) {
int in_n = static_cast<int>(top_indexes[i]);
out[index] += in[in_n * (inner_dim) + index % (inner_dim)];
}
}
}
template<typename Dtype>
void BatchReindexLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
CHECK(!propagate_down[1]) << "Cannot backprop to index.";
if (!propagate_down[0]) {
return;
}
vector<std::pair<int, int> > mapping;
const Dtype* perm = bottom[1]->cpu_data();
for (int i = 0; i < bottom[1]->count(); ++i) {
mapping.push_back(pair<int, int>(static_cast<int>(perm[i]), i));
}
std::sort(mapping.begin(), mapping.end(), pair_sort_first());
// Each element of the bottom diff is potentially the sum of many top diffs.
// However, we'd like each CUDA thread to handle exactly one output. Hence,
// we first pre-compute a list of lists of indices that need to be summed for
// each output. `top_indexes` holds the data of this list of lists. The
// k'th element of `begins` points to the location in `top_indexes` where the
// list for the k'th example begin, and the k'th element of `counts` is the
// length of that list.
vector<int> shape;
shape.push_back(bottom[1]->count());
Blob<Dtype> top_indexes(shape);
shape[0] = bottom[0]->shape(0);
Blob<Dtype> counts(shape);
Blob<Dtype> begins(shape);
Dtype* t_i_data = top_indexes.mutable_cpu_data();
Dtype* c_data = counts.mutable_cpu_data();
Dtype* b_data = begins.mutable_cpu_data();
caffe_set(begins.count(), Dtype(-1), b_data);
caffe_set(counts.count(), Dtype(0), c_data);
for (int i = 0; i < mapping.size(); ++i) {
t_i_data[i] = mapping[i].second;
if (b_data[mapping[i].first] == -1) {
b_data[mapping[i].first] = i;
}
c_data[mapping[i].first] += 1;
}
int threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BRBackward<Dtype> <<<CAFFE_GET_BLOCKS(threads), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->count(), bottom[0]->count() / bottom[0]->shape(0),
top[0]->gpu_diff(), top_indexes.gpu_data(), begins.gpu_data(),
counts.gpu_data(), bottom[0]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchReindexLayer);
} // namespace caffe
|
e99f32ac49026d5073a38ad0480e41c7676ba2c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelCollectEmptySlots.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
short *cnewtri = NULL;
hipMalloc(&cnewtri, XSIZE*YSIZE);
int *cprefix = NULL;
hipMalloc(&cprefix, XSIZE*YSIZE);
int *cempty = NULL;
hipMalloc(&cempty, XSIZE*YSIZE);
int nTris = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelCollectEmptySlots), dim3(gridBlock),dim3(threadBlock), 0, 0, cnewtri,cprefix,cempty,nTris);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelCollectEmptySlots), dim3(gridBlock),dim3(threadBlock), 0, 0, cnewtri,cprefix,cempty,nTris);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelCollectEmptySlots), dim3(gridBlock),dim3(threadBlock), 0, 0, cnewtri,cprefix,cempty,nTris);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e99f32ac49026d5073a38ad0480e41c7676ba2c6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelCollectEmptySlots.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
short *cnewtri = NULL;
cudaMalloc(&cnewtri, XSIZE*YSIZE);
int *cprefix = NULL;
cudaMalloc(&cprefix, XSIZE*YSIZE);
int *cempty = NULL;
cudaMalloc(&cempty, XSIZE*YSIZE);
int nTris = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelCollectEmptySlots<<<gridBlock,threadBlock>>>(cnewtri,cprefix,cempty,nTris);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelCollectEmptySlots<<<gridBlock,threadBlock>>>(cnewtri,cprefix,cempty,nTris);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelCollectEmptySlots<<<gridBlock,threadBlock>>>(cnewtri,cprefix,cempty,nTris);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
804abedc2b0c5e8e97cc414fe1831ea20e19a419.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* (b.kainz@imperial.ac.uk)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "patchBasedSuperresolution_gpu.cuh"
//The globally constant point spread function
extern __constant__ PointSpreadFunction<float> _PSF;
__constant__ int d_directions[13][3];
__constant__ float d_factor[13];
template <typename T>
__global__ void patchBasedSuperresolution_gpuKernel(PatchBasedVolume<T> inputStack, ReconVolume<T> reconstruction)
{
//patch based coordinates
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (/*pos.x >= vSize.x || pos.y >= vSize.y ||*/ pos.z >= vSize.z)
return;
//from input data with patch calculation
//float s = inputStack.getValueFromPatchCoords(pos);
//from patch buffer
float patchVal = inputStack.getPatchValue(pos);
if ((patchVal == -1.0f))
return;
ImagePatch2D<T> patch = inputStack.getImagePatch2D(pos.z);
float scale = patch.scale;
patchVal = patchVal * scale;
float sume = inputStack.getPSFsumsValue(pos); //v_PSF_sums[idx];
if (sume == 0.0f)
return;
float w = inputStack.getWeightValue(pos);
float ss = inputStack.getSimulatedPatchValue(pos);
float patch_weight = patch.patchWeight;
if (ss > 0.0f)
patchVal = (patchVal - ss);
else
patchVal = 0.0f;
float3 patchPos = make_float3(pos.x, pos.y, 0);
float3 patchDim = inputStack.getDim();
float size_inv = 2.0f * _PSF.m_quality_factor / reconstruction.m_dim.x;
int xDim = round_((patchDim.x * size_inv));
int yDim = round_((patchDim.y * size_inv));
int zDim = round_((patchDim.z * size_inv));
//truncate if value gets close to epsilon
int dim = MAX_PSF_SUPPORT;
int centre = (MAX_PSF_SUPPORT - 1) / 2;
Matrix4<float> combInvTrans = patch.W2I * (patch.InvTransformation * reconstruction.reconstructedI2W);
float3 psfxyz;
float3 _psfxyz = reconstruction.reconstructedW2I*(patch.Transformation* (patch.I2W * patchPos));
psfxyz = make_float3(round_(_psfxyz.x), round_(_psfxyz.y), round_(_psfxyz.z));
for (int z = 0; z < dim; z++) {
for (int y = 0; y < dim; y++) {
float oldPSF = FLT_MAX;
for (int x = 0; x < dim; x++)
{
float3 ofsPos;
float psfval = _PSF.getPSFParamsPrecomp(ofsPos, psfxyz, make_int3(x - centre, y - centre, z - centre), combInvTrans, patchPos, patchDim);
if (abs(oldPSF - psfval) < PSF_EPSILON) continue;
oldPSF = psfval;
uint3 apos = make_uint3(round_(ofsPos.x), round_(ofsPos.y), round_(ofsPos.z)); //NN
if (apos.x < reconstruction.m_size.x && apos.y < reconstruction.m_size.y && apos.z < reconstruction.m_size.z
&& reconstruction.m_d_mask[apos.x + apos.y*reconstruction.m_size.x + apos.z*reconstruction.m_size.x*reconstruction.m_size.y] != 0)
{
psfval /= sume;
reconstruction.addAddonValue(apos, psfval * w * patch_weight * patchVal);
reconstruction.addCMapValue(apos, psfval * w * patch_weight);
}
}
}
}
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::run(int _cuda_device,
PatchBasedVolume<T>* _inputStack, ReconVolume<T>* _reconstruction)
{
printf("patchBasedSuperresolution_gpu\n");
m_inputStack = _inputStack;
m_reconstruction = _reconstruction;
m_cuda_device = _cuda_device;
//TODO patch batch wise for kernel 2s watchdogs necesary?
checkCudaErrors(hipSetDevice(m_cuda_device));
//TODO addon and consider multi-GPU...
//m_d_buffer as original
//
//TODO
//updatePatchWeights(); --> these are done by Estep and kept with the patches, no explicit update!
if (m_alpha * m_lambda / (m_delta * m_delta) > 0.068)
{
printf("Warning: regularization might not have smoothing effect! Ensure that alpha*lambda/delta^2 is below 0.068.");
}
dim3 blockSize3 = dim3(8, 8, 8); //max 1024 threads
dim3 gridSize3 = divup(dim3(m_inputStack->getXYZPatchGridSize().x, m_inputStack->getXYZPatchGridSize().y,
m_inputStack->getXYZPatchGridSize().z), blockSize3);
patchBasedSuperresolution_gpuKernel<T> << <gridSize3, blockSize3 >> >(*m_inputStack, *m_reconstruction);
CHECK_ERROR(patchBasedPSFReconstructionKernel);
checkCudaErrors(hipDeviceSynchronize());
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::updatePatchWeights()
{
}
template <typename T>
__global__ void AdaptiveRegularizationPrepKernel(ReconVolume<T> reconstruction, bool _adaptive, T _alpha, T _min_intensity, T _max_intensity)
{
const uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z)
return;
T addon = reconstruction.getAddonValue(pos);
T cmap = reconstruction.getCMapValue(pos);
T recon = reconstruction.getReconValue(pos);
if (!_adaptive)
{
if (cmap != 0)
{
addon = addon / cmap;
cmap = 1.0f;
}
}
recon = recon + addon*_alpha;
if (recon < _min_intensity * 0.9f)
recon = _min_intensity * 0.9f;
if (recon > _max_intensity * 1.1f)
recon = _max_intensity * 1.1f;
reconstruction.setReconValue(pos, recon);
reconstruction.setAddonValue(pos, addon);
reconstruction.setCMapValue(pos, cmap);
}
template <typename T>
__device__ T AdaptiveRegularization1(int i, uint3 pos, uint3 pos2, ReconVolume<T> reconstruction, T* original, float delta)
{
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z
|| pos.x < 0 || pos.y < 0 || pos.z < 0 || reconstruction.getCMapValue(pos) <= 0 || reconstruction.getCMapValue(pos2) <= 0 ||
pos2.x >= reconstruction.m_size.x || pos2.y >= reconstruction.m_size.y || pos2.z >= reconstruction.m_size.z
|| pos2.x < 0 || pos2.y < 0 || pos2.z < 0)
return 0.0;
//central differences would be better... improve with texture linear interpolation
unsigned int idx1 = pos.x + pos.y * reconstruction.m_size.x + pos.z * reconstruction.m_size.x * reconstruction.m_size.y;
unsigned int idx2 = pos2.x + pos2.y * reconstruction.m_size.x + pos2.z * reconstruction.m_size.x * reconstruction.m_size.y;
float diff = (original[idx2] - original[idx1]) * sqrt(d_factor[i]) / delta;
return d_factor[i] / sqrt(1.0 + diff * diff);
}
template <typename T>
__global__ void AdaptiveRegularizationKernel(ReconVolume<T> reconstruction, T* original, T _delta, T _alpha, T _lambda)
{
uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z)
return;
T val = 0;
T valW = 0;
T sum = 0;
for (int i = 0; i < 13; i++)
{
uint3 pos2 = make_uint3(pos.x + d_directions[i][0], pos.y + d_directions[i][1], pos.z + d_directions[i][2]);
if ((pos2.x >= 0) && (pos2.x < reconstruction.m_size.x) && (pos2.y >= 0) && (pos2.y < reconstruction.m_size.y) && (pos2.z >= 0) && (pos2.z < reconstruction.m_size.z))
{
T bi = AdaptiveRegularization1(i, pos, pos2, reconstruction, original, _delta);
T cmapval = reconstruction.getCMapValue(pos2);
val += bi * reconstruction.getReconValue(pos2) * cmapval; //reconstructed == original2
valW += bi * cmapval;
sum += bi;
}
uint3 pos3 = make_uint3(pos.x - d_directions[i][0], pos.y - d_directions[i][1], pos.z - d_directions[i][2]); //recycle pos register
if ((pos3.x >= 0) && (pos3.x < reconstruction.m_size.x) && (pos3.y >= 0) && (pos3.y < reconstruction.m_size.y)
&& (pos3.z >= 0) && (pos3.z < reconstruction.m_size.z) &&
(pos2.x >= 0) && (pos2.x < reconstruction.m_size.x) && (pos2.y >= 0) && (pos2.y < reconstruction.m_size.y)
&& (pos2.z >= 0) && (pos2.z < reconstruction.m_size.z)
)
{
T bi = AdaptiveRegularization1(i, pos3, pos2, reconstruction, original, _delta);
T cmapval = reconstruction.getCMapValue(pos3);
val += bi * reconstruction.getReconValue(pos3) * cmapval; //reconstructed == original2
valW += bi * cmapval;
sum += bi;
}
}
T reconval = reconstruction.getReconValue(pos);
T cmapval = reconstruction.getCMapValue(pos);
val -= sum * reconval * cmapval;
valW -= sum * cmapval;
val = reconval * cmapval + _alpha * _lambda / (_delta * _delta) * val;
valW = cmapval + _alpha * _lambda / (_delta * _delta) * valW;
if (valW > 0.0) {
reconstruction.setReconValue(pos, val / valW);
}
else
{
reconstruction.setReconValue(pos, 0.0);
}
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::regularize(int rdevice, ReconVolume<T>* _reconstruction)
{
Volume<T> original;
original.init(_reconstruction->m_size, _reconstruction->m_dim);
checkCudaErrors(hipMemcpy((original.m_d_data), _reconstruction->m_d_data, original.m_size.x*original.m_size.y*original.m_size.z*sizeof(T), hipMemcpyDeviceToDevice));
checkCudaErrors(hipSetDevice(rdevice));
dim3 blockSize = dim3(8, 8, 8);
dim3 gridSize = divup(dim3(_reconstruction->m_size.x, _reconstruction->m_size.y, _reconstruction->m_size.z), blockSize);
AdaptiveRegularizationPrepKernel<T> << <gridSize, blockSize >> >(*_reconstruction, m_adaptive, m_alpha, m_min_intensity, m_max_intensity);
CHECK_ERROR(AdaptiveRegularizationPrep);
checkCudaErrors(hipDeviceSynchronize());
AdaptiveRegularizationKernel<T> << <gridSize, blockSize >> >(*_reconstruction, original.m_d_data, m_delta, m_alpha, m_lambda);
CHECK_ERROR(AdaptiveRegularizationKernel);
checkCudaErrors(hipDeviceSynchronize());
original.release();
}
template <typename T>
patchBasedSuperresolution_gpu<T>::patchBasedSuperresolution_gpu(T _min_intensity, T _max_intensity, bool _adaptive) :
m_min_intensity(_min_intensity), m_max_intensity(_max_intensity), m_adaptive(_adaptive)
{
m_delta = 1;
m_lambda = 0.1f;
m_alpha = (0.05f / m_lambda) * m_delta * m_delta;
int h_directions[][3] = {
{ 1, 0, -1 },
{ 0, 1, -1 },
{ 1, 1, -1 },
{ 1, -1, -1 },
{ 1, 0, 0 },
{ 0, 1, 0 },
{ 1, 1, 0 },
{ 1, -1, 0 },
{ 1, 0, 1 },
{ 0, 1, 1 },
{ 1, 1, 1 },
{ 1, -1, 1 },
{ 0, 0, 1 }
};
// this is constant -> constant mem for regularization
float factor[13];
for (int i = 0; i < 13; i++) {
factor[i] = 0;
}
for (int i = 0; i < 13; i++) {
for (int j = 0; j < 3; j++)
{
factor[i] += fabs((float)(h_directions[i][j]));
}
factor[i] = 1.0f / factor[i];
}
checkCudaErrors(hipMemcpyToSymbol(d_factor, factor, 13 * sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(d_directions, h_directions, 3 * 13 * sizeof(int)));
}
template <typename T>
patchBasedSuperresolution_gpu<T>::~patchBasedSuperresolution_gpu()
{
}
template class patchBasedSuperresolution_gpu < float >;
template class patchBasedSuperresolution_gpu < double >; | 804abedc2b0c5e8e97cc414fe1831ea20e19a419.cu | /*=========================================================================
* GPU accelerated motion compensation for MRI
*
* Copyright (c) 2016 Bernhard Kainz, Amir Alansary, Maria Kuklisova-Murgasova,
* Kevin Keraudren, Markus Steinberger
* (b.kainz@imperial.ac.uk)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
=========================================================================*/
#include "patchBasedSuperresolution_gpu.cuh"
//The globally constant point spread function
extern __constant__ PointSpreadFunction<float> _PSF;
__constant__ int d_directions[13][3];
__constant__ float d_factor[13];
template <typename T>
__global__ void patchBasedSuperresolution_gpuKernel(PatchBasedVolume<T> inputStack, ReconVolume<T> reconstruction)
{
//patch based coordinates
const uint3 pos = make_uint3(blockIdx.x* blockDim.x + threadIdx.x,
blockIdx.y* blockDim.y + threadIdx.y,
blockIdx.z* blockDim.z + threadIdx.z);
uint3 vSize = inputStack.getXYZPatchGridSize();
if (/*pos.x >= vSize.x || pos.y >= vSize.y ||*/ pos.z >= vSize.z)
return;
//from input data with patch calculation
//float s = inputStack.getValueFromPatchCoords(pos);
//from patch buffer
float patchVal = inputStack.getPatchValue(pos);
if ((patchVal == -1.0f))
return;
ImagePatch2D<T> patch = inputStack.getImagePatch2D(pos.z);
float scale = patch.scale;
patchVal = patchVal * scale;
float sume = inputStack.getPSFsumsValue(pos); //v_PSF_sums[idx];
if (sume == 0.0f)
return;
float w = inputStack.getWeightValue(pos);
float ss = inputStack.getSimulatedPatchValue(pos);
float patch_weight = patch.patchWeight;
if (ss > 0.0f)
patchVal = (patchVal - ss);
else
patchVal = 0.0f;
float3 patchPos = make_float3(pos.x, pos.y, 0);
float3 patchDim = inputStack.getDim();
float size_inv = 2.0f * _PSF.m_quality_factor / reconstruction.m_dim.x;
int xDim = round_((patchDim.x * size_inv));
int yDim = round_((patchDim.y * size_inv));
int zDim = round_((patchDim.z * size_inv));
//truncate if value gets close to epsilon
int dim = MAX_PSF_SUPPORT;
int centre = (MAX_PSF_SUPPORT - 1) / 2;
Matrix4<float> combInvTrans = patch.W2I * (patch.InvTransformation * reconstruction.reconstructedI2W);
float3 psfxyz;
float3 _psfxyz = reconstruction.reconstructedW2I*(patch.Transformation* (patch.I2W * patchPos));
psfxyz = make_float3(round_(_psfxyz.x), round_(_psfxyz.y), round_(_psfxyz.z));
for (int z = 0; z < dim; z++) {
for (int y = 0; y < dim; y++) {
float oldPSF = FLT_MAX;
for (int x = 0; x < dim; x++)
{
float3 ofsPos;
float psfval = _PSF.getPSFParamsPrecomp(ofsPos, psfxyz, make_int3(x - centre, y - centre, z - centre), combInvTrans, patchPos, patchDim);
if (abs(oldPSF - psfval) < PSF_EPSILON) continue;
oldPSF = psfval;
uint3 apos = make_uint3(round_(ofsPos.x), round_(ofsPos.y), round_(ofsPos.z)); //NN
if (apos.x < reconstruction.m_size.x && apos.y < reconstruction.m_size.y && apos.z < reconstruction.m_size.z
&& reconstruction.m_d_mask[apos.x + apos.y*reconstruction.m_size.x + apos.z*reconstruction.m_size.x*reconstruction.m_size.y] != 0)
{
psfval /= sume;
reconstruction.addAddonValue(apos, psfval * w * patch_weight * patchVal);
reconstruction.addCMapValue(apos, psfval * w * patch_weight);
}
}
}
}
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::run(int _cuda_device,
PatchBasedVolume<T>* _inputStack, ReconVolume<T>* _reconstruction)
{
printf("patchBasedSuperresolution_gpu\n");
m_inputStack = _inputStack;
m_reconstruction = _reconstruction;
m_cuda_device = _cuda_device;
//TODO patch batch wise for kernel 2s watchdogs necesary?
checkCudaErrors(cudaSetDevice(m_cuda_device));
//TODO addon and consider multi-GPU...
//m_d_buffer as original
//
//TODO
//updatePatchWeights(); --> these are done by Estep and kept with the patches, no explicit update!
if (m_alpha * m_lambda / (m_delta * m_delta) > 0.068)
{
printf("Warning: regularization might not have smoothing effect! Ensure that alpha*lambda/delta^2 is below 0.068.");
}
dim3 blockSize3 = dim3(8, 8, 8); //max 1024 threads
dim3 gridSize3 = divup(dim3(m_inputStack->getXYZPatchGridSize().x, m_inputStack->getXYZPatchGridSize().y,
m_inputStack->getXYZPatchGridSize().z), blockSize3);
patchBasedSuperresolution_gpuKernel<T> << <gridSize3, blockSize3 >> >(*m_inputStack, *m_reconstruction);
CHECK_ERROR(patchBasedPSFReconstructionKernel);
checkCudaErrors(cudaDeviceSynchronize());
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::updatePatchWeights()
{
}
template <typename T>
__global__ void AdaptiveRegularizationPrepKernel(ReconVolume<T> reconstruction, bool _adaptive, T _alpha, T _min_intensity, T _max_intensity)
{
const uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z)
return;
T addon = reconstruction.getAddonValue(pos);
T cmap = reconstruction.getCMapValue(pos);
T recon = reconstruction.getReconValue(pos);
if (!_adaptive)
{
if (cmap != 0)
{
addon = addon / cmap;
cmap = 1.0f;
}
}
recon = recon + addon*_alpha;
if (recon < _min_intensity * 0.9f)
recon = _min_intensity * 0.9f;
if (recon > _max_intensity * 1.1f)
recon = _max_intensity * 1.1f;
reconstruction.setReconValue(pos, recon);
reconstruction.setAddonValue(pos, addon);
reconstruction.setCMapValue(pos, cmap);
}
template <typename T>
__device__ T AdaptiveRegularization1(int i, uint3 pos, uint3 pos2, ReconVolume<T> reconstruction, T* original, float delta)
{
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z
|| pos.x < 0 || pos.y < 0 || pos.z < 0 || reconstruction.getCMapValue(pos) <= 0 || reconstruction.getCMapValue(pos2) <= 0 ||
pos2.x >= reconstruction.m_size.x || pos2.y >= reconstruction.m_size.y || pos2.z >= reconstruction.m_size.z
|| pos2.x < 0 || pos2.y < 0 || pos2.z < 0)
return 0.0;
//central differences would be better... improve with texture linear interpolation
unsigned int idx1 = pos.x + pos.y * reconstruction.m_size.x + pos.z * reconstruction.m_size.x * reconstruction.m_size.y;
unsigned int idx2 = pos2.x + pos2.y * reconstruction.m_size.x + pos2.z * reconstruction.m_size.x * reconstruction.m_size.y;
float diff = (original[idx2] - original[idx1]) * sqrt(d_factor[i]) / delta;
return d_factor[i] / sqrt(1.0 + diff * diff);
}
template <typename T>
__global__ void AdaptiveRegularizationKernel(ReconVolume<T> reconstruction, T* original, T _delta, T _alpha, T _lambda)
{
uint3 pos = make_uint3(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y,
blockIdx.z * blockDim.z + threadIdx.z);
if (pos.x >= reconstruction.m_size.x || pos.y >= reconstruction.m_size.y || pos.z >= reconstruction.m_size.z)
return;
T val = 0;
T valW = 0;
T sum = 0;
for (int i = 0; i < 13; i++)
{
uint3 pos2 = make_uint3(pos.x + d_directions[i][0], pos.y + d_directions[i][1], pos.z + d_directions[i][2]);
if ((pos2.x >= 0) && (pos2.x < reconstruction.m_size.x) && (pos2.y >= 0) && (pos2.y < reconstruction.m_size.y) && (pos2.z >= 0) && (pos2.z < reconstruction.m_size.z))
{
T bi = AdaptiveRegularization1(i, pos, pos2, reconstruction, original, _delta);
T cmapval = reconstruction.getCMapValue(pos2);
val += bi * reconstruction.getReconValue(pos2) * cmapval; //reconstructed == original2
valW += bi * cmapval;
sum += bi;
}
uint3 pos3 = make_uint3(pos.x - d_directions[i][0], pos.y - d_directions[i][1], pos.z - d_directions[i][2]); //recycle pos register
if ((pos3.x >= 0) && (pos3.x < reconstruction.m_size.x) && (pos3.y >= 0) && (pos3.y < reconstruction.m_size.y)
&& (pos3.z >= 0) && (pos3.z < reconstruction.m_size.z) &&
(pos2.x >= 0) && (pos2.x < reconstruction.m_size.x) && (pos2.y >= 0) && (pos2.y < reconstruction.m_size.y)
&& (pos2.z >= 0) && (pos2.z < reconstruction.m_size.z)
)
{
T bi = AdaptiveRegularization1(i, pos3, pos2, reconstruction, original, _delta);
T cmapval = reconstruction.getCMapValue(pos3);
val += bi * reconstruction.getReconValue(pos3) * cmapval; //reconstructed == original2
valW += bi * cmapval;
sum += bi;
}
}
T reconval = reconstruction.getReconValue(pos);
T cmapval = reconstruction.getCMapValue(pos);
val -= sum * reconval * cmapval;
valW -= sum * cmapval;
val = reconval * cmapval + _alpha * _lambda / (_delta * _delta) * val;
valW = cmapval + _alpha * _lambda / (_delta * _delta) * valW;
if (valW > 0.0) {
reconstruction.setReconValue(pos, val / valW);
}
else
{
reconstruction.setReconValue(pos, 0.0);
}
}
template <typename T>
void patchBasedSuperresolution_gpu<T>::regularize(int rdevice, ReconVolume<T>* _reconstruction)
{
Volume<T> original;
original.init(_reconstruction->m_size, _reconstruction->m_dim);
checkCudaErrors(cudaMemcpy((original.m_d_data), _reconstruction->m_d_data, original.m_size.x*original.m_size.y*original.m_size.z*sizeof(T), cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaSetDevice(rdevice));
dim3 blockSize = dim3(8, 8, 8);
dim3 gridSize = divup(dim3(_reconstruction->m_size.x, _reconstruction->m_size.y, _reconstruction->m_size.z), blockSize);
AdaptiveRegularizationPrepKernel<T> << <gridSize, blockSize >> >(*_reconstruction, m_adaptive, m_alpha, m_min_intensity, m_max_intensity);
CHECK_ERROR(AdaptiveRegularizationPrep);
checkCudaErrors(cudaDeviceSynchronize());
AdaptiveRegularizationKernel<T> << <gridSize, blockSize >> >(*_reconstruction, original.m_d_data, m_delta, m_alpha, m_lambda);
CHECK_ERROR(AdaptiveRegularizationKernel);
checkCudaErrors(cudaDeviceSynchronize());
original.release();
}
template <typename T>
patchBasedSuperresolution_gpu<T>::patchBasedSuperresolution_gpu(T _min_intensity, T _max_intensity, bool _adaptive) :
m_min_intensity(_min_intensity), m_max_intensity(_max_intensity), m_adaptive(_adaptive)
{
m_delta = 1;
m_lambda = 0.1f;
m_alpha = (0.05f / m_lambda) * m_delta * m_delta;
int h_directions[][3] = {
{ 1, 0, -1 },
{ 0, 1, -1 },
{ 1, 1, -1 },
{ 1, -1, -1 },
{ 1, 0, 0 },
{ 0, 1, 0 },
{ 1, 1, 0 },
{ 1, -1, 0 },
{ 1, 0, 1 },
{ 0, 1, 1 },
{ 1, 1, 1 },
{ 1, -1, 1 },
{ 0, 0, 1 }
};
// this is constant -> constant mem for regularization
float factor[13];
for (int i = 0; i < 13; i++) {
factor[i] = 0;
}
for (int i = 0; i < 13; i++) {
for (int j = 0; j < 3; j++)
{
factor[i] += fabs((float)(h_directions[i][j]));
}
factor[i] = 1.0f / factor[i];
}
checkCudaErrors(cudaMemcpyToSymbol(d_factor, factor, 13 * sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(d_directions, h_directions, 3 * 13 * sizeof(int)));
}
template <typename T>
patchBasedSuperresolution_gpu<T>::~patchBasedSuperresolution_gpu()
{
}
template class patchBasedSuperresolution_gpu < float >;
template class patchBasedSuperresolution_gpu < double >; |
ec564cd320c1e7f2bb99b960b99df12d17efdc84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-07-18
* I'm surprised that I did not write this file till today.
*/
#include <hiprand/hiprand.h>
#include <time.h>
#include "SetData.cuh"
#include <hiprand/hiprand_kernel.h>
#include "../../XDevice.h"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
set a data array with a fixed value
>> d - pointer to the data array
>> v - the initial value
>> size - size of the array
*/
template<class T>
__global__
void KernelSetDataFixed(T * d, T v, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
d[i] = v;
}
template __global__ void KernelSetDataFixed<int>(int *, int, int);
template __global__ void KernelSetDataFixed<float>(float *, float, int);
template __global__ void KernelSetDataFixed<double>(double *, double, int);
//template __global__ void KernelSetDataFixed<__half>(__half*, __half, int);
/*
generate data items with a fixed value
>> tensor - the tensor for initialization
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixed(XTensor * tensor, T value)
{
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
KernelSetDataFixed << <blocks, threads >> > ((int*)tensor->data, (int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
KernelSetDataFixed << <blocks, threads >> > ((float*)tensor->data, (float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataFixed << <blocks, threads >> > ((double*)tensor->data, (double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// KernelSetDataFixed << <blocks, threads >> > ((__half*)tensor->data, (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixed<int>(XTensor *, int);
template void _CudaSetDataFixed<float>(XTensor *, float);
template void _CudaSetDataFixed<double>(XTensor *, double);
/*
set a float data array with a fixed value p (in int) only
if the condition entry is non-zero
>> d - pointer to the data array
>> c - pointer to the condition array
>> size - size of the array
>> p - the initial value
*/
template<class T>
__global__
void KernelSetDataFixedCond(T * d, T * c, T value, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size && c[i] != 0)
d[i] = value;
}
template __global__ void KernelSetDataFixedCond<int>(int*, int*, int, int);
template __global__ void KernelSetDataFixedCond<float>(float*, float*, float, int);
template __global__ void KernelSetDataFixedCond<double>(double*, double*, double, int);
//template __global__ void KernelSetDataFixedCond<__half>(__half*, __half*, __half, int);
/*
generate data items with a fixed value p
only if the condition entry is non-zero
>> tensor - the tensor for initialization
>> condition - the condition tensor whose entry would be check to
set the corresponding entry in "tensor"
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixedCond(XTensor* tensor, XTensor* condition, T value)
{
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
hipLaunchKernelGGL(( KernelSetDataFixedCond) , dim3(blocks), dim3(threads) , 0, 0, (int*)tensor->data, (int*)condition->data,
(int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
hipLaunchKernelGGL(( KernelSetDataFixedCond) , dim3(blocks), dim3(threads) , 0, 0, (float*)tensor->data, (float*)condition->data,
(float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
hipLaunchKernelGGL(( KernelSetDataFixedCond) , dim3(blocks), dim3(threads) , 0, 0, (double*)tensor->data, (double*)condition->data,
(double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// hipLaunchKernelGGL(( KernelSetDataFixedCond) , dim3(blocks), dim3(threads) , 0, 0, (__half*)tensor->data, (__half*)condition->data,
// (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixedCond<int>(XTensor*, XTensor*, int);
template void _CudaSetDataFixedCond<float>(XTensor*, XTensor*, float);
template void _CudaSetDataFixedCond<double>(XTensor*, XTensor*, double);
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of hiprand
>> d - float datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandFloat(float * d, int size, DTYPE lower, DTYPE variance)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
d[i] = d[i] * variance + lower;
}
}
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of hiprand
>> d - double datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandDouble(double * d, int size, DTYPE lower, DTYPE variance)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
d[i] = d[i] * variance + lower;
}
}
/*
set data items to a pre-defined value if its value >= p, set it to 0 otherwise
>> d - pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataPCut(DTYPE * d, int size, DTYPE p, DTYPE value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (d[i] >= p)
d[i] = value;
else
d[i] = 0;
}
}
/*
set data items along with a given dimension (and keep the remaining items unchanged) - kernel version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length of the segment to be set
>> blockSize - size of a data block
>> blockNum - number of data blocks
*/
template<class T>
__global__
void KernelSetDataDim(T * d, int beg, int len, int blockSize, int blockNum, T p)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= blockSize || j > blockNum)
return;
if(i < beg || i >= beg + len)
return;
d[blockSize * j + i] = p;
}
template __global__ void KernelSetDataDim<int>(int*, int, int, int, int, int);
template __global__ void KernelSetDataDim<float>(float*, int, int, int, int, float);
template __global__ void KernelSetDataDim<double>(double*, int, int, int, int, double);
/*
set data items along with a given dimension (and keep the remaining items unchanged) - cuda version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length along with the given dimension
>> dim - the dimension along which we set the data
e.g., given a 3 * 3 tensor
1 2 3
4 5 6
7 8 9
when beg = 1, len = 1, dim = 0 and p = 0, we have
1 2 3
0 0 0
7 8 9
i.e., we set all entries of row 1 to 0
*/
template<class T>
void _CudaSetDataDim(XTensor * tensor, int beg, int len, int dim, T p)
{
int n = tensor->order;
CheckNTErrors(tensor->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(dim < n && dim >= 0, "Illegal dimension!");
CheckNTErrors(beg >= 0 && beg < tensor->GetDim(dim), "Illegal beginning position!");
CheckNTErrors(beg + len >= 0 && beg + len < tensor->GetDim(dim), "Illegal length!");
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for(int i = n - 1; i > dim; i--){
stride *= tensor->GetDim(i);
}
blockSize = stride * tensor->GetDim(dim);
blockNum = tensor->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
KernelSetDataDim << <blocks, threads >> > ((int*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (int)p);
else if (tensor->dataType == X_FLOAT)
KernelSetDataDim << <blocks, threads >> > ((float*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (float)p);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataDim << <blocks, threads >> > ((double*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (double)p);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataDim<int>(XTensor*, int, int, int, int);
template void _CudaSetDataDim<float>(XTensor*, int, int, int, float);
template void _CudaSetDataDim<double>(XTensor*, int, int, int, double);
/*
modify data items along with a given index and dimension
(and keep the remaining items unchanged) - kernel version
>> s - the pointer whose data would be modified
>> m - the pointer whose data would be used to modify the data pointed by s
>> blockNum - number of data blocks
>> blockSize - size of a data block
>> stride - stride of a data block
*/
__global__
void KernelSetDataIndexed(DTYPE * s, DTYPE * m, int blockNum, int blockSize, int stride)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= stride || j >= blockNum)
return;
int x = blockSize * j + i;
int y = stride * j + i;
s[x] = m[y];
}
/*
modify data items along with a given index and dimension (and keep the remaining items unchanged)
>> source - the tensor whose data array would be modified
>> modify - the tensor whose data array would be used to modify the source tensor
>> dim - the dimension along which we modify the tensor
>> index - index of the given dimension
e.g., given a source tensor (3, 3)
1 2 3
4 5 6
7 8 9
given a modified tensor (3)
1 2 3
when dim = 0, index = 1, we have
1 2 3
1 2 3
7 8 9
i.e., we set entries of row 1 to {1, 2, 3}
*/
void _CudaSetDataIndexed(XTensor * source, XTensor * modify, int dim, int index)
{
int order = source->order;
int size = source->GetDim(dim);
CheckNTErrors(source->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(dim >= 0 && dim < order, "Illegal dimension!");
CheckNTErrors(index >= 0 && index < size, "Illegal index!");
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for(int i = order - 1; i > dim; i--){
stride *= source->GetDim(i);
}
blockSize = stride * source->GetDim(dim);
blockNum = source->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(source->devID, stride, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(source->devID, devIDBackup);
hipLaunchKernelGGL(( KernelSetDataIndexed), dim3(blocks), dim3(threads) , 0, 0, (DTYPE*)source->data + index * stride, (DTYPE*)modify->data,
blockNum, blockSize, stride);
BacktoCudaDev(source->devID, devIDBackup);
}
/*
set lower triangular matrics for each block
>> d - pointer to the data array
>> l - row number (or column number) of each block, i.e,
a block is l * l matrix
>> blockSize - size of each block (blockSize = l * l)
>> blockNum - number of the blocks
>> p - the value for each entry of the lower triangular matrics
>> shift - the offset from diagonal
e.g., for a 3* 3 tensor,
when p = 1 ans shift = 0, we have
1 0 0
1 1 0
1 1 1
when p = 2 and shift = -1, we have
0 0 0
2 0 0
2 2 0
*/
__global__
void KernelSetDataLowTri(DTYPE * d, int l, int blockSize, int blockNum, DTYPE p, int shift)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= blockSize || j > blockNum)
return;
int row = i / l;
int col = i % l;
DTYPE * d2 = d + blockSize * j + row * l + col;
if(col <= row + shift)
*d2 = p;
else
*d2 = 0;
}
/*
generate data as lower triangular matrics for last two dimensions (cuda version)
>> tensor - the tensor whose data to be set
>> value - the value for each entry of the lower triangular matrics
>> shift - the offset from diagonal
e.g., for a 3 * 3 tensor,
when value = 1 ans shift = 0, we have
1 0 0
1 1 0
1 1 1
when value = 2 and shift = -1, we have
0 0 0
2 0 0
2 2 0
*/
void _CudaSetDataLowTri(XTensor * tensor, DTYPE value, int shift)
{
int size = tensor->GetDim(-1);
int blockSize = size * size;
int blockNum = tensor->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
hipLaunchKernelGGL(( KernelSetDataLowTri), dim3(blocks), dim3(threads) , 0, 0, (DTYPE*)tensor->data, size, blockSize, blockNum, value, shift);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper]
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
*/
void _CudaSetDataRand(const XTensor * tensor, DTYPE lower, DTYPE upper)
{
CheckNTErrors(upper > lower, "the high value must be greater than low value!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
hiprandGenerator_t & gen = GDevs.GPUs[tensor->devID].gen;
hiprandGenerateUniform(gen, (float*)tensor->data, tensor->unitNum);
DTYPE variance = upper - lower;
if(variance != 1.0F || lower != 0){
if (tensor->dataType == X_FLOAT)
hipLaunchKernelGGL(( KernelSetDataRandFloat) , dim3(blocks), dim3(threads) , 0, 0,
(float*) tensor->data, tensor->unitNum, lower, variance);
else if (tensor->dataType == X_DOUBLE)
hipLaunchKernelGGL(( KernelSetDataRandDouble) , dim3(blocks), dim3(threads) , 0, 0,
(double*)tensor->data, tensor->unitNum, lower, variance);
}
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper] and set
the item to a pre-defined value if the item >= p, set the item to 0 otherwise
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
>> p - the threshold
>> value - the value we intend to assign to the item
*/
void _CudaSetDataRandP(const XTensor * tensor, DTYPE lower, DTYPE upper, DTYPE p, DTYPE value)
{
_CudaSetDataRand(tensor, lower, upper);
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
KernelSetDataPCut << <blocks, threads >> >((float*)tensor->data, tensor->unitNum, p, value);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
__global__
void KernelSetDataWithOffset(DTYPE * data, MTYPE * offsets, DTYPE value, MTYPE num)
{
/* index */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num)
data[offsets[i]] = value;
}
/*
set the data with an array of offsets (cuda version)
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
void _CudaSetDataWithOffset(XTensor * tensor, MTYPE * offsets, DTYPE value, MTYPE num)
{
CheckNTErrors(tensor->dataType == X_FLOAT, "Data type is incorrect!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
KernelSetDataWithOffset << <blocks, threads >> > ((DTYPE*)tensor->data, offsets, value, num);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
>> dataType - the data type of the data and values
*/
__global__
void KernelSetDataWithOffsetAndValue(void * data, MTYPE * offsets, void * values, MTYPE num, TENSOR_DATA_TYPE dataType)
{
/* index */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num) {
if (dataType == X_INT)
*((int *)data + offsets[i]) = *((int *)values + i);
else if (dataType == X_FLOAT)
*((float *)data + offsets[i]) = *((float *)values + i);
}
}
/*
set the data with an array of values
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> value - value of the ech data item
>> num - number of the data items
*/
void _CudaSetDataWithOffsetAndValue(XTensor * tensor, MTYPE * offsets, void * values, MTYPE num)
{
XMem * mem = tensor->mem;
MTYPE offsetSize = num * sizeof(MTYPE);
MTYPE valueSize;
if (tensor->dataType == X_INT)
valueSize = num * sizeof(int);
else if (tensor->dataType == X_FLOAT)
valueSize = num * sizeof(float);
else
ShowNTErrors("TO DO!!!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
MTYPE * offsetsCuda = mem != NULL ?
(MTYPE*)mem->AllocBuf(mem->devID, offsetSize) :
(MTYPE*)XMemAlloc(tensor->devID, offsetSize);
void * valuesCuda = mem != NULL ?
mem->AllocBuf(mem->devID, valueSize) :
XMemAlloc(tensor->devID, valueSize);
if (mem != NULL) {
XMemCopy(offsetsCuda, mem->devID, offsets, -1, offsetSize);
XMemCopy(valuesCuda, mem->devID, values, -1, valueSize);
}
else {
XMemCopy(offsetsCuda, tensor->devID, offsets, -1, offsetSize);
XMemCopy(valuesCuda, tensor->devID, values, -1, valueSize);
}
hipLaunchKernelGGL(( KernelSetDataWithOffsetAndValue), dim3(blocks), dim3(threads) , 0, 0, tensor->data, offsetsCuda, valuesCuda, num, tensor->dataType);
if (mem != NULL) {
mem->ReleaseBuf(mem->devID, valueSize);
mem->ReleaseBuf(mem->devID, offsetSize);
}
else {
XMemFree(tensor->devID, valuesCuda);
XMemFree(tensor->devID, offsetsCuda);
}
BacktoCudaDev(tensor->devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
| ec564cd320c1e7f2bb99b960b99df12d17efdc84.cu | /*
* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-07-18
* I'm surprised that I did not write this file till today.
*/
#include <curand.h>
#include <time.h>
#include "SetData.cuh"
#include <curand_kernel.h>
#include "../../XDevice.h"
#include "../../XUtility.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set a data array with a fixed value
>> d - pointer to the data array
>> v - the initial value
>> size - size of the array
*/
template<class T>
__global__
void KernelSetDataFixed(T * d, T v, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
d[i] = v;
}
template __global__ void KernelSetDataFixed<int>(int *, int, int);
template __global__ void KernelSetDataFixed<float>(float *, float, int);
template __global__ void KernelSetDataFixed<double>(double *, double, int);
//template __global__ void KernelSetDataFixed<__half>(__half*, __half, int);
/*
generate data items with a fixed value
>> tensor - the tensor for initialization
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixed(XTensor * tensor, T value)
{
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
KernelSetDataFixed << <blocks, threads >> > ((int*)tensor->data, (int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
KernelSetDataFixed << <blocks, threads >> > ((float*)tensor->data, (float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataFixed << <blocks, threads >> > ((double*)tensor->data, (double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// KernelSetDataFixed << <blocks, threads >> > ((__half*)tensor->data, (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixed<int>(XTensor *, int);
template void _CudaSetDataFixed<float>(XTensor *, float);
template void _CudaSetDataFixed<double>(XTensor *, double);
/*
set a float data array with a fixed value p (in int) only
if the condition entry is non-zero
>> d - pointer to the data array
>> c - pointer to the condition array
>> size - size of the array
>> p - the initial value
*/
template<class T>
__global__
void KernelSetDataFixedCond(T * d, T * c, T value, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size && c[i] != 0)
d[i] = value;
}
template __global__ void KernelSetDataFixedCond<int>(int*, int*, int, int);
template __global__ void KernelSetDataFixedCond<float>(float*, float*, float, int);
template __global__ void KernelSetDataFixedCond<double>(double*, double*, double, int);
//template __global__ void KernelSetDataFixedCond<__half>(__half*, __half*, __half, int);
/*
generate data items with a fixed value p
only if the condition entry is non-zero
>> tensor - the tensor for initialization
>> condition - the condition tensor whose entry would be check to
set the corresponding entry in "tensor"
>> value - the initial value
*/
template<class T>
void _CudaSetDataFixedCond(XTensor* tensor, XTensor* condition, T value)
{
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
KernelSetDataFixedCond <<< blocks, threads >>> ((int*)tensor->data, (int*)condition->data,
(int)value, tensor->unitNum);
else if (tensor->dataType == X_FLOAT)
KernelSetDataFixedCond <<< blocks, threads >>> ((float*)tensor->data, (float*)condition->data,
(float)value, tensor->unitNum);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataFixedCond <<< blocks, threads >>> ((double*)tensor->data, (double*)condition->data,
(double)value, tensor->unitNum);
//else if (tensor->dataType == X_FLOAT16)
// KernelSetDataFixedCond <<< blocks, threads >>> ((__half*)tensor->data, (__half*)condition->data,
// (__half)value, tensor->unitNum);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataFixedCond<int>(XTensor*, XTensor*, int);
template void _CudaSetDataFixedCond<float>(XTensor*, XTensor*, float);
template void _CudaSetDataFixedCond<double>(XTensor*, XTensor*, double);
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of curand
>> d - float datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandFloat(float * d, int size, DTYPE lower, DTYPE variance)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
d[i] = d[i] * variance + lower;
}
}
/*
set data array with a uniform distribution in [low, high]
>> deviceStates - the state of curand
>> d - double datatype pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataRandDouble(double * d, int size, DTYPE lower, DTYPE variance)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
d[i] = d[i] * variance + lower;
}
}
/*
set data items to a pre-defined value if its value >= p, set it to 0 otherwise
>> d - pointer to the data array
>> size - size of the array
>> lower - low value of the range
>> variance - the variance of the range
*/
__global__
void KernelSetDataPCut(DTYPE * d, int size, DTYPE p, DTYPE value)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size) {
if (d[i] >= p)
d[i] = value;
else
d[i] = 0;
}
}
/*
set data items along with a given dimension (and keep the remaining items unchanged) - kernel version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length of the segment to be set
>> blockSize - size of a data block
>> blockNum - number of data blocks
*/
template<class T>
__global__
void KernelSetDataDim(T * d, int beg, int len, int blockSize, int blockNum, T p)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= blockSize || j > blockNum)
return;
if(i < beg || i >= beg + len)
return;
d[blockSize * j + i] = p;
}
template __global__ void KernelSetDataDim<int>(int*, int, int, int, int, int);
template __global__ void KernelSetDataDim<float>(float*, int, int, int, int, float);
template __global__ void KernelSetDataDim<double>(double*, int, int, int, int, double);
/*
set data items along with a given dimension (and keep the remaining items unchanged) - cuda version
>> tensor - the tensor whose data array would be initialized
>> beg - the beginning position
>> len - length along with the given dimension
>> dim - the dimension along which we set the data
e.g., given a 3 * 3 tensor
1 2 3
4 5 6
7 8 9
when beg = 1, len = 1, dim = 0 and p = 0, we have
1 2 3
0 0 0
7 8 9
i.e., we set all entries of row 1 to 0
*/
template<class T>
void _CudaSetDataDim(XTensor * tensor, int beg, int len, int dim, T p)
{
int n = tensor->order;
CheckNTErrors(tensor->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(dim < n && dim >= 0, "Illegal dimension!");
CheckNTErrors(beg >= 0 && beg < tensor->GetDim(dim), "Illegal beginning position!");
CheckNTErrors(beg + len >= 0 && beg + len < tensor->GetDim(dim), "Illegal length!");
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for(int i = n - 1; i > dim; i--){
stride *= tensor->GetDim(i);
}
blockSize = stride * tensor->GetDim(dim);
blockNum = tensor->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
if (tensor->dataType == X_INT)
KernelSetDataDim << <blocks, threads >> > ((int*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (int)p);
else if (tensor->dataType == X_FLOAT)
KernelSetDataDim << <blocks, threads >> > ((float*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (float)p);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataDim << <blocks, threads >> > ((double*)tensor->data, beg * stride,
len * stride, blockSize, blockNum, (double)p);
else
ShowNTErrors("TODO! Unsupported datatype!")
BacktoCudaDev(tensor->devID, devIDBackup);
}
template void _CudaSetDataDim<int>(XTensor*, int, int, int, int);
template void _CudaSetDataDim<float>(XTensor*, int, int, int, float);
template void _CudaSetDataDim<double>(XTensor*, int, int, int, double);
/*
modify data items along with a given index and dimension
(and keep the remaining items unchanged) - kernel version
>> s - the pointer whose data would be modified
>> m - the pointer whose data would be used to modify the data pointed by s
>> blockNum - number of data blocks
>> blockSize - size of a data block
>> stride - stride of a data block
*/
__global__
void KernelSetDataIndexed(DTYPE * s, DTYPE * m, int blockNum, int blockSize, int stride)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= stride || j >= blockNum)
return;
int x = blockSize * j + i;
int y = stride * j + i;
s[x] = m[y];
}
/*
modify data items along with a given index and dimension (and keep the remaining items unchanged)
>> source - the tensor whose data array would be modified
>> modify - the tensor whose data array would be used to modify the source tensor
>> dim - the dimension along which we modify the tensor
>> index - index of the given dimension
e.g., given a source tensor (3, 3)
1 2 3
4 5 6
7 8 9
given a modified tensor (3)
1 2 3
when dim = 0, index = 1, we have
1 2 3
1 2 3
7 8 9
i.e., we set entries of row 1 to {1, 2, 3}
*/
void _CudaSetDataIndexed(XTensor * source, XTensor * modify, int dim, int index)
{
int order = source->order;
int size = source->GetDim(dim);
CheckNTErrors(source->dataType == DEFAULT_DTYPE, "TODO!");
CheckNTErrors(dim >= 0 && dim < order, "Illegal dimension!");
CheckNTErrors(index >= 0 && index < size, "Illegal index!");
int stride = 1;
int blockSize = 1;
int blockNum = 1;
for(int i = order - 1; i > dim; i--){
stride *= source->GetDim(i);
}
blockSize = stride * source->GetDim(dim);
blockNum = source->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(source->devID, stride, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(source->devID, devIDBackup);
KernelSetDataIndexed<<<blocks, threads >>>((DTYPE*)source->data + index * stride, (DTYPE*)modify->data,
blockNum, blockSize, stride);
BacktoCudaDev(source->devID, devIDBackup);
}
/*
set lower triangular matrics for each block
>> d - pointer to the data array
>> l - row number (or column number) of each block, i.e,
a block is l * l matrix
>> blockSize - size of each block (blockSize = l * l)
>> blockNum - number of the blocks
>> p - the value for each entry of the lower triangular matrics
>> shift - the offset from diagonal
e.g., for a 3* 3 tensor,
when p = 1 ans shift = 0, we have
1 0 0
1 1 0
1 1 1
when p = 2 and shift = -1, we have
0 0 0
2 0 0
2 2 0
*/
__global__
void KernelSetDataLowTri(DTYPE * d, int l, int blockSize, int blockNum, DTYPE p, int shift)
{
/* offset in each block */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block id */
int j = blockDim.y * blockIdx.y + threadIdx.y;
if(i >= blockSize || j > blockNum)
return;
int row = i / l;
int col = i % l;
DTYPE * d2 = d + blockSize * j + row * l + col;
if(col <= row + shift)
*d2 = p;
else
*d2 = 0;
}
/*
generate data as lower triangular matrics for last two dimensions (cuda version)
>> tensor - the tensor whose data to be set
>> value - the value for each entry of the lower triangular matrics
>> shift - the offset from diagonal
e.g., for a 3 * 3 tensor,
when value = 1 ans shift = 0, we have
1 0 0
1 1 0
1 1 1
when value = 2 and shift = -1, we have
0 0 0
2 0 0
2 2 0
*/
void _CudaSetDataLowTri(XTensor * tensor, DTYPE value, int shift)
{
int size = tensor->GetDim(-1);
int blockSize = size * size;
int blockNum = tensor->unitNum / blockSize;
int cudaGrids[3];
int cudaBlocks[3];
GDevs.GetCudaThread2D(tensor->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
dim3 blocks(cudaGrids[0], cudaGrids[1]);
dim3 threads(cudaBlocks[0], cudaBlocks[1]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
KernelSetDataLowTri<<<blocks, threads >>>((DTYPE*)tensor->data, size, blockSize, blockNum, value, shift);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper]
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
*/
void _CudaSetDataRand(const XTensor * tensor, DTYPE lower, DTYPE upper)
{
CheckNTErrors(upper > lower, "the high value must be greater than low value!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
curandGenerator_t & gen = GDevs.GPUs[tensor->devID].gen;
curandGenerateUniform(gen, (float*)tensor->data, tensor->unitNum);
DTYPE variance = upper - lower;
if(variance != 1.0F || lower != 0){
if (tensor->dataType == X_FLOAT)
KernelSetDataRandFloat <<<blocks, threads >>>
((float*) tensor->data, tensor->unitNum, lower, variance);
else if (tensor->dataType == X_DOUBLE)
KernelSetDataRandDouble <<<blocks, threads >>>
((double*)tensor->data, tensor->unitNum, lower, variance);
}
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
generate data items with a uniform distribution in [lower, upper] and set
the item to a pre-defined value if the item >= p, set the item to 0 otherwise
>> tensor - the tensor whose data array would be initialized
>> lower - lower value of the range
>> upper - upper value of the range
>> p - the threshold
>> value - the value we intend to assign to the item
*/
void _CudaSetDataRandP(const XTensor * tensor, DTYPE lower, DTYPE upper, DTYPE p, DTYPE value)
{
_CudaSetDataRand(tensor, lower, upper);
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, tensor->unitNum, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
KernelSetDataPCut << <blocks, threads >> >((float*)tensor->data, tensor->unitNum, p, value);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
__global__
void KernelSetDataWithOffset(DTYPE * data, MTYPE * offsets, DTYPE value, MTYPE num)
{
/* index */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num)
data[offsets[i]] = value;
}
/*
set the data with an array of offsets (cuda version)
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
*/
void _CudaSetDataWithOffset(XTensor * tensor, MTYPE * offsets, DTYPE value, MTYPE num)
{
CheckNTErrors(tensor->dataType == X_FLOAT, "Data type is incorrect!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
KernelSetDataWithOffset << <blocks, threads >> > ((DTYPE*)tensor->data, offsets, value, num);
BacktoCudaDev(tensor->devID, devIDBackup);
}
/*
set the data with an array of offsets (kernel version)
>> data - pointer to the data array
>> offsets - offset for each data item
>> value - value of the data items
>> num - number of the data items
>> dataType - the data type of the data and values
*/
__global__
void KernelSetDataWithOffsetAndValue(void * data, MTYPE * offsets, void * values, MTYPE num, TENSOR_DATA_TYPE dataType)
{
/* index */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num) {
if (dataType == X_INT)
*((int *)data + offsets[i]) = *((int *)values + i);
else if (dataType == X_FLOAT)
*((float *)data + offsets[i]) = *((float *)values + i);
}
}
/*
set the data with an array of values
>> tensor - the tensor that keeps the data
>> offsets - offset for each data item
>> value - value of the ech data item
>> num - number of the data items
*/
void _CudaSetDataWithOffsetAndValue(XTensor * tensor, MTYPE * offsets, void * values, MTYPE num)
{
XMem * mem = tensor->mem;
MTYPE offsetSize = num * sizeof(MTYPE);
MTYPE valueSize;
if (tensor->dataType == X_INT)
valueSize = num * sizeof(int);
else if (tensor->dataType == X_FLOAT)
valueSize = num * sizeof(float);
else
ShowNTErrors("TO DO!!!");
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread(tensor->devID, (int)num, gridSize, blockSize);
dim3 blocks(gridSize[0]);
dim3 threads(blockSize[0]);
int devIDBackup;
ProtectCudaDev(tensor->devID, devIDBackup);
MTYPE * offsetsCuda = mem != NULL ?
(MTYPE*)mem->AllocBuf(mem->devID, offsetSize) :
(MTYPE*)XMemAlloc(tensor->devID, offsetSize);
void * valuesCuda = mem != NULL ?
mem->AllocBuf(mem->devID, valueSize) :
XMemAlloc(tensor->devID, valueSize);
if (mem != NULL) {
XMemCopy(offsetsCuda, mem->devID, offsets, -1, offsetSize);
XMemCopy(valuesCuda, mem->devID, values, -1, valueSize);
}
else {
XMemCopy(offsetsCuda, tensor->devID, offsets, -1, offsetSize);
XMemCopy(valuesCuda, tensor->devID, values, -1, valueSize);
}
KernelSetDataWithOffsetAndValue<<<blocks, threads >>> (tensor->data, offsetsCuda, valuesCuda, num, tensor->dataType);
if (mem != NULL) {
mem->ReleaseBuf(mem->devID, valueSize);
mem->ReleaseBuf(mem->devID, offsetSize);
}
else {
XMemFree(tensor->devID, valuesCuda);
XMemFree(tensor->devID, offsetsCuda);
}
BacktoCudaDev(tensor->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
62507ca3d1c6c53d19e29d93bf5e35292074503e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .optix.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapated from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
float r0, r1, r2, r3;
if (params.j0 == -5.0f)
{
if (sampleIdx < 256)
r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ),
r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ),
r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
else
r0 = RandomFloat( seed ), r1 = RandomFloat( seed ),
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
O = RandomPointOnLens( r2, r3 );
}
else
{
r0 = r1 = 0;
O = make_float3( params.posLensSize );
}
float3 posOnPixel;
if (params.distortion == 0)
{
const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
posOnPixel = params.p1 + u * params.right + v * params.up;
}
else
{
const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
const float rr = tx * tx + ty * ty;
const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
const float theta = atan2f( tx, ty );
const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
}
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint pixelIdx = __float_as_uint( O4.w ) >> 8;
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
const uint rayIdx = idx.x + idx.y * params.scrsize.x;
switch (params.phase)
{
case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF | 62507ca3d1c6c53d19e29d93bf5e35292074503e.cu | /* .optix.cu - Copyright 2019 Utrecht University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file contains a minimal set of Optix functions. From here we will
dispatch program flow to our own functions that implement the path tracer.
*/
#include "../kernels/noerrors.h"
#include "helper_math.h"
// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"
// global path tracing parameters
extern "C" { __constant__ Params params; }
// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }
static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
// Adapated from E. Heitz. Arguments:
// sampleIndex: 0..255
// sampleDimension: 0..255
x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
// xor index based on optimized ranking
int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
// fetch value in sequence
int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
// if the dimension is optimized, xor sequence value based on optimized scrambling
value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
// convert to float and return
return (0.5f + value) * (1.0f / 256.0f);
}
static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
const float blade = (int)(r0 * 9);
float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
float x1, y1, x2, y2;
__sincosf( blade * PI / 4.5f, &x1, &y1 );
__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
const float xr = x1 * r1 + x2 * r2;
const float yr = y1 * r1 + y2 * r2;
float4 posLens = params.posLensSize;
return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}
static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
// random point on pixel and lens
int sx = pixelIdx % params.scrsize.x;
int sy = pixelIdx / params.scrsize.x;
float r0, r1, r2, r3;
if (params.j0 == -5.0f)
{
if (sampleIdx < 256)
r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ),
r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ),
r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
else
r0 = RandomFloat( seed ), r1 = RandomFloat( seed ),
r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
O = RandomPointOnLens( r2, r3 );
}
else
{
r0 = r1 = 0;
O = make_float3( params.posLensSize );
}
float3 posOnPixel;
if (params.distortion == 0)
{
const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
posOnPixel = params.p1 + u * params.right + v * params.up;
}
else
{
const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
const float rr = tx * tx + ty * ty;
const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
const float theta = atan2f( tx, ty );
const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
}
D = normalize( posOnPixel - O );
}
#if __CUDA_ARCH__ >= 700
#define THREADMASK __activemask() // volta, turing
#else
#define THREADMASK 0xffffffff // pascal, kepler, fermi
#endif
__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
// generate eye ray
float3 O, D;
generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
// populate path state array
params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) );
params.pathStates[pathIdx + stride] = make_float4( D, 0 );
// trace eye ray
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.pathStates[rayIdx];
const float4 D4 = params.pathStates[rayIdx + stride];
float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
uint pixelIdx = __float_as_uint( O4.w ) >> 8;
uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}
__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
const float4 O4 = params.connectData[rayIdx]; // O4
const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
// launch shadow ray
uint u0 = 1;
optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
if (u0) return;
const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
const int pixelIdx = __float_as_int( E4.w );
if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}
extern "C" __global__ void __raygen__rg()
{
const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
const uint3 idx = optixGetLaunchIndex();
const uint rayIdx = idx.x + idx.y * params.scrsize.x;
switch (params.phase)
{
case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break;
case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break;
case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break;
}
}
extern "C" __global__ void __miss__occlusion()
{
optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}
extern "C" __global__ void __closesthit__radiance()
{
const uint prim_idx = optixGetPrimitiveIndex();
const uint inst_idx = optixGetInstanceIndex();
const float2 bary = optixGetTriangleBarycentrics();
const float tmin = optixGetRayTmax();
optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
optixSetPayload_1( inst_idx );
optixSetPayload_2( prim_idx );
optixSetPayload_3( __float_as_uint( tmin ) );
}
// EOF |
d7243b71795d68077b0239888ec047191f2892bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T>
__device__ void fill(T *x, size_t n, T value) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < n; i += gridDim.x * blockDim.x) x[i] = value;
}
template <typename T>
__device__ void axpy(T a, T *x, T *y, size_t n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < n; i += gridDim.x * blockDim.x) y[i] += a * x[i];
}
#define FILL_C(T) \
__global__ void fill ## _ ## T(T *x, size_t n, T value) { fill(x, n, value); }
#define AXPY_C(T) \
__global__ void axpy ## _ ## T(T a, T *x, T *y, size_t n) { axpy(a, x, y, n); }
extern "C" {
FILL_C(float)
FILL_C(double)
AXPY_C(float)
AXPY_C(double)
}
| d7243b71795d68077b0239888ec047191f2892bf.cu | template <typename T>
__device__ void fill(T *x, size_t n, T value) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < n; i += gridDim.x * blockDim.x) x[i] = value;
}
template <typename T>
__device__ void axpy(T a, T *x, T *y, size_t n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < n; i += gridDim.x * blockDim.x) y[i] += a * x[i];
}
#define FILL_C(T) \
__global__ void fill ## _ ## T(T *x, size_t n, T value) { fill(x, n, value); }
#define AXPY_C(T) \
__global__ void axpy ## _ ## T(T a, T *x, T *y, size_t n) { axpy(a, x, y, n); }
extern "C" {
FILL_C(float)
FILL_C(double)
AXPY_C(float)
AXPY_C(double)
}
|
0888817ad01cea38d4ba7930c07efaa283b9895e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star3d1r-32x32-5-256_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
| 0888817ad01cea38d4ba7930c07efaa283b9895e.cu | #include "star3d1r-32x32-5-256_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5) && __local_c3 >= (__halo3 * 5) && __local_c3 < __side3LenOl - (__halo3 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(1, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_1, __reg_4_2, __reg_4_0);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(5, __reg_4_1, __reg_4_2, __reg_4_0);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0);
__STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0);
}
}
else
{
for (__h = 11; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0);
__h++;
}
}
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(1, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_2, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 7);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 8);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(4, __reg_3_0, __reg_3_1, __reg_3_2);
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1);
__STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0);
__STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(1, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_1, __reg_2_2, __reg_2_0);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__LOAD(__reg_0_2, 5);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, 6);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_2, __reg_2_0, __reg_2_1);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1);
__STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0);
__STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(1, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_0, 3);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, 4);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_1, __reg_1_2, __reg_1_0);
__DB_SWITCH(); __syncthreads();
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
__shared__ double __b_sb_double[__blockSize * 2];
double *__b_sb = __b_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = (((((((0.2500f * (__REGREF(__b, 0, 0))) + (0.1248f * (__REGREF(__a, 0, 0)))) + (0.1249f * (__REGREF(__c, 0, 0)))) + (0.1250f * (__SBREF(__b_sb, -1, 0)))) + (0.1251f * (__SBREF(__b_sb, 1, 0)))) + (0.1252f * (__SBREF(__b_sb, 0, -1)))) + (0.1253f * (__SBREF(__b_sb, 0, 1)))); } while (0)
#define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__STORE(1, __reg_0_0, __reg_0_1, __reg_0_2);
}
__b_sb = __b_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
}
}
|
c31bd0335baeab469fcfdd4669dd8025ffc2b613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<algorithm>
#include<iostream>
#include"./findPattern.h"
#include<ctime>
#define RADIX_SIZE 4
#define RADIX_BITS 2
#define RADIX_MASK 3
#define K 1
#define order true
// bool order = true ;max topk else min topk
using namespace std;
static inline __device__ void gpuAtomicAdd(int32_t *address, int32_t val) {
atomicAdd(address, val);
}
__device__ __forceinline__ int getLaneId() {
#if defined(__HIP_PLATFORM_HCC__)
return __lane_id();
#else
int laneId;
asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
return laneId;
#endif
}
__device__ __forceinline__ unsigned int ACTIVE_MASK()
{
#ifndef __HIP_PLATFORM_HCC__
return __activemask();
#else
// will be ignored anyway
return 0xffffffff;
#endif
}
#if defined(__HIP_PLATFORM_HCC__)
__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
{
return __ballot(predicate);
}
#else
__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
{
#ifndef __HIP_PLATFORM_HCC__
return __ballot_sync(mask, predicate);
#else
return __ballot(predicate);
#endif
}
#endif
template<typename T>
struct Bitfield{};
template<>
struct Bitfield<unsigned int> {
static __device__ __forceinline__
unsigned int getBitfield(unsigned int val, int pos, int len) {
#if defined(__HIP_PLATFORM_HCC__)
pos &= 0xff;
len &= 0xff;
unsigned int m = (1u << len) - 1u;
return (val >> pos) & m;
#else
unsigned int ret;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
return ret;
#endif
}
static __device__ __forceinline__
unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
#if defined(__HIP_PLATFORM_HCC__)
pos &= 0xff;
len &= 0xff;
unsigned int m = (1u << len) - 1u;
toInsert &= m;
toInsert <<= pos;
m <<= pos;
return (val & ~m) | toInsert;
#else
unsigned int ret;
asm("bfi.b32 %0, %1, %2, %3, %4;" :
"=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
return ret;
#endif
}
};
template <typename scalar_t,
typename bitwise_t,
typename index_t,
typename CountType,
int RadixSize,
int RadixBits>
__device__ void countRadixUsingMask(
CountType counts[RadixSize],
CountType* smem,
bitwise_t desired,
bitwise_t desiredMask,
int radixDigitPos,
index_t sliceSize,
index_t withinSliceStride,
scalar_t* data) {
// Clear out per-thread counts from a previous round
#pragma unroll
for (int i = 0; i < RadixSize; ++i) {
counts[i] = 0;
}
if (threadIdx.x < RadixSize) {
smem[threadIdx.x] = 0;
}
__syncthreads();
// Scan over all the data. Upon a read, the warp will accumulate
// counts per each digit in the radix using warp voting.
for (index_t i = threadIdx.x; i < sliceSize; i += blockDim.x) {
bitwise_t val =
TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride]));
bool hasVal = ((val & desiredMask) == desired);
bitwise_t digitInRadix =
Bitfield<bitwise_t>::getBitfield(val, radixDigitPos, RadixBits);
#pragma unroll
for (uint32_t j = 0; j < RadixSize; ++j) {
bool vote = hasVal && (digitInRadix == j);
#if defined(__HIP_PLATFORM_HCC__)
counts[j] += __popcll(WARP_BALLOT(vote));
#else
counts[j] += __popc(WARP_BALLOT(vote, ACTIVE_MASK()));
#endif
}
}
// Now, for each warp, sum values
//first thread of warp
if (getLaneId() == 0) {
#pragma unroll
for (uint32_t i = 0; i < RadixSize; ++i) {
gpuAtomicAdd(&smem[i], counts[i]);
}
}
__syncthreads();
// For each thread, read in the total counts
#pragma unroll
for (uint32_t i = 0; i < RadixSize; ++i) {
counts[i] = smem[i];
//printf("%u ",counts[i]);
}
__syncthreads();
}
template <typename scalar_t, typename bitwise_t, typename index_t, bool Order>
__device__ void radixSelect(
scalar_t* data,
index_t k,
index_t sliceSize,
index_t withinSliceStride,
int* smem,
scalar_t* topK) {
// Per-thread buckets into which we accumulate digit counts in our
// radix
int counts[RADIX_SIZE];
// We only consider elements x such that (x & desiredMask) == desired
// Initially, we consider all elements of the array, so the above
// statement is true regardless of input.
bitwise_t desired = 0;
bitwise_t desiredMask = 0;
// We are looking for the top kToFind-th element when iterating over
// digits; this count gets reduced by elimination when counting
// successive digits
int kToFind = k;
// We start at the most signific,ant digit in our radix, scanning
// through to the least significant digit
#pragma unroll
for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0;
digitPos -= RADIX_BITS) {
// Count radix distribution for the current position and reduce
// across all threads
countRadixUsingMask<
scalar_t,
bitwise_t,
index_t,
int,
RADIX_SIZE,
RADIX_BITS>(
counts,
smem,
desired,
desiredMask,
digitPos,
sliceSize,
withinSliceStride,
data);
auto found_unique = [&](int i, int count) -> bool {
/* All threads have the same value in counts here, so all */
/* threads will return from the function. */
if (count == 1 && kToFind == 1) {
/* There is a unique answer. */
desired =
Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS);
desiredMask = Bitfield<bitwise_t>::setBitfield(
desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
/* The answer is now the unique element v such that: */
/* (v & desiredMask) == desired */
/* However, we do not yet know what the actual element is. We */
/* need to perform a search through the data to find the */
/* element that matches this pattern. */
*topK = findPattern<scalar_t, bitwise_t, index_t>(
(scalar_t*)smem,
data,
sliceSize,
withinSliceStride,
desired,
desiredMask);
return true;
}
return false;
};
auto found_non_unique = [&](int i, int count) -> bool {
if (count >= kToFind) {
desired =
Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS);
desiredMask = Bitfield<bitwise_t>::setBitfield(
desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
/* The top-Kth element v must now be one such that: */
/* (v & desiredMask == desired) */
/* but we haven't narrowed it down; we must check the next */
/* least-significant digit */
return true;
}
kToFind -= count;
return false; // continue the loop
};
// All threads participate in the comparisons below to know the
// final result
if (Order) {
// Process in descending order
#pragma unroll
for (int i = RADIX_SIZE - 1; i >= 0; --i) {
int count = counts[i];
//
if (found_unique(i, count)) {
return;
}
if (found_non_unique(i, count)) {
break;
}
}
} else {
// Process in ascending order
#pragma unroll
for (int i = 0; i < RADIX_SIZE; ++i) {
int count = counts[i];
if (found_unique(i, count)) {
return;
}
if (found_non_unique(i, count)) {
break;
}
}
}
} // end digitPos for
// There is no unique result, but there is a non-unique result
// matching `desired` exactly
*topK = TopKTypeConfig<scalar_t>::deconvert(desired);
}
__global__ void __test(float *data,unsigned int sliceSize, float *topk) {
unsigned int withinSliceStride = 1;
__shared__ int smem[64];
radixSelect<float, unsigned int, unsigned int,order>(data, blockIdx.x + 1, sliceSize, withinSliceStride, smem, topk + blockIdx.x);
}
//__shared__ int smem[2];
int main() {
int N = 1024;
float *data_dev,data[N];
float topk[K],*topk_dev;
for(int i = 0;i < N;i++) {
data[i] = rand()%1000;
}
data[N - 1] = 10000;
hipMalloc((void**)&topk_dev, sizeof(float)*K);
hipMalloc((void**)&data_dev, sizeof(float)*N);
hipMemcpy(data_dev,data,sizeof(data), hipMemcpyHostToDevice);
clock_t start ,end;
start = clock();
hipLaunchKernelGGL(( __test), dim3(K),dim3(1024), 0, 0, data_dev, N, topk_dev);
hipDeviceSynchronize();
end = clock();
cout <<"time gpu:" << end - start << "us"<<endl;
hipMemcpy(topk,topk_dev,sizeof(float)*K, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
start = clock();
sort(data,data + N, [](float a, float b) {
return a > b;
});
end = clock();
cout <<"time cpu:" << end - start << "us"<<endl;
hipMemcpy(&topk,topk_dev,sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0;i < K;i++) {
if (data[i] != topk[i]) {
cout <<"index:" << i <<" "<< "failed !!!" << "cpu:" << data[i] << "gpu:" << topk[i] << endl;
}
}
cout <<data[0] <<endl;
return 0;
}
| c31bd0335baeab469fcfdd4669dd8025ffc2b613.cu | #include<stdio.h>
#include<algorithm>
#include<iostream>
#include"./findPattern.h"
#include<ctime>
#define RADIX_SIZE 4
#define RADIX_BITS 2
#define RADIX_MASK 3
#define K 1
#define order true
// bool order = true ;max topk else min topk
using namespace std;
static inline __device__ void gpuAtomicAdd(int32_t *address, int32_t val) {
atomicAdd(address, val);
}
__device__ __forceinline__ int getLaneId() {
#if defined(__HIP_PLATFORM_HCC__)
return __lane_id();
#else
int laneId;
asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
return laneId;
#endif
}
__device__ __forceinline__ unsigned int ACTIVE_MASK()
{
#ifndef __HIP_PLATFORM_HCC__
return __activemask();
#else
// will be ignored anyway
return 0xffffffff;
#endif
}
#if defined(__HIP_PLATFORM_HCC__)
__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate)
{
return __ballot(predicate);
}
#else
__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff)
{
#ifndef __HIP_PLATFORM_HCC__
return __ballot_sync(mask, predicate);
#else
return __ballot(predicate);
#endif
}
#endif
template<typename T>
struct Bitfield{};
template<>
struct Bitfield<unsigned int> {
static __device__ __forceinline__
unsigned int getBitfield(unsigned int val, int pos, int len) {
#if defined(__HIP_PLATFORM_HCC__)
pos &= 0xff;
len &= 0xff;
unsigned int m = (1u << len) - 1u;
return (val >> pos) & m;
#else
unsigned int ret;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
return ret;
#endif
}
static __device__ __forceinline__
unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
#if defined(__HIP_PLATFORM_HCC__)
pos &= 0xff;
len &= 0xff;
unsigned int m = (1u << len) - 1u;
toInsert &= m;
toInsert <<= pos;
m <<= pos;
return (val & ~m) | toInsert;
#else
unsigned int ret;
asm("bfi.b32 %0, %1, %2, %3, %4;" :
"=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
return ret;
#endif
}
};
template <typename scalar_t,
typename bitwise_t,
typename index_t,
typename CountType,
int RadixSize,
int RadixBits>
__device__ void countRadixUsingMask(
CountType counts[RadixSize],
CountType* smem,
bitwise_t desired,
bitwise_t desiredMask,
int radixDigitPos,
index_t sliceSize,
index_t withinSliceStride,
scalar_t* data) {
// Clear out per-thread counts from a previous round
#pragma unroll
for (int i = 0; i < RadixSize; ++i) {
counts[i] = 0;
}
if (threadIdx.x < RadixSize) {
smem[threadIdx.x] = 0;
}
__syncthreads();
// Scan over all the data. Upon a read, the warp will accumulate
// counts per each digit in the radix using warp voting.
for (index_t i = threadIdx.x; i < sliceSize; i += blockDim.x) {
bitwise_t val =
TopKTypeConfig<scalar_t>::convert(doLdg(&data[i * withinSliceStride]));
bool hasVal = ((val & desiredMask) == desired);
bitwise_t digitInRadix =
Bitfield<bitwise_t>::getBitfield(val, radixDigitPos, RadixBits);
#pragma unroll
for (uint32_t j = 0; j < RadixSize; ++j) {
bool vote = hasVal && (digitInRadix == j);
#if defined(__HIP_PLATFORM_HCC__)
counts[j] += __popcll(WARP_BALLOT(vote));
#else
counts[j] += __popc(WARP_BALLOT(vote, ACTIVE_MASK()));
#endif
}
}
// Now, for each warp, sum values
//first thread of warp
if (getLaneId() == 0) {
#pragma unroll
for (uint32_t i = 0; i < RadixSize; ++i) {
gpuAtomicAdd(&smem[i], counts[i]);
}
}
__syncthreads();
// For each thread, read in the total counts
#pragma unroll
for (uint32_t i = 0; i < RadixSize; ++i) {
counts[i] = smem[i];
//printf("%u ",counts[i]);
}
__syncthreads();
}
template <typename scalar_t, typename bitwise_t, typename index_t, bool Order>
__device__ void radixSelect(
scalar_t* data,
index_t k,
index_t sliceSize,
index_t withinSliceStride,
int* smem,
scalar_t* topK) {
// Per-thread buckets into which we accumulate digit counts in our
// radix
int counts[RADIX_SIZE];
// We only consider elements x such that (x & desiredMask) == desired
// Initially, we consider all elements of the array, so the above
// statement is true regardless of input.
bitwise_t desired = 0;
bitwise_t desiredMask = 0;
// We are looking for the top kToFind-th element when iterating over
// digits; this count gets reduced by elimination when counting
// successive digits
int kToFind = k;
// We start at the most signific,ant digit in our radix, scanning
// through to the least significant digit
#pragma unroll
for (int digitPos = sizeof(scalar_t) * 8 - RADIX_BITS; digitPos >= 0;
digitPos -= RADIX_BITS) {
// Count radix distribution for the current position and reduce
// across all threads
countRadixUsingMask<
scalar_t,
bitwise_t,
index_t,
int,
RADIX_SIZE,
RADIX_BITS>(
counts,
smem,
desired,
desiredMask,
digitPos,
sliceSize,
withinSliceStride,
data);
auto found_unique = [&](int i, int count) -> bool {
/* All threads have the same value in counts here, so all */
/* threads will return from the function. */
if (count == 1 && kToFind == 1) {
/* There is a unique answer. */
desired =
Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS);
desiredMask = Bitfield<bitwise_t>::setBitfield(
desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
/* The answer is now the unique element v such that: */
/* (v & desiredMask) == desired */
/* However, we do not yet know what the actual element is. We */
/* need to perform a search through the data to find the */
/* element that matches this pattern. */
*topK = findPattern<scalar_t, bitwise_t, index_t>(
(scalar_t*)smem,
data,
sliceSize,
withinSliceStride,
desired,
desiredMask);
return true;
}
return false;
};
auto found_non_unique = [&](int i, int count) -> bool {
if (count >= kToFind) {
desired =
Bitfield<bitwise_t>::setBitfield(desired, i, digitPos, RADIX_BITS);
desiredMask = Bitfield<bitwise_t>::setBitfield(
desiredMask, RADIX_MASK, digitPos, RADIX_BITS);
/* The top-Kth element v must now be one such that: */
/* (v & desiredMask == desired) */
/* but we haven't narrowed it down; we must check the next */
/* least-significant digit */
return true;
}
kToFind -= count;
return false; // continue the loop
};
// All threads participate in the comparisons below to know the
// final result
if (Order) {
// Process in descending order
#pragma unroll
for (int i = RADIX_SIZE - 1; i >= 0; --i) {
int count = counts[i];
//
if (found_unique(i, count)) {
return;
}
if (found_non_unique(i, count)) {
break;
}
}
} else {
// Process in ascending order
#pragma unroll
for (int i = 0; i < RADIX_SIZE; ++i) {
int count = counts[i];
if (found_unique(i, count)) {
return;
}
if (found_non_unique(i, count)) {
break;
}
}
}
} // end digitPos for
// There is no unique result, but there is a non-unique result
// matching `desired` exactly
*topK = TopKTypeConfig<scalar_t>::deconvert(desired);
}
__global__ void __test(float *data,unsigned int sliceSize, float *topk) {
unsigned int withinSliceStride = 1;
__shared__ int smem[64];
radixSelect<float, unsigned int, unsigned int,order>(data, blockIdx.x + 1, sliceSize, withinSliceStride, smem, topk + blockIdx.x);
}
//__shared__ int smem[2];
int main() {
int N = 1024;
float *data_dev,data[N];
float topk[K],*topk_dev;
for(int i = 0;i < N;i++) {
data[i] = rand()%1000;
}
data[N - 1] = 10000;
cudaMalloc((void**)&topk_dev, sizeof(float)*K);
cudaMalloc((void**)&data_dev, sizeof(float)*N);
cudaMemcpy(data_dev,data,sizeof(data), cudaMemcpyHostToDevice);
clock_t start ,end;
start = clock();
__test<<<K,1024>>>(data_dev, N, topk_dev);
cudaDeviceSynchronize();
end = clock();
cout <<"time gpu:" << end - start << "us"<<endl;
cudaMemcpy(topk,topk_dev,sizeof(float)*K, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
start = clock();
sort(data,data + N, [](float a, float b) {
return a > b;
});
end = clock();
cout <<"time cpu:" << end - start << "us"<<endl;
cudaMemcpy(&topk,topk_dev,sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0;i < K;i++) {
if (data[i] != topk[i]) {
cout <<"index:" << i <<" "<< "failed !!!" << "cpu:" << data[i] << "gpu:" << topk[i] << endl;
}
}
cout <<data[0] <<endl;
return 0;
}
|
dd5bf4b28e7175b24eacce6bb776827bdb8406c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "../DIEKUHDA/kuhda.h"
#include "omp.h"
#include <assert.h>
// run with
// nvcc -O3 -Xcompiler -fopenmp -lcublas ../DIEKUHDA/kuhda.c OMPspeedTest.c && ./a.out
/*
Here we will investigate which OMP approach is the fastest when performing loops in parallel
*/
#define NUMTHREADS 4
int main() {
int n = 18000, tiledim = n/2, tilesize = tiledim*tiledim*sizeof(double);
int device, devicecount = 4;
Timer timer;
timer.Start();
kuhdaWarmup(4);
float elapsedtime = timer.Stop();
printf("Warmup took %f ms\n", elapsedtime);
// Containers for host and device matrices
matrix *h_A, *d_A[devicecount];
double *hostbuffer[devicecount], *hostbuffer_singlerow[devicecount];
int streamsperdevice = 2;
int stream, streamcount = streamsperdevice*devicecount;
hipStream_t d_streams[streamcount];
// Time the allocation loop
printf("Timing the (de)allocation loops for n = %d\n", n);
timer.Start();
h_A = kuhdaMallocM1(n, n);
for (device = 0; device < devicecount; device++){
GPUCHECK(hipSetDevice(device));
d_A[device] = kuhdaMallocDeviceM(tiledim, tiledim);
GPUCHECK(hipHostMalloc((void**) &hostbuffer[device], tilesize));
GPUCHECK(hipHostMalloc((void**) &hostbuffer_singlerow[device], tiledim*sizeof(double)));
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(hipStreamCreate(&d_streams[stream + streamsperdevice*device]));
}
}
elapsedtime = timer.Stop();
printf("Simple allocation took %f ms\n", elapsedtime);
timer.Start();
kuhdaFreeM(h_A, 'k');
for (device = 0; device < devicecount; device++){
GPUCHECK(hipSetDevice(device));
kuhdaFreeM(d_A[device], 'c');
hipHostFree(hostbuffer[device]);
hipHostFree(hostbuffer_singlerow[device]);
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(hipStreamDestroy(d_streams[stream + streamsperdevice*device]));
}
GPUCHECK(hipDeviceSynchronize());
}
elapsedtime = timer.Stop();
printf("Simple destruction took %f ms\n", elapsedtime);
// Time the allocation loop
timer.Start();
h_A = kuhdaMallocM1(n, n);
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
GPUCHECK(hipSetDevice(device));
d_A[device] = kuhdaMallocDeviceM(tiledim, tiledim);
GPUCHECK(hipHostMalloc((void**) &hostbuffer[device], tilesize));
GPUCHECK(hipHostMalloc((void**) &hostbuffer_singlerow[device], tiledim*sizeof(double)));
#pragma unroll
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(hipStreamCreate(&d_streams[stream + streamsperdevice*device]));
}
}
elapsedtime = timer.Stop();
printf("Prllel allocation took %f ms\n", elapsedtime);
timer.Start();
kuhdaFreeM(h_A, 'k');
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
GPUCHECK(hipSetDevice(device));
kuhdaFreeM(d_A[device], 'c');
hipHostFree(hostbuffer[device]);
hipHostFree(hostbuffer_singlerow[device]);
#pragma unroll
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(hipStreamDestroy(d_streams[stream + streamsperdevice*device]));
}
GPUCHECK(hipDeviceSynchronize());
}
elapsedtime = timer.Stop();
printf("Prllel destruction took %f ms\n", elapsedtime);
return 0;
}
| dd5bf4b28e7175b24eacce6bb776827bdb8406c8.cu | #include <stdio.h>
#include "../DIEKUHDA/kuhda.h"
#include "omp.h"
#include <assert.h>
// run with
// nvcc -O3 -Xcompiler -fopenmp -lcublas ../DIEKUHDA/kuhda.c OMPspeedTest.c && ./a.out
/*
Here we will investigate which OMP approach is the fastest when performing loops in parallel
*/
#define NUMTHREADS 4
int main() {
int n = 18000, tiledim = n/2, tilesize = tiledim*tiledim*sizeof(double);
int device, devicecount = 4;
Timer timer;
timer.Start();
kuhdaWarmup(4);
float elapsedtime = timer.Stop();
printf("Warmup took %f ms\n", elapsedtime);
// Containers for host and device matrices
matrix *h_A, *d_A[devicecount];
double *hostbuffer[devicecount], *hostbuffer_singlerow[devicecount];
int streamsperdevice = 2;
int stream, streamcount = streamsperdevice*devicecount;
cudaStream_t d_streams[streamcount];
// Time the allocation loop
printf("Timing the (de)allocation loops for n = %d\n", n);
timer.Start();
h_A = kuhdaMallocM1(n, n);
for (device = 0; device < devicecount; device++){
GPUCHECK(cudaSetDevice(device));
d_A[device] = kuhdaMallocDeviceM(tiledim, tiledim);
GPUCHECK(cudaMallocHost((void**) &hostbuffer[device], tilesize));
GPUCHECK(cudaMallocHost((void**) &hostbuffer_singlerow[device], tiledim*sizeof(double)));
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(cudaStreamCreate(&d_streams[stream + streamsperdevice*device]));
}
}
elapsedtime = timer.Stop();
printf("Simple allocation took %f ms\n", elapsedtime);
timer.Start();
kuhdaFreeM(h_A, 'k');
for (device = 0; device < devicecount; device++){
GPUCHECK(cudaSetDevice(device));
kuhdaFreeM(d_A[device], 'c');
cudaFreeHost(hostbuffer[device]);
cudaFreeHost(hostbuffer_singlerow[device]);
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(cudaStreamDestroy(d_streams[stream + streamsperdevice*device]));
}
GPUCHECK(cudaDeviceSynchronize());
}
elapsedtime = timer.Stop();
printf("Simple destruction took %f ms\n", elapsedtime);
// Time the allocation loop
timer.Start();
h_A = kuhdaMallocM1(n, n);
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
GPUCHECK(cudaSetDevice(device));
d_A[device] = kuhdaMallocDeviceM(tiledim, tiledim);
GPUCHECK(cudaMallocHost((void**) &hostbuffer[device], tilesize));
GPUCHECK(cudaMallocHost((void**) &hostbuffer_singlerow[device], tiledim*sizeof(double)));
#pragma unroll
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(cudaStreamCreate(&d_streams[stream + streamsperdevice*device]));
}
}
elapsedtime = timer.Stop();
printf("Prllel allocation took %f ms\n", elapsedtime);
timer.Start();
kuhdaFreeM(h_A, 'k');
#pragma omp parallel for private(device) num_threads(NUMTHREADS)
for (device = 0; device < devicecount; device++){
GPUCHECK(cudaSetDevice(device));
kuhdaFreeM(d_A[device], 'c');
cudaFreeHost(hostbuffer[device]);
cudaFreeHost(hostbuffer_singlerow[device]);
#pragma unroll
for (stream = 0; stream < streamsperdevice; ++stream){
GPUCHECK(cudaStreamDestroy(d_streams[stream + streamsperdevice*device]));
}
GPUCHECK(cudaDeviceSynchronize());
}
elapsedtime = timer.Stop();
printf("Prllel destruction took %f ms\n", elapsedtime);
return 0;
}
|
208d1075024a92a44aea284083cfde97e697be82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <stdio.h>
#include <tchar.h>
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
int WINAPI WinMain(_In_ HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nShowCmd)
{
int c;
int* dev_c;
dev_c=new int(1);
int handle=hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add) , dim3(2), dim3(2), 0, 0, 2, 7, dev_c);
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
char result[11];
itoa(c, result, 10);
MessageBox(NULL, TEXT(result), "result", MB_OK);
hipFree(dev_c);
size_t free_byte;
size_t total_byte;
hipError_t cuda_status = hipMemGetInfo(&free_byte, &total_byte);
itoa(free_byte, result, 10);
MessageBox(NULL, TEXT(result), "result", MB_OK);
return 0;
} | 208d1075024a92a44aea284083cfde97e697be82.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <Windows.h>
#include <stdio.h>
#include <tchar.h>
__global__ void add(int a, int b, int* c)
{
*c = a + b;
}
int WINAPI WinMain(_In_ HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nShowCmd)
{
int c;
int* dev_c;
dev_c=new int(1);
int handle=cudaMalloc((void**)&dev_c, sizeof(int));
add <<<2, 2>>> (2, 7, dev_c);
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
char result[11];
itoa(c, result, 10);
MessageBox(NULL, TEXT(result), "result", MB_OK);
cudaFree(dev_c);
size_t free_byte;
size_t total_byte;
cudaError_t cuda_status = cudaMemGetInfo(&free_byte, &total_byte);
itoa(free_byte, result, 10);
MessageBox(NULL, TEXT(result), "result", MB_OK);
return 0;
} |
61fb8bacb1fa4720c26d4aca6dfe160c430619e6.hip | // !!! This is a file automatically generated by hipify!!!
#include<utility>
#include<stdio.h>
#include<assert.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
enum method_type { JACOBI, GS, SOR };
template <typename method_type>
__host__ __device__
double iterativeOperation(const double leftMatrix, const double centerMatrix, const double rightMatrix, const double topMatrix, const double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, const double centerRhs, int gridPoint, method_type method)
{
double gridValue = centerX;
switch(method)
{
case JACOBI:
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
case GS:
if (gridPoint % 2 == 1) {
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
}
case SOR:
double relaxation = 1.9939;
if (gridPoint % 2 == 1) {
return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX;
}
}
return gridValue;
}
template <typename method_type>
__host__ __device__
double iterativeOperation2(const double leftMatrix, const double centerMatrix, const double rightMatrix, const double topMatrix, const double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, const double centerRhs, int gridPoint, method_type method)
{
double gridValue = centerX;
switch(method)
{
case JACOBI:
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
case GS:
if (gridPoint % 2 == 0) {
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
}
case SOR:
double relaxation = 1.9939;
if (gridPoint % 2 == 0) {
return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX;
}
}
return gridValue;
}
double normFromRow(double leftMatrix, double centerMatrix, double rightMatrix, double topMatrix, double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, double centerRhs)
{
return centerRhs - (leftMatrix*leftX + centerMatrix*centerX + rightMatrix*rightX + topMatrix*topX + bottomMatrix*bottomX);
}
double Residual(const double * solution, const double * rhs, const double * leftMatrix, const double * centerMatrix, const double * rightMatrix, const double * topMatrix, const double * bottomMatrix, int nxGrids, int nyGrids)
{
int nDofs = nxGrids * nyGrids;
double residual = 0.0;
for (int iGrid = 0; iGrid < nDofs; iGrid++) {
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : solution[iGrid-1];
double centerX = solution[iGrid];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : solution[iGrid+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? solution[iGrid + nxGrids] : 0.0;
double bottomX = (iGrid >= nxGrids) ? solution[iGrid-nxGrids] : 0.0;
double residualContributionFromRow = normFromRow(leftMatrix[iGrid], centerMatrix[iGrid], rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid], leftX, centerX, rightX, topX, bottomX, rhs[iGrid]);
residual = residual + residualContributionFromRow * residualContributionFromRow;
}
residual = sqrt(residual);
return residual;
}
double * iterativeCpu(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix,
const double * bottomMatrix, int nxGrids, int nyGrids,
int nIters, int method)
{
int nDofs = nxGrids * nyGrids;
double * x0 = new double[nDofs];
double * x1 = new double[nDofs];
memcpy(x0, initX, sizeof(double) * nDofs);
memcpy(x1, initX, sizeof(double)* nDofs);
for (int iIter = 0; iIter < nIters; ++ iIter) {
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
double leftX = ((iGrid % nxGrids) == 0) ? 0.0f : x0[iGrid - 1];
double centerX = x0[iGrid];
double rightX = (((iGrid + 1) % nxGrids) == 0) ? 0.0f : x0[iGrid + 1];
double bottomX = (iGrid < nxGrids) ? 0.0f : x0[iGrid - nxGrids];
double topX = (iGrid < nDofs - nxGrids) ? x0[iGrid + nxGrids] : 0.0f;
if (iIter % 2 == 0) {
x1[iGrid] = iterativeOperation(leftMatrix[iGrid], centerMatrix[iGrid], rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
else {
x1[iGrid] = iterativeOperation2(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX,
rhs[iGrid], iGrid, method);
}
}
double * tmp = x0; x0 = x1; x1 = tmp;
}
delete[] x1;
return x0;
}
__global__
void _iterativeGpuClassicIteration(double * x1, const double * x0, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int iteration, int method)
{
int ixGrid = blockIdx.x * blockDim.x + threadIdx.x; // Col
int iyGrid = blockIdx.y * blockDim.y + threadIdx.y; // Row
int iGrid = iyGrid * (nxGrids) + ixGrid;
int nDofs = nxGrids * nyGrids;
if (iGrid < nDofs) {
double leftX = (ixGrid == 0) ? 0.0f : x0[iGrid - 1] ;
double centerX = x0[iGrid];
double rightX = (ixGrid == nxGrids - 1) ? 0.0f : x0[iGrid + 1];
double topX = (iyGrid == nyGrids - 1) ? 0.0f : x0[iGrid + nxGrids];
double bottomX = (iyGrid == 0) ? 0.0f : x0[iGrid - nxGrids];
if (iteration % 2 == 0) {
x1[iGrid] = iterativeOperation(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
else {
x1[iGrid] = iterativeOperation2(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
}
__syncthreads();
}
double * iterativeGpuClassic(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int nIters, const int threadsPerBlock, int method)
{
int nDofs = nxGrids * nyGrids;
// Allocate memory in the CPU for the solution
double * x0Gpu, * x1Gpu;
hipMalloc(&x0Gpu, sizeof(double) * nDofs);
hipMalloc(&x1Gpu, sizeof(double) * nDofs);
// Allocate CPU memory for other variables
double * rhsGpu, * leftMatrixGpu, * rightMatrixGpu, * centerMatrixGpu, * topMatrixGpu, * bottomMatrixGpu;
hipMalloc(&rhsGpu, sizeof(double) * nDofs);
hipMalloc(&leftMatrixGpu, sizeof(double) * nDofs);
hipMalloc(¢erMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&rightMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&topMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&bottomMatrixGpu, sizeof(double) * nDofs);
// Allocate GPU memory
hipMemcpy(x0Gpu, initX, sizeof(double) * nDofs, hipMemcpyHostToDevice);
hipMemcpy(rhsGpu, rhs, sizeof(double) * nDofs, hipMemcpyHostToDevice);
hipMemcpy(leftMatrixGpu, leftMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(centerMatrixGpu, centerMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(rightMatrixGpu, rightMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(topMatrixGpu, topMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(bottomMatrixGpu, bottomMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
// Run the classic iteration for prescribed number of iterations
// int threadsPerBlock = 16;
int nxBlocks = (int)ceil(nxGrids / (double)threadsPerBlock);
int nyBlocks = (int)ceil(nyGrids / (double)threadsPerBlock);
dim3 grid(nxBlocks, nyBlocks);
dim3 block(threadsPerBlock, threadsPerBlock);
for (int iIter = 0; iIter < nIters; ++iIter) {
// Jacobi iteration on the CPU (used to be <<<nBlocks, threadsPerBlock>>>)
hipLaunchKernelGGL(( _iterativeGpuClassicIteration), dim3(grid), dim3(block), 0, 0,
x1Gpu, x0Gpu, rhsGpu, leftMatrixGpu, centerMatrixGpu,
rightMatrixGpu, topMatrixGpu, bottomMatrixGpu,
nxGrids, nyGrids, iIter, method);
double * tmp = x1Gpu; x0Gpu = x1Gpu; x1Gpu = tmp;
}
// Write solution from GPU to CPU variable
double * solution = new double[nDofs];
hipMemcpy(solution, x0Gpu, sizeof(double) * nDofs,
hipMemcpyDeviceToHost);
// Free all memory
hipFree(x0Gpu);
hipFree(x1Gpu);
hipFree(rhsGpu);
hipFree(leftMatrixGpu);
hipFree(centerMatrixGpu);
hipFree(rightMatrixGpu);
return solution;
}
//// SWEPT METHODS HERE ////
__device__
void __iterativeBlockUpdateToLeftRight(double * xLeftBlock, double * xRightBlock, const double *rhsBlock,
const double * leftMatrixBlock, const double *centerMatrixBlock, const double * rightMatrixBlock,
const double * topMatrixBlock, const double * bottomMatrixBlock, int nxGrids, int nyGrids, int iGrid, int method, int maxSteps)
{
extern __shared__ double sharedMemory[];
double * x0 = sharedMemory;
int elemPerBlock = blockDim.x * blockDim.y;
double * x1 = sharedMemory + elemPerBlock;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if ((threadIdx.x >= 1 && threadIdx.x <= blockDim.x-2) && (threadIdx.y >= 1 && threadIdx.y <= blockDim.y-2)) {
for (int k = 0; k < maxSteps; k++) {
// Define necessary constants
double centerRhs = rhsBlock[idx];
double leftMatrix = leftMatrixBlock[idx];
double centerMatrix = centerMatrixBlock[idx];
double rightMatrix = rightMatrixBlock[idx];
double topMatrix = topMatrixBlock[idx];
double bottomMatrix = bottomMatrixBlock[idx];
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : x0[idx-1];
double centerX = x0[idx];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : x0[idx+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? x0[idx+blockDim.x] : 0.0;
double bottomX = (iGrid >= nxGrids) ? x0[idx-blockDim.x] : 0.0;
//printf("In iGrid %d, idx = %d, left %f, right %f, center %f, top %f, bottom %f\n", iGrid, idx, leftX, rightX, centerX, topX, bottomX );
// Perform update
x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix,
leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method);
// Synchronize
__syncthreads();
double * tmp; tmp = x0; x0 = x1;
}
}
// Save xLeft, xRight, xTop, xBottom
if (idx < (blockDim.x * blockDim.y)/2) {
xLeftBlock[idx] = x0[threadIdx.x * blockDim.x + threadIdx.y];
xRightBlock[idx] = x0[(blockDim.x-1-threadIdx.y) + threadIdx.x * blockDim.x];
}
__syncthreads();
}
__device__
void __iterativeBlockUpdateToNorthSouth(double * xTopBlock, double * xBottomBlock, const double *rhsBlock,
const double * leftMatrixBlock, const double *centerMatrixBlock, const double * rightMatrixBlock,
const double * topMatrixBlock, const double * bottomMatrixBlock, int nxGrids, int nyGrids, int iGrid, int method, int maxSteps)
{
extern __shared__ double sharedMemory[];
double * x0 = sharedMemory;
int elemPerBlock = blockDim.x * blockDim.y;
double * x1 = sharedMemory + elemPerBlock;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if ((threadIdx.x >= 1 && threadIdx.x <= blockDim.x-2) && (threadIdx.y >= 1 && threadIdx.y <= blockDim.y-2)) {
for (int k = 0; k < maxSteps; k++) {
// Define necessary constants
double centerRhs = rhsBlock[idx];
double leftMatrix = leftMatrixBlock[idx];
double centerMatrix = centerMatrixBlock[idx];
double rightMatrix = rightMatrixBlock[idx];
double topMatrix = topMatrixBlock[idx];
double bottomMatrix = bottomMatrixBlock[idx];
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : x0[idx-1];
double centerX = x0[idx];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : x0[idx+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? x0[idx+blockDim.x] : 0.0;
double bottomX = (iGrid >= nxGrids) ? x0[idx-blockDim.x] : 0.0;
// Perform update
x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix,
leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method);
// Synchronize
__syncthreads();
double * tmp; tmp = x0; x0 = x1;
}
}
// Return values for xTop and xBottom here
if (idx < (blockDim.x * blockDim.y)/2) {
xBottomBlock[idx] = x0[idx];
xTopBlock[idx] = x0[threadIdx.x + (blockDim.x)*(blockDim.x-1-threadIdx.y)];
}
__syncthreads();
}
__global__
void _iterativeGpuOriginal(double * xLeftGpu, double *xRightGpu,
const double * x0Gpu, const double *rhsGpu,
const double * leftMatrixGpu, const double *centerMatrixGpu, const double * rightMatrixGpu,
const double * topMatrixGpu, const double * bottomMatrixGpu, int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
const double * x0Block = x0Gpu + blockShift;
const double * rhsBlock = rhsGpu + blockShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift;
const double * topMatrixBlock = topMatrixGpu + blockShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift;
int numElementsPerBlock = blockDim.x * blockDim.y;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = (numElementsPerBlock*blockID)/2;
double * xLeftBlock = xLeftGpu + arrayShift;
double * xRightBlock = xRightGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + idx;
extern __shared__ double sharedMemory[];
sharedMemory[threadIdx.x + threadIdx.y * blockDim.x] = x0Block[threadIdx.x + threadIdx.y * nxGrids];
sharedMemory[threadIdx.x + threadIdx.y * blockDim.x + blockDim.x * blockDim.y] = x0Block[threadIdx.x + threadIdx.y * nxGrids];
__syncthreads();
__iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuHorizontalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int horizontalShift = blockDim.x/2;
const double * rhsBlock = rhsGpu + blockShift; //+ horizontalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ horizontalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ horizontalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ horizontalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ horizontalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ horizontalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xLeftBlock = xRightGpu + arrayShift;
double * xRightBlock = (blockIdx.x != gridDim.x-1) ?
xLeftGpu + arrayShift + numElementsPerBlock :
xLeftGpu + (numElementsPerBlock * blockIdx.y * gridDim.x);
double * xBottomBlock = xBottomGpu + arrayShift;
double * xTopBlock = xTopGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + idx + horizontalShift;
if ((blockIdx.x == gridDim.x-1) && threadIdx.x >= (blockDim.x/2)) {
iGrid = iGrid - nxGrids;
}
// printf("In loop: I am idx %d and grid point %d\n", idx, iGrid);
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.x < blockDim.x/2) {
sharedMemory[idx] = xLeftBlock[threadIdx.y + (blockDim.x/2-1-threadIdx.x)*blockDim.y];
}
else {
sharedMemory[idx] = xRightBlock[threadIdx.y + (threadIdx.x-(blockDim.x/2))*blockDim.y];
}
__syncthreads();
__iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuVerticalandHorizontalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int horizontalShift = blockDim.x/2;
int verticalShift = blockDim.y/2 * nxGrids;
const double * rhsBlock = rhsGpu + blockShift; //+ verticalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ verticalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ verticalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ verticalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ verticalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ verticalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xBottomBlock = xTopGpu + arrayShift;
double * xTopBlock = (blockIdx.y != gridDim.y-1) ?
xBottomGpu + arrayShift + numElementsPerBlock * gridDim.x :
xBottomGpu + (numElementsPerBlock * blockIdx.x);
double * xLeftBlock = xLeftGpu + arrayShift;
double * xRightBlock = xRightGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + verticalShift + horizontalShift + idx;
if ((blockIdx.x == gridDim.x-1) && threadIdx.x >= (blockDim.x/2)) {
iGrid = iGrid - nxGrids;
}
int nDofs = nxGrids * nyGrids;
if ((blockIdx.y == gridDim.y-1) && threadIdx.y >= (blockDim.y/2)) {
iGrid = iGrid - nDofs;
}
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (idx < numElementsPerBlock) {
sharedMemory[idx] = xBottomBlock[threadIdx.x + (blockDim.y/2-1-threadIdx.y)*blockDim.x];
}
else {
sharedMemory[idx] = xTopBlock[threadIdx.x + (threadIdx.y-(blockDim.y/2))*blockDim.x];
}
__syncthreads();
__iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuVerticalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int verticalShift = blockDim.y/2 * nxGrids;
const double * rhsBlock = rhsGpu + blockShift; //+ verticalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ verticalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ verticalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ verticalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ verticalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ verticalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xRightBlock = xLeftGpu + arrayShift;
double * xLeftBlock = (blockIdx.x != 0) ?
xRightGpu + arrayShift - numElementsPerBlock :
xRightGpu + numElementsPerBlock * ((gridDim.x-1) + blockIdx.y * gridDim.x);
double * xBottomBlock = xBottomGpu + arrayShift;
double * xTopBlock = xTopGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int nDofs = nxGrids * nyGrids;
int iGrid = blockShift + verticalShift + threadIdx.y * nxGrids + threadIdx.x;
iGrid = (iGrid >= nDofs) ? iGrid - nDofs : iGrid;
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.x < blockDim.x/2) {
sharedMemory[idx] = xLeftBlock[threadIdx.y + (blockDim.x/2-1-threadIdx.x)*blockDim.y];
}
else {
sharedMemory[idx] = xRightBlock[threadIdx.y + (threadIdx.x-(blockDim.x/2))*blockDim.y];
}
__syncthreads();
__iterativeBlockUpdateToNorthSouth( xTopBlock, xBottomBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _finalSolution(double * xTopGpu, double * xBottomGpu, double * x0Gpu, int nxGrids)
{
extern __shared__ double sharedMemory[];
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xTopBlock = xBottomGpu + arrayShift;
double * xBottomBlock = (blockIdx.y != 0) ?
xTopGpu + (blockIdx.x + (blockIdx.y-1) * gridDim.x) * numElementsPerBlock :
xTopGpu + (gridDim.x * (gridDim.y-1) + blockIdx.x) * numElementsPerBlock;
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
double * x0Block = x0Gpu + blockShift;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if (idx < (blockDim.x * blockDim.y)/2) {
sharedMemory[idx + numElementsPerBlock] = xTopBlock[idx];
sharedMemory[threadIdx.x + (blockDim.x)*(blockDim.x/2-1-threadIdx.y)] = xBottomBlock[idx];
}
__syncthreads();
double * x0 = x0Gpu + blockShift;
idx = threadIdx.x + threadIdx.y * nxGrids;
x0[threadIdx.x + threadIdx.y * nxGrids] = sharedMemory[threadIdx.x + threadIdx.y * blockDim.x];
}
///////////////////////////////////////////////////
double * iterativeGpuSwept(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int nIters, int maxSteps, const int threadsPerBlock, const int method)
{
// Determine number of threads and blocks
const int nxBlocks = (int)ceil(nxGrids / (double)threadsPerBlock);
const int nyBlocks = (int)ceil(nyGrids / (double)threadsPerBlock);
const int nDofs = nxGrids * nyGrids;
dim3 grid(nxBlocks, nyBlocks);
dim3 block(threadsPerBlock, threadsPerBlock);
// Allocate memory for solution and inputs
double *xLeftGpu, *xRightGpu, *xTopGpu, *xBottomGpu;
int numSharedElemPerBlock = threadsPerBlock * (threadsPerBlock / 2 + 1);
hipMalloc(&xLeftGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
hipMalloc(&xRightGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
hipMalloc(&xTopGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
hipMalloc(&xBottomGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
double * x0Gpu, * rhsGpu, * leftMatrixGpu, * rightMatrixGpu, * centerMatrixGpu, * topMatrixGpu, * bottomMatrixGpu;
hipMalloc(&x0Gpu, sizeof(double) * nDofs);
hipMalloc(&rhsGpu, sizeof(double) * nDofs);
hipMalloc(&leftMatrixGpu, sizeof(double) * nDofs);
hipMalloc(¢erMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&rightMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&topMatrixGpu, sizeof(double) * nDofs);
hipMalloc(&bottomMatrixGpu, sizeof(double) * nDofs);
// Allocate memory in the GPU
hipMemcpy(x0Gpu, initX, sizeof(double) * nDofs, hipMemcpyHostToDevice);
hipMemcpy(rhsGpu, rhs, sizeof(double) * nDofs, hipMemcpyHostToDevice);
hipMemcpy(leftMatrixGpu, leftMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(centerMatrixGpu, centerMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(rightMatrixGpu, rightMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(topMatrixGpu, topMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
hipMemcpy(bottomMatrixGpu, bottomMatrix, sizeof(double) * nDofs,
hipMemcpyHostToDevice);
int sharedBytes = 2 * threadsPerBlock * threadsPerBlock * sizeof(double);
for (int i = 0; i < nIters; i++) {
// APPLY METHOD TO ADVANCE POINTS (NO SHIFT)
hipLaunchKernelGGL(( _iterativeGpuOriginal) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, x0Gpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY HORIZONTAL SHIFT
hipLaunchKernelGGL(( _iterativeGpuHorizontalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY VERTICAL SHIFT (ALONG WITH PREVIOUS HORIZONTAL SHIFT)
hipLaunchKernelGGL(( _iterativeGpuVerticalandHorizontalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY VERTICAL SHIFT
hipLaunchKernelGGL(( _iterativeGpuVerticalShift) , dim3(grid), dim3(block), sharedBytes, 0, xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY FINAL STEP
hipLaunchKernelGGL(( _finalSolution) , dim3(grid), dim3(block), sharedBytes, 0, xTopGpu, xBottomGpu, x0Gpu, nxGrids);
}
double * solution = new double[nDofs];
hipMemcpy(solution, x0Gpu, sizeof(double) * nDofs,
hipMemcpyDeviceToHost);
hipFree(x0Gpu);
hipFree(xLeftGpu);
hipFree(xRightGpu);
hipFree(rhsGpu);
hipFree(leftMatrixGpu);
hipFree(centerMatrixGpu);
hipFree(rightMatrixGpu);
return solution;
}
int main(int argc, char *argv[])
{
// Ask user for inputs
const int nxGrids = atoi(argv[1]);
const int nyGrids = atoi(argv[1]);
const int threadsPerBlock = atoi(argv[2]);
const int nIters = atoi(argv[3]);
const int nCycles = atoi(argv[4]);
const int maxSteps = atoi(argv[5]);
method_type method = JACOBI;
int nDofs = nxGrids * nyGrids;
// Declare arrays and population with values for Poisson equation
double * initX = new double[nDofs];
double * rhs = new double[nDofs];
double * leftMatrix = new double[nDofs];
double * centerMatrix = new double[nDofs];
double * rightMatrix = new double[nDofs];
double * bottomMatrix = new double[nDofs];
double * topMatrix = new double[nDofs];
double dx = 1.0f / (nxGrids + 1);
double dy = 1.0f / (nyGrids + 1);
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
initX[iGrid] = (double)iGrid;
rhs[iGrid] = 1.0f;
leftMatrix[iGrid] = -1.0f / (dx * dx);
centerMatrix[iGrid] = 2.0f / (dx * dx) + 2.0f / (dy * dy);
rightMatrix[iGrid] = -1.0f / (dx * dx);
bottomMatrix[iGrid] = -1.0f / (dy * dy);
topMatrix[iGrid] = -1.0f / (dy * dy);
}
// Run the CPU Implementation and measure the time required
clock_t cpuStartTime = clock();
double * solutionCpu = iterativeCpu(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nIters, method);
clock_t cpuEndTime = clock();
double cpuTime = (cpuEndTime - cpuStartTime) / (double) CLOCKS_PER_SEC;
// Run the Classic GPU Implementation and measure the time required
hipEvent_t startClassic, stopClassic;
float timeClassic;
hipEventCreate( &startClassic );
hipEventCreate( &stopClassic );
hipEventRecord(startClassic, 0);
double * solutionGpuClassic = iterativeGpuClassic(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nIters, threadsPerBlock, method);
hipEventRecord(stopClassic, 0);
hipEventSynchronize(stopClassic);
hipEventElapsedTime(&timeClassic, startClassic, stopClassic);
// Run the Swept GPU Implementation and measure the time required
hipEvent_t startSwept, stopSwept;
float timeSwept;
hipEventCreate( &startSwept );
hipEventCreate( &stopSwept );
hipEventRecord( startSwept, 0);
double * solutionGpuSwept = iterativeGpuSwept(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nCycles, maxSteps, threadsPerBlock, method);
hipEventRecord(stopSwept, 0);
hipEventSynchronize(stopSwept);
hipEventElapsedTime(&timeSwept, startSwept, stopSwept);
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
printf("Number of total grid points: %d\n", nDofs);
printf("Number of grid points in x-direction: %d\n", nxGrids);
printf("Number of grid points in y-direction: %d\n", nyGrids);
printf("Threads Per Block in each direction: %d\n", threadsPerBlock);
printf("Method used: %d\n", method);
printf("Number of Iterations performed: %d\n", nIters);
printf("\n");
// Print out results to the screen, notify if any GPU Classic or Swept values differ significantly
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
printf("%d %f %f %f \n",iGrid, solutionCpu[iGrid],
solutionGpuClassic[iGrid],
solutionGpuSwept[iGrid]);
//assert(solutionGpuClassic[iGrid] == solutionGpuSwept[iGrid]);
// if (abs(solutionGpuClassic[iGrid] - solutionGpuSwept[iGrid]) > 1e-2) {
// printf("For grid point %d, Classic and Swept give %f and %f respectively\n", iGrid, solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]);
// }
}
// Print out time for cpu, classic gpu, and swept gpu approaches
double cpuTimePerIteration = (cpuTime / nIters) * 1e3;
double classicTimePerIteration = timeClassic / nIters;
double sweptTimePerIteration = timeSwept / nIters;
double timeMultiplier = classicTimePerIteration / sweptTimePerIteration;
/* printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration);
printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration);
printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration); */
printf("Total Time needed for the CPU: %f ms\n", cpuTime * 1e3);
printf("Total Time needed for the Classic GPU is %f ms\n", timeClassic);
printf("Total Time needed for the Swept GPU: %f ms\n", timeSwept);
// Compute the residual of the resulting solution (||b-Ax||)
double residualCPU = Residual(solutionGpuClassic, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
double residualClassicGPU = Residual(solutionGpuClassic, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
double residualSweptGPU = Residual(solutionGpuSwept, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
printf("Residual of the CPU solution is %f\n", residualCPU);
printf("Residual of the Classic GPU solution is %f\n", residualClassicGPU);
printf("Residual of the Swept GPU solution is %f\n", residualSweptGPU);
// Save residual to a file
/* std::ofstream residuals;
residuals.open("residual-gs.txt",std::ios_base::app);
residuals << nGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << residualSwept << "\n";
residuals.close(); */
// Save Results to a file "N tpb Iterations CPUTime/perstep ClassicTime/perstep SweptTime/perStep ClassicTime/SweptTime"
std::ofstream timings;
timings.open("time.txt",std::ios_base::app);
// timings << nxGrids << "\t" << nyGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << cpuTimePerIteration << "\t" << classicTimePerIteration << "\t" << sweptTimePerIteration << "\t" << timeMultiplier << "\n";
timings.close();
// Free memory
hipEventDestroy(startClassic);
hipEventDestroy(startSwept);
delete[] initX;
delete[] rhs;
delete[] leftMatrix;
delete[] centerMatrix;
delete[] rightMatrix;
delete[] solutionCpu;
delete[] solutionGpuClassic;
}
| 61fb8bacb1fa4720c26d4aca6dfe160c430619e6.cu | #include<utility>
#include<stdio.h>
#include<assert.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
enum method_type { JACOBI, GS, SOR };
template <typename method_type>
__host__ __device__
double iterativeOperation(const double leftMatrix, const double centerMatrix, const double rightMatrix, const double topMatrix, const double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, const double centerRhs, int gridPoint, method_type method)
{
double gridValue = centerX;
switch(method)
{
case JACOBI:
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
case GS:
if (gridPoint % 2 == 1) {
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
}
case SOR:
double relaxation = 1.9939;
if (gridPoint % 2 == 1) {
return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX;
}
}
return gridValue;
}
template <typename method_type>
__host__ __device__
double iterativeOperation2(const double leftMatrix, const double centerMatrix, const double rightMatrix, const double topMatrix, const double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, const double centerRhs, int gridPoint, method_type method)
{
double gridValue = centerX;
switch(method)
{
case JACOBI:
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
case GS:
if (gridPoint % 2 == 0) {
return gridValue = (centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix;
}
case SOR:
double relaxation = 1.9939;
if (gridPoint % 2 == 0) {
return gridValue = relaxation*((centerRhs - (leftMatrix * leftX + rightMatrix * rightX + topMatrix * topX + bottomMatrix * bottomX)) / centerMatrix) + (1.0-relaxation)*centerX;
}
}
return gridValue;
}
double normFromRow(double leftMatrix, double centerMatrix, double rightMatrix, double topMatrix, double bottomMatrix, double leftX, double centerX, double rightX, double topX, double bottomX, double centerRhs)
{
return centerRhs - (leftMatrix*leftX + centerMatrix*centerX + rightMatrix*rightX + topMatrix*topX + bottomMatrix*bottomX);
}
double Residual(const double * solution, const double * rhs, const double * leftMatrix, const double * centerMatrix, const double * rightMatrix, const double * topMatrix, const double * bottomMatrix, int nxGrids, int nyGrids)
{
int nDofs = nxGrids * nyGrids;
double residual = 0.0;
for (int iGrid = 0; iGrid < nDofs; iGrid++) {
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : solution[iGrid-1];
double centerX = solution[iGrid];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : solution[iGrid+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? solution[iGrid + nxGrids] : 0.0;
double bottomX = (iGrid >= nxGrids) ? solution[iGrid-nxGrids] : 0.0;
double residualContributionFromRow = normFromRow(leftMatrix[iGrid], centerMatrix[iGrid], rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid], leftX, centerX, rightX, topX, bottomX, rhs[iGrid]);
residual = residual + residualContributionFromRow * residualContributionFromRow;
}
residual = sqrt(residual);
return residual;
}
double * iterativeCpu(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix,
const double * bottomMatrix, int nxGrids, int nyGrids,
int nIters, int method)
{
int nDofs = nxGrids * nyGrids;
double * x0 = new double[nDofs];
double * x1 = new double[nDofs];
memcpy(x0, initX, sizeof(double) * nDofs);
memcpy(x1, initX, sizeof(double)* nDofs);
for (int iIter = 0; iIter < nIters; ++ iIter) {
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
double leftX = ((iGrid % nxGrids) == 0) ? 0.0f : x0[iGrid - 1];
double centerX = x0[iGrid];
double rightX = (((iGrid + 1) % nxGrids) == 0) ? 0.0f : x0[iGrid + 1];
double bottomX = (iGrid < nxGrids) ? 0.0f : x0[iGrid - nxGrids];
double topX = (iGrid < nDofs - nxGrids) ? x0[iGrid + nxGrids] : 0.0f;
if (iIter % 2 == 0) {
x1[iGrid] = iterativeOperation(leftMatrix[iGrid], centerMatrix[iGrid], rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
else {
x1[iGrid] = iterativeOperation2(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX,
rhs[iGrid], iGrid, method);
}
}
double * tmp = x0; x0 = x1; x1 = tmp;
}
delete[] x1;
return x0;
}
__global__
void _iterativeGpuClassicIteration(double * x1, const double * x0, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int iteration, int method)
{
int ixGrid = blockIdx.x * blockDim.x + threadIdx.x; // Col
int iyGrid = blockIdx.y * blockDim.y + threadIdx.y; // Row
int iGrid = iyGrid * (nxGrids) + ixGrid;
int nDofs = nxGrids * nyGrids;
if (iGrid < nDofs) {
double leftX = (ixGrid == 0) ? 0.0f : x0[iGrid - 1] ;
double centerX = x0[iGrid];
double rightX = (ixGrid == nxGrids - 1) ? 0.0f : x0[iGrid + 1];
double topX = (iyGrid == nyGrids - 1) ? 0.0f : x0[iGrid + nxGrids];
double bottomX = (iyGrid == 0) ? 0.0f : x0[iGrid - nxGrids];
if (iteration % 2 == 0) {
x1[iGrid] = iterativeOperation(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
else {
x1[iGrid] = iterativeOperation2(leftMatrix[iGrid], centerMatrix[iGrid],
rightMatrix[iGrid], topMatrix[iGrid], bottomMatrix[iGrid],
leftX, centerX, rightX, topX, bottomX, rhs[iGrid], iGrid, method);
}
}
__syncthreads();
}
double * iterativeGpuClassic(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int nIters, const int threadsPerBlock, int method)
{
int nDofs = nxGrids * nyGrids;
// Allocate memory in the CPU for the solution
double * x0Gpu, * x1Gpu;
cudaMalloc(&x0Gpu, sizeof(double) * nDofs);
cudaMalloc(&x1Gpu, sizeof(double) * nDofs);
// Allocate CPU memory for other variables
double * rhsGpu, * leftMatrixGpu, * rightMatrixGpu, * centerMatrixGpu, * topMatrixGpu, * bottomMatrixGpu;
cudaMalloc(&rhsGpu, sizeof(double) * nDofs);
cudaMalloc(&leftMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(¢erMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&rightMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&topMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&bottomMatrixGpu, sizeof(double) * nDofs);
// Allocate GPU memory
cudaMemcpy(x0Gpu, initX, sizeof(double) * nDofs, cudaMemcpyHostToDevice);
cudaMemcpy(rhsGpu, rhs, sizeof(double) * nDofs, cudaMemcpyHostToDevice);
cudaMemcpy(leftMatrixGpu, leftMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(centerMatrixGpu, centerMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(rightMatrixGpu, rightMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(topMatrixGpu, topMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(bottomMatrixGpu, bottomMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
// Run the classic iteration for prescribed number of iterations
// int threadsPerBlock = 16;
int nxBlocks = (int)ceil(nxGrids / (double)threadsPerBlock);
int nyBlocks = (int)ceil(nyGrids / (double)threadsPerBlock);
dim3 grid(nxBlocks, nyBlocks);
dim3 block(threadsPerBlock, threadsPerBlock);
for (int iIter = 0; iIter < nIters; ++iIter) {
// Jacobi iteration on the CPU (used to be <<<nBlocks, threadsPerBlock>>>)
_iterativeGpuClassicIteration<<<grid, block>>>(
x1Gpu, x0Gpu, rhsGpu, leftMatrixGpu, centerMatrixGpu,
rightMatrixGpu, topMatrixGpu, bottomMatrixGpu,
nxGrids, nyGrids, iIter, method);
double * tmp = x1Gpu; x0Gpu = x1Gpu; x1Gpu = tmp;
}
// Write solution from GPU to CPU variable
double * solution = new double[nDofs];
cudaMemcpy(solution, x0Gpu, sizeof(double) * nDofs,
cudaMemcpyDeviceToHost);
// Free all memory
cudaFree(x0Gpu);
cudaFree(x1Gpu);
cudaFree(rhsGpu);
cudaFree(leftMatrixGpu);
cudaFree(centerMatrixGpu);
cudaFree(rightMatrixGpu);
return solution;
}
//// SWEPT METHODS HERE ////
__device__
void __iterativeBlockUpdateToLeftRight(double * xLeftBlock, double * xRightBlock, const double *rhsBlock,
const double * leftMatrixBlock, const double *centerMatrixBlock, const double * rightMatrixBlock,
const double * topMatrixBlock, const double * bottomMatrixBlock, int nxGrids, int nyGrids, int iGrid, int method, int maxSteps)
{
extern __shared__ double sharedMemory[];
double * x0 = sharedMemory;
int elemPerBlock = blockDim.x * blockDim.y;
double * x1 = sharedMemory + elemPerBlock;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if ((threadIdx.x >= 1 && threadIdx.x <= blockDim.x-2) && (threadIdx.y >= 1 && threadIdx.y <= blockDim.y-2)) {
for (int k = 0; k < maxSteps; k++) {
// Define necessary constants
double centerRhs = rhsBlock[idx];
double leftMatrix = leftMatrixBlock[idx];
double centerMatrix = centerMatrixBlock[idx];
double rightMatrix = rightMatrixBlock[idx];
double topMatrix = topMatrixBlock[idx];
double bottomMatrix = bottomMatrixBlock[idx];
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : x0[idx-1];
double centerX = x0[idx];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : x0[idx+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? x0[idx+blockDim.x] : 0.0;
double bottomX = (iGrid >= nxGrids) ? x0[idx-blockDim.x] : 0.0;
//printf("In iGrid %d, idx = %d, left %f, right %f, center %f, top %f, bottom %f\n", iGrid, idx, leftX, rightX, centerX, topX, bottomX );
// Perform update
x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix,
leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method);
// Synchronize
__syncthreads();
double * tmp; tmp = x0; x0 = x1;
}
}
// Save xLeft, xRight, xTop, xBottom
if (idx < (blockDim.x * blockDim.y)/2) {
xLeftBlock[idx] = x0[threadIdx.x * blockDim.x + threadIdx.y];
xRightBlock[idx] = x0[(blockDim.x-1-threadIdx.y) + threadIdx.x * blockDim.x];
}
__syncthreads();
}
__device__
void __iterativeBlockUpdateToNorthSouth(double * xTopBlock, double * xBottomBlock, const double *rhsBlock,
const double * leftMatrixBlock, const double *centerMatrixBlock, const double * rightMatrixBlock,
const double * topMatrixBlock, const double * bottomMatrixBlock, int nxGrids, int nyGrids, int iGrid, int method, int maxSteps)
{
extern __shared__ double sharedMemory[];
double * x0 = sharedMemory;
int elemPerBlock = blockDim.x * blockDim.y;
double * x1 = sharedMemory + elemPerBlock;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if ((threadIdx.x >= 1 && threadIdx.x <= blockDim.x-2) && (threadIdx.y >= 1 && threadIdx.y <= blockDim.y-2)) {
for (int k = 0; k < maxSteps; k++) {
// Define necessary constants
double centerRhs = rhsBlock[idx];
double leftMatrix = leftMatrixBlock[idx];
double centerMatrix = centerMatrixBlock[idx];
double rightMatrix = rightMatrixBlock[idx];
double topMatrix = topMatrixBlock[idx];
double bottomMatrix = bottomMatrixBlock[idx];
double leftX = ((iGrid % nxGrids) == 0) ? 0.0 : x0[idx-1];
double centerX = x0[idx];
double rightX = ((iGrid + 1) % nxGrids == 0) ? 0.0 : x0[idx+1];
double topX = (iGrid < nxGrids * (nyGrids - 1)) ? x0[idx+blockDim.x] : 0.0;
double bottomX = (iGrid >= nxGrids) ? x0[idx-blockDim.x] : 0.0;
// Perform update
x1[idx] = iterativeOperation(leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix,
leftX, centerX, rightX, topX, bottomX, centerRhs, iGrid, method);
// Synchronize
__syncthreads();
double * tmp; tmp = x0; x0 = x1;
}
}
// Return values for xTop and xBottom here
if (idx < (blockDim.x * blockDim.y)/2) {
xBottomBlock[idx] = x0[idx];
xTopBlock[idx] = x0[threadIdx.x + (blockDim.x)*(blockDim.x-1-threadIdx.y)];
}
__syncthreads();
}
__global__
void _iterativeGpuOriginal(double * xLeftGpu, double *xRightGpu,
const double * x0Gpu, const double *rhsGpu,
const double * leftMatrixGpu, const double *centerMatrixGpu, const double * rightMatrixGpu,
const double * topMatrixGpu, const double * bottomMatrixGpu, int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
const double * x0Block = x0Gpu + blockShift;
const double * rhsBlock = rhsGpu + blockShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift;
const double * topMatrixBlock = topMatrixGpu + blockShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift;
int numElementsPerBlock = blockDim.x * blockDim.y;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = (numElementsPerBlock*blockID)/2;
double * xLeftBlock = xLeftGpu + arrayShift;
double * xRightBlock = xRightGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + idx;
extern __shared__ double sharedMemory[];
sharedMemory[threadIdx.x + threadIdx.y * blockDim.x] = x0Block[threadIdx.x + threadIdx.y * nxGrids];
sharedMemory[threadIdx.x + threadIdx.y * blockDim.x + blockDim.x * blockDim.y] = x0Block[threadIdx.x + threadIdx.y * nxGrids];
__syncthreads();
__iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuHorizontalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int horizontalShift = blockDim.x/2;
const double * rhsBlock = rhsGpu + blockShift; //+ horizontalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ horizontalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ horizontalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ horizontalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ horizontalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ horizontalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xLeftBlock = xRightGpu + arrayShift;
double * xRightBlock = (blockIdx.x != gridDim.x-1) ?
xLeftGpu + arrayShift + numElementsPerBlock :
xLeftGpu + (numElementsPerBlock * blockIdx.y * gridDim.x);
double * xBottomBlock = xBottomGpu + arrayShift;
double * xTopBlock = xTopGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + idx + horizontalShift;
if ((blockIdx.x == gridDim.x-1) && threadIdx.x >= (blockDim.x/2)) {
iGrid = iGrid - nxGrids;
}
// printf("In loop: I am idx %d and grid point %d\n", idx, iGrid);
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.x < blockDim.x/2) {
sharedMemory[idx] = xLeftBlock[threadIdx.y + (blockDim.x/2-1-threadIdx.x)*blockDim.y];
}
else {
sharedMemory[idx] = xRightBlock[threadIdx.y + (threadIdx.x-(blockDim.x/2))*blockDim.y];
}
__syncthreads();
__iterativeBlockUpdateToNorthSouth(xTopBlock, xBottomBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuVerticalandHorizontalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int horizontalShift = blockDim.x/2;
int verticalShift = blockDim.y/2 * nxGrids;
const double * rhsBlock = rhsGpu + blockShift; //+ verticalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ verticalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ verticalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ verticalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ verticalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ verticalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xBottomBlock = xTopGpu + arrayShift;
double * xTopBlock = (blockIdx.y != gridDim.y-1) ?
xBottomGpu + arrayShift + numElementsPerBlock * gridDim.x :
xBottomGpu + (numElementsPerBlock * blockIdx.x);
double * xLeftBlock = xLeftGpu + arrayShift;
double * xRightBlock = xRightGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int iGrid = blockShift + verticalShift + horizontalShift + idx;
if ((blockIdx.x == gridDim.x-1) && threadIdx.x >= (blockDim.x/2)) {
iGrid = iGrid - nxGrids;
}
int nDofs = nxGrids * nyGrids;
if ((blockIdx.y == gridDim.y-1) && threadIdx.y >= (blockDim.y/2)) {
iGrid = iGrid - nDofs;
}
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (idx < numElementsPerBlock) {
sharedMemory[idx] = xBottomBlock[threadIdx.x + (blockDim.y/2-1-threadIdx.y)*blockDim.x];
}
else {
sharedMemory[idx] = xTopBlock[threadIdx.x + (threadIdx.y-(blockDim.y/2))*blockDim.x];
}
__syncthreads();
__iterativeBlockUpdateToLeftRight(xLeftBlock, xRightBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _iterativeGpuVerticalShift(double * xLeftGpu, double *xRightGpu, double * xTopGpu, double * xBottomGpu,
const double *rhsGpu, const double * leftMatrixGpu, const double *centerMatrixGpu,
const double * rightMatrixGpu, const double * topMatrixGpu, const double * bottomMatrixGpu,
int nxGrids, int nyGrids, int method, int maxSteps)
{
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
int verticalShift = blockDim.y/2 * nxGrids;
const double * rhsBlock = rhsGpu + blockShift; //+ verticalShift;
const double * leftMatrixBlock = leftMatrixGpu + blockShift; //+ verticalShift;
const double * centerMatrixBlock = centerMatrixGpu + blockShift; //+ verticalShift;
const double * rightMatrixBlock = rightMatrixGpu + blockShift; //+ verticalShift;
const double * topMatrixBlock = topMatrixGpu + blockShift; //+ verticalShift;
const double * bottomMatrixBlock = bottomMatrixGpu + blockShift; //+ verticalShift;
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xRightBlock = xLeftGpu + arrayShift;
double * xLeftBlock = (blockIdx.x != 0) ?
xRightGpu + arrayShift - numElementsPerBlock :
xRightGpu + numElementsPerBlock * ((gridDim.x-1) + blockIdx.y * gridDim.x);
double * xBottomBlock = xBottomGpu + arrayShift;
double * xTopBlock = xTopGpu + arrayShift;
int idx = threadIdx.x + threadIdx.y * nxGrids;
int nDofs = nxGrids * nyGrids;
int iGrid = blockShift + verticalShift + threadIdx.y * nxGrids + threadIdx.x;
iGrid = (iGrid >= nDofs) ? iGrid - nDofs : iGrid;
extern __shared__ double sharedMemory[];
idx = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.x < blockDim.x/2) {
sharedMemory[idx] = xLeftBlock[threadIdx.y + (blockDim.x/2-1-threadIdx.x)*blockDim.y];
}
else {
sharedMemory[idx] = xRightBlock[threadIdx.y + (threadIdx.x-(blockDim.x/2))*blockDim.y];
}
__syncthreads();
__iterativeBlockUpdateToNorthSouth( xTopBlock, xBottomBlock, rhsBlock,
leftMatrixBlock, centerMatrixBlock, rightMatrixBlock, topMatrixBlock, bottomMatrixBlock,
nxGrids, nyGrids, iGrid, method, maxSteps);
}
__global__
void _finalSolution(double * xTopGpu, double * xBottomGpu, double * x0Gpu, int nxGrids)
{
extern __shared__ double sharedMemory[];
int numElementsPerBlock = (blockDim.x * blockDim.y)/2;
int blockID = blockIdx.x + blockIdx.y * gridDim.x;
int arrayShift = numElementsPerBlock*blockID;
double * xTopBlock = xBottomGpu + arrayShift;
double * xBottomBlock = (blockIdx.y != 0) ?
xTopGpu + (blockIdx.x + (blockIdx.y-1) * gridDim.x) * numElementsPerBlock :
xTopGpu + (gridDim.x * (gridDim.y-1) + blockIdx.x) * numElementsPerBlock;
int xShift = blockDim.x * blockIdx.x;
int yShift = blockDim.y * blockIdx.y;
int blockShift = xShift + yShift * nxGrids;
double * x0Block = x0Gpu + blockShift;
int idx = threadIdx.x + threadIdx.y * blockDim.x;
if (idx < (blockDim.x * blockDim.y)/2) {
sharedMemory[idx + numElementsPerBlock] = xTopBlock[idx];
sharedMemory[threadIdx.x + (blockDim.x)*(blockDim.x/2-1-threadIdx.y)] = xBottomBlock[idx];
}
__syncthreads();
double * x0 = x0Gpu + blockShift;
idx = threadIdx.x + threadIdx.y * nxGrids;
x0[threadIdx.x + threadIdx.y * nxGrids] = sharedMemory[threadIdx.x + threadIdx.y * blockDim.x];
}
///////////////////////////////////////////////////
double * iterativeGpuSwept(const double * initX, const double * rhs,
const double * leftMatrix, const double * centerMatrix,
const double * rightMatrix, const double * topMatrix, const double * bottomMatrix,
int nxGrids, int nyGrids, int nIters, int maxSteps, const int threadsPerBlock, const int method)
{
// Determine number of threads and blocks
const int nxBlocks = (int)ceil(nxGrids / (double)threadsPerBlock);
const int nyBlocks = (int)ceil(nyGrids / (double)threadsPerBlock);
const int nDofs = nxGrids * nyGrids;
dim3 grid(nxBlocks, nyBlocks);
dim3 block(threadsPerBlock, threadsPerBlock);
// Allocate memory for solution and inputs
double *xLeftGpu, *xRightGpu, *xTopGpu, *xBottomGpu;
int numSharedElemPerBlock = threadsPerBlock * (threadsPerBlock / 2 + 1);
cudaMalloc(&xLeftGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
cudaMalloc(&xRightGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
cudaMalloc(&xTopGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
cudaMalloc(&xBottomGpu, sizeof(double) * numSharedElemPerBlock * nxBlocks * nyBlocks);
double * x0Gpu, * rhsGpu, * leftMatrixGpu, * rightMatrixGpu, * centerMatrixGpu, * topMatrixGpu, * bottomMatrixGpu;
cudaMalloc(&x0Gpu, sizeof(double) * nDofs);
cudaMalloc(&rhsGpu, sizeof(double) * nDofs);
cudaMalloc(&leftMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(¢erMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&rightMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&topMatrixGpu, sizeof(double) * nDofs);
cudaMalloc(&bottomMatrixGpu, sizeof(double) * nDofs);
// Allocate memory in the GPU
cudaMemcpy(x0Gpu, initX, sizeof(double) * nDofs, cudaMemcpyHostToDevice);
cudaMemcpy(rhsGpu, rhs, sizeof(double) * nDofs, cudaMemcpyHostToDevice);
cudaMemcpy(leftMatrixGpu, leftMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(centerMatrixGpu, centerMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(rightMatrixGpu, rightMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(topMatrixGpu, topMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
cudaMemcpy(bottomMatrixGpu, bottomMatrix, sizeof(double) * nDofs,
cudaMemcpyHostToDevice);
int sharedBytes = 2 * threadsPerBlock * threadsPerBlock * sizeof(double);
for (int i = 0; i < nIters; i++) {
// APPLY METHOD TO ADVANCE POINTS (NO SHIFT)
_iterativeGpuOriginal <<<grid, block, sharedBytes>>>(xLeftGpu, xRightGpu, x0Gpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY HORIZONTAL SHIFT
_iterativeGpuHorizontalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY VERTICAL SHIFT (ALONG WITH PREVIOUS HORIZONTAL SHIFT)
_iterativeGpuVerticalandHorizontalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY VERTICAL SHIFT
_iterativeGpuVerticalShift <<<grid, block, sharedBytes>>> (xLeftGpu, xRightGpu, xTopGpu, xBottomGpu, rhsGpu, leftMatrixGpu, centerMatrixGpu, rightMatrixGpu, topMatrixGpu, bottomMatrixGpu, nxGrids, nyGrids, method, maxSteps);
// APPLY FINAL STEP
_finalSolution <<<grid, block, sharedBytes>>>(xTopGpu, xBottomGpu, x0Gpu, nxGrids);
}
double * solution = new double[nDofs];
cudaMemcpy(solution, x0Gpu, sizeof(double) * nDofs,
cudaMemcpyDeviceToHost);
cudaFree(x0Gpu);
cudaFree(xLeftGpu);
cudaFree(xRightGpu);
cudaFree(rhsGpu);
cudaFree(leftMatrixGpu);
cudaFree(centerMatrixGpu);
cudaFree(rightMatrixGpu);
return solution;
}
int main(int argc, char *argv[])
{
// Ask user for inputs
const int nxGrids = atoi(argv[1]);
const int nyGrids = atoi(argv[1]);
const int threadsPerBlock = atoi(argv[2]);
const int nIters = atoi(argv[3]);
const int nCycles = atoi(argv[4]);
const int maxSteps = atoi(argv[5]);
method_type method = JACOBI;
int nDofs = nxGrids * nyGrids;
// Declare arrays and population with values for Poisson equation
double * initX = new double[nDofs];
double * rhs = new double[nDofs];
double * leftMatrix = new double[nDofs];
double * centerMatrix = new double[nDofs];
double * rightMatrix = new double[nDofs];
double * bottomMatrix = new double[nDofs];
double * topMatrix = new double[nDofs];
double dx = 1.0f / (nxGrids + 1);
double dy = 1.0f / (nyGrids + 1);
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
initX[iGrid] = (double)iGrid;
rhs[iGrid] = 1.0f;
leftMatrix[iGrid] = -1.0f / (dx * dx);
centerMatrix[iGrid] = 2.0f / (dx * dx) + 2.0f / (dy * dy);
rightMatrix[iGrid] = -1.0f / (dx * dx);
bottomMatrix[iGrid] = -1.0f / (dy * dy);
topMatrix[iGrid] = -1.0f / (dy * dy);
}
// Run the CPU Implementation and measure the time required
clock_t cpuStartTime = clock();
double * solutionCpu = iterativeCpu(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nIters, method);
clock_t cpuEndTime = clock();
double cpuTime = (cpuEndTime - cpuStartTime) / (double) CLOCKS_PER_SEC;
// Run the Classic GPU Implementation and measure the time required
cudaEvent_t startClassic, stopClassic;
float timeClassic;
cudaEventCreate( &startClassic );
cudaEventCreate( &stopClassic );
cudaEventRecord(startClassic, 0);
double * solutionGpuClassic = iterativeGpuClassic(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nIters, threadsPerBlock, method);
cudaEventRecord(stopClassic, 0);
cudaEventSynchronize(stopClassic);
cudaEventElapsedTime(&timeClassic, startClassic, stopClassic);
// Run the Swept GPU Implementation and measure the time required
cudaEvent_t startSwept, stopSwept;
float timeSwept;
cudaEventCreate( &startSwept );
cudaEventCreate( &stopSwept );
cudaEventRecord( startSwept, 0);
double * solutionGpuSwept = iterativeGpuSwept(initX, rhs, leftMatrix, centerMatrix,
rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids, nCycles, maxSteps, threadsPerBlock, method);
cudaEventRecord(stopSwept, 0);
cudaEventSynchronize(stopSwept);
cudaEventElapsedTime(&timeSwept, startSwept, stopSwept);
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
printf("Number of total grid points: %d\n", nDofs);
printf("Number of grid points in x-direction: %d\n", nxGrids);
printf("Number of grid points in y-direction: %d\n", nyGrids);
printf("Threads Per Block in each direction: %d\n", threadsPerBlock);
printf("Method used: %d\n", method);
printf("Number of Iterations performed: %d\n", nIters);
printf("\n");
// Print out results to the screen, notify if any GPU Classic or Swept values differ significantly
for (int iGrid = 0; iGrid < nDofs; ++iGrid) {
printf("%d %f %f %f \n",iGrid, solutionCpu[iGrid],
solutionGpuClassic[iGrid],
solutionGpuSwept[iGrid]);
//assert(solutionGpuClassic[iGrid] == solutionGpuSwept[iGrid]);
// if (abs(solutionGpuClassic[iGrid] - solutionGpuSwept[iGrid]) > 1e-2) {
// printf("For grid point %d, Classic and Swept give %f and %f respectively\n", iGrid, solutionGpuClassic[iGrid], solutionGpuSwept[iGrid]);
// }
}
// Print out time for cpu, classic gpu, and swept gpu approaches
double cpuTimePerIteration = (cpuTime / nIters) * 1e3;
double classicTimePerIteration = timeClassic / nIters;
double sweptTimePerIteration = timeSwept / nIters;
double timeMultiplier = classicTimePerIteration / sweptTimePerIteration;
/* printf("Time needed for the CPU (per iteration): %f ms\n", cpuTimePerIteration);
printf("Time needed for the Classic GPU (per iteration) is %f ms\n", classicTimePerIteration);
printf("Time needed for the Swept GPU (per iteration): %f ms\n", sweptTimePerIteration); */
printf("Total Time needed for the CPU: %f ms\n", cpuTime * 1e3);
printf("Total Time needed for the Classic GPU is %f ms\n", timeClassic);
printf("Total Time needed for the Swept GPU: %f ms\n", timeSwept);
// Compute the residual of the resulting solution (||b-Ax||)
double residualCPU = Residual(solutionGpuClassic, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
double residualClassicGPU = Residual(solutionGpuClassic, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
double residualSweptGPU = Residual(solutionGpuSwept, rhs, leftMatrix, centerMatrix, rightMatrix, topMatrix, bottomMatrix, nxGrids, nyGrids);
printf("Residual of the CPU solution is %f\n", residualCPU);
printf("Residual of the Classic GPU solution is %f\n", residualClassicGPU);
printf("Residual of the Swept GPU solution is %f\n", residualSweptGPU);
// Save residual to a file
/* std::ofstream residuals;
residuals.open("residual-gs.txt",std::ios_base::app);
residuals << nGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << residualSwept << "\n";
residuals.close(); */
// Save Results to a file "N tpb Iterations CPUTime/perstep ClassicTime/perstep SweptTime/perStep ClassicTime/SweptTime"
std::ofstream timings;
timings.open("time.txt",std::ios_base::app);
// timings << nxGrids << "\t" << nyGrids << "\t" << threadsPerBlock << "\t" << nIters << "\t" << cpuTimePerIteration << "\t" << classicTimePerIteration << "\t" << sweptTimePerIteration << "\t" << timeMultiplier << "\n";
timings.close();
// Free memory
cudaEventDestroy(startClassic);
cudaEventDestroy(startSwept);
delete[] initX;
delete[] rhs;
delete[] leftMatrix;
delete[] centerMatrix;
delete[] rightMatrix;
delete[] solutionCpu;
delete[] solutionGpuClassic;
}
|
df7219dbbfcf6bd320825a0ad0b1d3dd172815eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is a basic, recursive bitonic sort taken from
// http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm
//
// The parallel code is based on:
// http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
//
// The multithread code is from me.
#include <stdio.h>
#include "quicksort.h"
// Inline PTX call to return index of highest non-zero bit in a word
static __device__ __forceinline__ unsigned int __btflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// qcompare
//
// Comparison function. Note difference from libc standard in
// that we take by reference, not by pointer. I can't figure
// out how to get a template-to-pointer specialisation working.
// Perhaps it requires a class?
//
////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ int qcompare(unsigned &val1, unsigned &val2)
{
return (val1 > val2) ? 1 : (val1 == val2) ? 0 : -1;
}
////////////////////////////////////////////////////////////////////////////////
//
// Basic any-N bitonic sort. We sort "len" elements of "indata", starting
// from the "offset" elements into the input data array. Note that "outdata"
// can safely be the same as "indata" for an in-place sort (we stage through
// shared memory).
//
// We handle non-power-of-2 sizes by padding out to the next largest power of 2.
// This is the fully-generic version, for sorting arbitrary data which does not
// have a clear "maximum" value. We track "invalid" entries in a separate array
// to make sure that they always sorts as "max value" elements. A template
// parameter "OOR" allows specialisation to optimise away the invalid tracking.
//
// We can do a more specialised version for int/longlong/flat/double, in which
// we pad out the array with max-value-of-type elements. That's another function.
//
// The last step copies from indata -> outdata... the rest is done in-place.
// We use shared memory as temporary storage, which puts an upper limit on
// how much data we can sort per block.
//
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ void bitonicsort_kernel(unsigned *indata, unsigned *outdata, unsigned int offset, unsigned int len)
{
__shared__ unsigned sortbuf[1024]; // Max of 1024 elements - TODO: make this dynamic
// First copy data into shared memory.
unsigned int inside = (threadIdx.x < len);
sortbuf[threadIdx.x] = inside ? indata[threadIdx.x + offset] : 0xffffffffu;
__syncthreads();
// Now the sort loops
// Here, "k" is the sort level (remember bitonic does a multi-level butterfly style sort)
// and "j" is the partner element in the butterfly.
// Two threads each work on one butterfly, because the read/write needs to happen
// simultaneously
for (unsigned int k=2; k<=blockDim.x; k*=2) // Butterfly stride increments in powers of 2
{
for (unsigned int j=k>>1; j>0; j>>=1) // Strides also in powers of to, up to <k
{
unsigned int swap_idx = threadIdx.x ^ j; // Index of element we're compare-and-swapping with
unsigned my_elem = sortbuf[threadIdx.x];
unsigned swap_elem = sortbuf[swap_idx];
__syncthreads();
// The k'th bit of my threadid (and hence my sort item ID)
// determines if we sort ascending or descending.
// However, since threads are reading from the top AND the bottom of
// the butterfly, if my ID is > swap_idx, then ascending means mine<swap.
// Finally, if either my_elem or swap_elem is out of range, then it
// ALWAYS acts like it's the largest number.
// Confusing? It saves us two writes though.
unsigned int ascend = k * (swap_idx < threadIdx.x);
unsigned int descend = k * (swap_idx > threadIdx.x);
bool swap = false;
if ((threadIdx.x & k) == ascend)
{
if (my_elem > swap_elem)
swap = true;
}
if ((threadIdx.x & k) == descend)
{
if (my_elem < swap_elem)
swap = true;
}
// If we had to swap, then write my data to the other element's position.
// Don't forget to track out-of-range status too!
if (swap)
{
sortbuf[swap_idx] = my_elem;
}
__syncthreads();
}
}
// Copy the sorted data from shared memory back to the output buffer
if (threadIdx.x < len)
outdata[threadIdx.x + offset] = sortbuf[threadIdx.x];
}
//////////////////////////////////////////////////////////////////////////////////
// This is an emergency-CTA sort, which sorts an arbitrary sized chunk
// using a single block. Useful for if qsort runs out of nesting depth.
//
// Note that bitonic sort needs enough storage to pad up to the nearest
// power of 2. This means that the double-buffer is always large enough
// (when combined with the main buffer), but we do not get enough space
// to keep OOR information.
//
// This in turn means that this sort does not work with a generic data
// type. It must be a directly-comparable (i.e. with max value) type.
//
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ void big_bitonicsort_kernel(unsigned *indata, unsigned *outdata, unsigned *backbuf, unsigned int offset, unsigned int len)
{
unsigned int len2 = 1 << (__btflo(len-1U)+1); // Round up len to nearest power-of-2
if (threadIdx.x >= len2) return; // Early out for case where more threads launched than there is data
// First, set up our unused values to be the max data type.
for (unsigned int i=len; i<len2; i+=blockDim.x)
{
unsigned int index = i + threadIdx.x;
if (index < len2)
{
// Must split our index between two buffers
if (index < len)
indata[index+offset] = 0xffffffffu;
else
backbuf[index+offset-len] = 0xffffffffu;
}
}
__syncthreads();
// Now the sort loops
// Here, "k" is the sort level (remember bitonic does a multi-level butterfly style sort)
// and "j" is the partner element in the butterfly.
// Two threads each work on one butterfly, because the read/write needs to happen
// simultaneously
for (unsigned int k=2; k<=len2; k*=2) // Butterfly stride increments in powers of 2
{
for (unsigned int j=k>>1; j>0; j>>=1) // Strides also in powers of to, up to <k
{
for (unsigned int i=0; i<len2; i+=blockDim.x)
{
unsigned int index = threadIdx.x + i;
unsigned int swap_idx = index ^ j; // Index of element we're compare-and-swapping with
// Only do the swap for index<swap_idx (avoids collision between other threads)
if (swap_idx > index)
{
unsigned my_elem, swap_elem;
if (index < len)
my_elem = indata[index+offset];
else
my_elem = backbuf[index+offset-len];
if (swap_idx < len)
swap_elem = indata[swap_idx+offset];
else
swap_elem = backbuf[swap_idx+offset-len];
// The k'th bit of my index (and hence my sort item ID)
// determines if we sort ascending or descending.
// Also, if either my_elem or swap_elem is out of range, then it
// ALWAYS acts like it's the largest number.
bool swap = false;
if ((index & k) == 0)
{
if (my_elem > swap_elem)
swap = true;
}
if ((index & k) == k)
{
if (my_elem < swap_elem)
swap = true;
}
// If we had to swap, then write my data to the other element's position.
if (swap)
{
if (swap_idx < len)
indata[swap_idx+offset] = my_elem;
else
backbuf[swap_idx+offset-len] = my_elem;
if (index < len)
indata[index+offset] = swap_elem;
else
backbuf[index+offset-len] = swap_elem;
}
}
}
__syncthreads(); // Only need to sync for each "j" pass
}
}
// Copy the sorted data from the input to the output buffer, because we sort in-place
if (outdata != indata)
{
for (unsigned int i=0; i<len; i+=blockDim.x)
{
unsigned int index = i + threadIdx.x;
if (index < len)
outdata[index+offset] = indata[index+offset];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// KERNELS
////////////////////////////////////////////////////////////////////////////////
__global__ void bitonicsort(unsigned *indata, unsigned *outdata, unsigned int offset, unsigned int len)
{
bitonicsort_kernel(indata, outdata, offset, len);
}
__global__ void big_bitonicsort(unsigned *indata, unsigned *outdata, unsigned *backbuf, unsigned int offset, unsigned int len)
{
big_bitonicsort_kernel(indata, outdata, backbuf, offset, len);
}
////////////////////////////////////////////////////////////////////////////////
| df7219dbbfcf6bd320825a0ad0b1d3dd172815eb.cu | // This is a basic, recursive bitonic sort taken from
// http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/bitonic/oddn.htm
//
// The parallel code is based on:
// http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
//
// The multithread code is from me.
#include <stdio.h>
#include "quicksort.h"
// Inline PTX call to return index of highest non-zero bit in a word
static __device__ __forceinline__ unsigned int __btflo(unsigned int word)
{
unsigned int ret;
asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(word));
return ret;
}
////////////////////////////////////////////////////////////////////////////////
//
// qcompare
//
// Comparison function. Note difference from libc standard in
// that we take by reference, not by pointer. I can't figure
// out how to get a template-to-pointer specialisation working.
// Perhaps it requires a class?
//
////////////////////////////////////////////////////////////////////////////////
__device__ __forceinline__ int qcompare(unsigned &val1, unsigned &val2)
{
return (val1 > val2) ? 1 : (val1 == val2) ? 0 : -1;
}
////////////////////////////////////////////////////////////////////////////////
//
// Basic any-N bitonic sort. We sort "len" elements of "indata", starting
// from the "offset" elements into the input data array. Note that "outdata"
// can safely be the same as "indata" for an in-place sort (we stage through
// shared memory).
//
// We handle non-power-of-2 sizes by padding out to the next largest power of 2.
// This is the fully-generic version, for sorting arbitrary data which does not
// have a clear "maximum" value. We track "invalid" entries in a separate array
// to make sure that they always sorts as "max value" elements. A template
// parameter "OOR" allows specialisation to optimise away the invalid tracking.
//
// We can do a more specialised version for int/longlong/flat/double, in which
// we pad out the array with max-value-of-type elements. That's another function.
//
// The last step copies from indata -> outdata... the rest is done in-place.
// We use shared memory as temporary storage, which puts an upper limit on
// how much data we can sort per block.
//
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ void bitonicsort_kernel(unsigned *indata, unsigned *outdata, unsigned int offset, unsigned int len)
{
__shared__ unsigned sortbuf[1024]; // Max of 1024 elements - TODO: make this dynamic
// First copy data into shared memory.
unsigned int inside = (threadIdx.x < len);
sortbuf[threadIdx.x] = inside ? indata[threadIdx.x + offset] : 0xffffffffu;
__syncthreads();
// Now the sort loops
// Here, "k" is the sort level (remember bitonic does a multi-level butterfly style sort)
// and "j" is the partner element in the butterfly.
// Two threads each work on one butterfly, because the read/write needs to happen
// simultaneously
for (unsigned int k=2; k<=blockDim.x; k*=2) // Butterfly stride increments in powers of 2
{
for (unsigned int j=k>>1; j>0; j>>=1) // Strides also in powers of to, up to <k
{
unsigned int swap_idx = threadIdx.x ^ j; // Index of element we're compare-and-swapping with
unsigned my_elem = sortbuf[threadIdx.x];
unsigned swap_elem = sortbuf[swap_idx];
__syncthreads();
// The k'th bit of my threadid (and hence my sort item ID)
// determines if we sort ascending or descending.
// However, since threads are reading from the top AND the bottom of
// the butterfly, if my ID is > swap_idx, then ascending means mine<swap.
// Finally, if either my_elem or swap_elem is out of range, then it
// ALWAYS acts like it's the largest number.
// Confusing? It saves us two writes though.
unsigned int ascend = k * (swap_idx < threadIdx.x);
unsigned int descend = k * (swap_idx > threadIdx.x);
bool swap = false;
if ((threadIdx.x & k) == ascend)
{
if (my_elem > swap_elem)
swap = true;
}
if ((threadIdx.x & k) == descend)
{
if (my_elem < swap_elem)
swap = true;
}
// If we had to swap, then write my data to the other element's position.
// Don't forget to track out-of-range status too!
if (swap)
{
sortbuf[swap_idx] = my_elem;
}
__syncthreads();
}
}
// Copy the sorted data from shared memory back to the output buffer
if (threadIdx.x < len)
outdata[threadIdx.x + offset] = sortbuf[threadIdx.x];
}
//////////////////////////////////////////////////////////////////////////////////
// This is an emergency-CTA sort, which sorts an arbitrary sized chunk
// using a single block. Useful for if qsort runs out of nesting depth.
//
// Note that bitonic sort needs enough storage to pad up to the nearest
// power of 2. This means that the double-buffer is always large enough
// (when combined with the main buffer), but we do not get enough space
// to keep OOR information.
//
// This in turn means that this sort does not work with a generic data
// type. It must be a directly-comparable (i.e. with max value) type.
//
////////////////////////////////////////////////////////////////////////////////
static __device__ __forceinline__ void big_bitonicsort_kernel(unsigned *indata, unsigned *outdata, unsigned *backbuf, unsigned int offset, unsigned int len)
{
unsigned int len2 = 1 << (__btflo(len-1U)+1); // Round up len to nearest power-of-2
if (threadIdx.x >= len2) return; // Early out for case where more threads launched than there is data
// First, set up our unused values to be the max data type.
for (unsigned int i=len; i<len2; i+=blockDim.x)
{
unsigned int index = i + threadIdx.x;
if (index < len2)
{
// Must split our index between two buffers
if (index < len)
indata[index+offset] = 0xffffffffu;
else
backbuf[index+offset-len] = 0xffffffffu;
}
}
__syncthreads();
// Now the sort loops
// Here, "k" is the sort level (remember bitonic does a multi-level butterfly style sort)
// and "j" is the partner element in the butterfly.
// Two threads each work on one butterfly, because the read/write needs to happen
// simultaneously
for (unsigned int k=2; k<=len2; k*=2) // Butterfly stride increments in powers of 2
{
for (unsigned int j=k>>1; j>0; j>>=1) // Strides also in powers of to, up to <k
{
for (unsigned int i=0; i<len2; i+=blockDim.x)
{
unsigned int index = threadIdx.x + i;
unsigned int swap_idx = index ^ j; // Index of element we're compare-and-swapping with
// Only do the swap for index<swap_idx (avoids collision between other threads)
if (swap_idx > index)
{
unsigned my_elem, swap_elem;
if (index < len)
my_elem = indata[index+offset];
else
my_elem = backbuf[index+offset-len];
if (swap_idx < len)
swap_elem = indata[swap_idx+offset];
else
swap_elem = backbuf[swap_idx+offset-len];
// The k'th bit of my index (and hence my sort item ID)
// determines if we sort ascending or descending.
// Also, if either my_elem or swap_elem is out of range, then it
// ALWAYS acts like it's the largest number.
bool swap = false;
if ((index & k) == 0)
{
if (my_elem > swap_elem)
swap = true;
}
if ((index & k) == k)
{
if (my_elem < swap_elem)
swap = true;
}
// If we had to swap, then write my data to the other element's position.
if (swap)
{
if (swap_idx < len)
indata[swap_idx+offset] = my_elem;
else
backbuf[swap_idx+offset-len] = my_elem;
if (index < len)
indata[index+offset] = swap_elem;
else
backbuf[index+offset-len] = swap_elem;
}
}
}
__syncthreads(); // Only need to sync for each "j" pass
}
}
// Copy the sorted data from the input to the output buffer, because we sort in-place
if (outdata != indata)
{
for (unsigned int i=0; i<len; i+=blockDim.x)
{
unsigned int index = i + threadIdx.x;
if (index < len)
outdata[index+offset] = indata[index+offset];
}
}
}
////////////////////////////////////////////////////////////////////////////////
// KERNELS
////////////////////////////////////////////////////////////////////////////////
__global__ void bitonicsort(unsigned *indata, unsigned *outdata, unsigned int offset, unsigned int len)
{
bitonicsort_kernel(indata, outdata, offset, len);
}
__global__ void big_bitonicsort(unsigned *indata, unsigned *outdata, unsigned *backbuf, unsigned int offset, unsigned int len)
{
big_bitonicsort_kernel(indata, outdata, backbuf, offset, len);
}
////////////////////////////////////////////////////////////////////////////////
|
9c0706a8cf1106556bc150b3d04aa15bf7361efe.hip | // !!! This is a file automatically generated by hipify!!!
/**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <EvoNet/core/Preprocessing.h>
#include <EvoNet/ml/ActivationFunctionTensor.h>
#define AssertPrint(a) if (!a) std::cout<<"Test failed"<<std::endl; // Macro to print instead of abort on test failures
using namespace EvoNet;
using namespace std;
void test_operationfunctionReluTensorOp()
{
ReLUTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionReluGradTensorOp()
{
ReLUGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionEluTensorOp()
{
ELUTensorOp<double, Eigen::GpuDevice> operation(1.0);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-0.63212055882855767,-0.63212055882855767}, {0,0}},
{{-0.99995460007023751,-0.99995460007023751}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionEluGradTensorOp()
{
ELUGradTensorOp<double, Eigen::GpuDevice> operation(1.0);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0.36787944117144233,0.36787944117144233}, {0,0}},
{{4.5399929762490743e-05,4.5399929762490743e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionSigmoidTensorOp()
{
SigmoidTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.5,0.5}, {0,0}},
{{0.7310585786300049,0.7310585786300049}, {0,0}},
{{0.99995460213129761,0.99995460213129761}, {0,0}},
{{0.2689414213699951,0.2689414213699951}, {0,0}},
{{4.5397868702434395e-05,4.5397868702434395e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionSigmoidGradTensorOp()
{
SigmoidGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.25,0.25}, {0,0}},
{{0.19661193324148185,0.19661193324148185}, {0,0}},
{{4.5395807735907655e-05,4.5395807735907655e-05}, {0,0}},
{{0.19661193324148185,0.19661193324148185}, {0,0}},
{{4.53958091e-05,4.53958091e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionTanHTensorOp()
{
TanHTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.0,0.0}, {0,0}},
{{0.76159415595576485,0.76159415595576485}, {0,0}},
{{0.99999999587769262,0.99999999587769262}, {0,0}},
{{-0.76159415595576485,-0.76159415595576485}, {0,0}},
{{-0.99999999587769262,-0.99999999587769262}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionTanHGradTensorOp()
{
TanHGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
// [TODO: need to re-implement]
void test_operationfunctionReTanHTensorOp()
{
ReTanHTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{0.76159415595576485,0.76159415595576485}, {0,0}},
{{0.99999999587769262,0.99999999587769262}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
// TODO: need to re-implement
void test_operationfunctionReTanHGradTensorOp()
{
ReTanHGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446147686709992e-09,8.2446147686709992e-09}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionLinearTensorOp()
{
LinearTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionLinearGradTensorOp()
{
LinearGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionInverseTensorOp()
{
InverseTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.1,-0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionInverseGradTensorOp()
{
InverseGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.01,-0.01}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.01,-0.01}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionExponentialTensorOp()
{
ExponentialTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionExponentialGradTensorOp()
{
ExponentialGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLogTensorOp()
{
LogTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}}
});
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{-13.815510557964274,-13.815510557964274}, {0,0}},
{{0,0}, {0,0}},
{{2.3025850929940459,2.3025850929940459}, {0,0}},
{{-13.815510557964274,-13.815510557964274}, {0,0}},
{{-13.815510557964274,-13.815510557964274}, {0,0}}
});
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLogGradTensorOp()
{
LogGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1000000000,1000000000}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.1,-0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionPowTensorOp()
{
PowTensorOp<double, Eigen::GpuDevice> operation(0.5);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{3.1622776601683795,3.1622776601683795}, {0,0}},
{{-1.0e9,-1.0e9}, {0,0}}, // TODO: Clip does not fix -nan(ind)
{{-1.0e9,-1.0e9}, {0,0}}});
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionPowGradTensorOp()
{
PowGradTensorOp<double, Eigen::GpuDevice> operation(0.5);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1.0e9,1.0e9}, {0,0}},
{{0.5,0.5}, {0,0}},
{{0.15811388300841897,0.15811388300841897}, {0,0}},
{{-1.0e9,-1.0e9}, {0,0}}, // TODO: Clip does not fix -nan(ind)
{{-1.0e9,-1.0e9}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLeakyReLUTensorOp()
{
LeakyReLUTensorOp<double, Eigen::GpuDevice> operation(0.1);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-0.1,-0.1}, {0,0}},
{{-1,-1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLeakyReLUGradTensorOp()
{
LeakyReLUGradTensorOp<double, Eigen::GpuDevice> operation(0.1);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{0.1,0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionSinTensorOp()
{
SinTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionSinGradTensorOp()
{
SinGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionCosTensorOp()
{
CosTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionCosGradTensorOp()
{
CosGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
hipStream_t stream; AssertPrint(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking) == hipSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
int main(int argc, char** argv)
{
test_operationfunctionReluTensorOp();
test_operationfunctionReluGradTensorOp();
test_operationfunctionEluTensorOp();
test_operationfunctionEluGradTensorOp();
test_operationfunctionSigmoidTensorOp();
test_operationfunctionSigmoidGradTensorOp();
test_operationfunctionTanHTensorOp();
test_operationfunctionTanHGradTensorOp();
test_operationfunctionReTanHTensorOp();
test_operationfunctionReTanHGradTensorOp();
test_operationfunctionLinearTensorOp();
test_operationfunctionLinearGradTensorOp();
test_operationfunctionInverseTensorOp();
test_operationfunctionInverseGradTensorOp();
test_operationfunctionExponentialTensorOp();
test_operationfunctionExponentialGradTensorOp();
test_operationfunctionLogTensorOp();
test_operationfunctionLogGradTensorOp();
test_operationfunctionPowTensorOp();
test_operationfunctionPowGradTensorOp();
test_operationfunctionLeakyReLUTensorOp();
test_operationfunctionLeakyReLUGradTensorOp();
test_operationfunctionSinTensorOp();
test_operationfunctionSinGradTensorOp();
test_operationfunctionCosTensorOp();
test_operationfunctionCosGradTensorOp();
return 0;
}
#endif | 9c0706a8cf1106556bc150b3d04aa15bf7361efe.cu | /**TODO: Add copyright*/
#if COMPILE_WITH_CUDA
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include <unsupported/Eigen/CXX11/Tensor>
#include <EvoNet/core/Preprocessing.h>
#include <EvoNet/ml/ActivationFunctionTensor.h>
#define AssertPrint(a) if (!a) std::cout<<"Test failed"<<std::endl; // Macro to print instead of abort on test failures
using namespace EvoNet;
using namespace std;
void test_operationfunctionReluTensorOp()
{
ReLUTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionReluGradTensorOp()
{
ReLUGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionEluTensorOp()
{
ELUTensorOp<double, Eigen::GpuDevice> operation(1.0);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-0.63212055882855767,-0.63212055882855767}, {0,0}},
{{-0.99995460007023751,-0.99995460007023751}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionEluGradTensorOp()
{
ELUGradTensorOp<double, Eigen::GpuDevice> operation(1.0);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0.36787944117144233,0.36787944117144233}, {0,0}},
{{4.5399929762490743e-05,4.5399929762490743e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionSigmoidTensorOp()
{
SigmoidTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.5,0.5}, {0,0}},
{{0.7310585786300049,0.7310585786300049}, {0,0}},
{{0.99995460213129761,0.99995460213129761}, {0,0}},
{{0.2689414213699951,0.2689414213699951}, {0,0}},
{{4.5397868702434395e-05,4.5397868702434395e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionSigmoidGradTensorOp()
{
SigmoidGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.25,0.25}, {0,0}},
{{0.19661193324148185,0.19661193324148185}, {0,0}},
{{4.5395807735907655e-05,4.5395807735907655e-05}, {0,0}},
{{0.19661193324148185,0.19661193324148185}, {0,0}},
{{4.53958091e-05,4.53958091e-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionTanHTensorOp()
{
TanHTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0.0,0.0}, {0,0}},
{{0.76159415595576485,0.76159415595576485}, {0,0}},
{{0.99999999587769262,0.99999999587769262}, {0,0}},
{{-0.76159415595576485,-0.76159415595576485}, {0,0}},
{{-0.99999999587769262,-0.99999999587769262}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionTanHGradTensorOp()
{
TanHGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446145466263943e-09,8.2446145466263943e-09}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
// [TODO: need to re-implement]
void test_operationfunctionReTanHTensorOp()
{
ReTanHTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{0.76159415595576485,0.76159415595576485}, {0,0}},
{{0.99999999587769262,0.99999999587769262}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
// TODO: need to re-implement
void test_operationfunctionReTanHGradTensorOp()
{
ReTanHGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{0.41997434161402614,0.41997434161402614}, {0,0}},
{{8.2446147686709992e-09,8.2446147686709992e-09}, {0,0}},
{{0,0}, {0,0}},
{{0,0}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionLinearTensorOp()
{
LinearTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionLinearGradTensorOp()
{
LinearGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionInverseTensorOp()
{
InverseTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.1,-0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i,j,k), test(i,j,k)));
}
}
}
}
void test_operationfunctionInverseGradTensorOp()
{
InverseGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.01,-0.01}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.01,-0.01}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionExponentialTensorOp()
{
ExponentialTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionExponentialGradTensorOp()
{
ExponentialGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLogTensorOp()
{
LogTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}}
});
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{-13.815510557964274,-13.815510557964274}, {0,0}},
{{0,0}, {0,0}},
{{2.3025850929940459,2.3025850929940459}, {0,0}},
{{-13.815510557964274,-13.815510557964274}, {0,0}},
{{-13.815510557964274,-13.815510557964274}, {0,0}}
});
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLogGradTensorOp()
{
LogGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1000000000,1000000000}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{-1,-1}, {0,0}},
{{-0.1,-0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionPowTensorOp()
{
PowTensorOp<double, Eigen::GpuDevice> operation(0.5);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{3.1622776601683795,3.1622776601683795}, {0,0}},
{{-1.0e9,-1.0e9}, {0,0}}, // TODO: Clip does not fix -nan(ind)
{{-1.0e9,-1.0e9}, {0,0}}});
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionPowGradTensorOp()
{
PowGradTensorOp<double, Eigen::GpuDevice> operation(0.5);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1.0e9,1.0e9}, {0,0}},
{{0.5,0.5}, {0,0}},
{{0.15811388300841897,0.15811388300841897}, {0,0}},
{{-1.0e9,-1.0e9}, {0,0}}, // TODO: Clip does not fix -nan(ind)
{{-1.0e9,-1.0e9}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLeakyReLUTensorOp()
{
LeakyReLUTensorOp<double, Eigen::GpuDevice> operation(0.1);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-0.1,-0.1}, {0,0}},
{{-1,-1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionLeakyReLUGradTensorOp()
{
LeakyReLUGradTensorOp<double, Eigen::GpuDevice> operation(0.1);
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{1,1}, {0,0}},
{{0.1,0.1}, {0,0}},
{{0.1,0.1}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
AssertPrint(assert_close(output(i, j, k), test(i, j, k)));
}
}
}
}
void test_operationfunctionSinTensorOp()
{
SinTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionSinGradTensorOp()
{
SinGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionCosTensorOp()
{
CosTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
void test_operationfunctionCosGradTensorOp()
{
CosGradTensorOp<double, Eigen::GpuDevice> operation;
const int batch_size = 5;
const int memory_size = 2;
const int layer_size = 2;
cudaStream_t stream; AssertPrint(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking) == cudaSuccess); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device);
Eigen::Tensor<double, 3> input(batch_size, memory_size, layer_size);
input.setValues({
{{0,0}, {0,0}},
{{1,1}, {0,0}},
{{10,10}, {0,0}},
{{-1,-1}, {0,0}},
{{-10,-10}, {0,0}} });
Eigen::Tensor<double, 3> output(batch_size, memory_size, layer_size);
output.setZero();
Eigen::Tensor<double, 3> test(batch_size, memory_size, layer_size);
test.setValues({
{{1,1}, {0,0}},
{{2.718281828,2.718281828}, {0,0}},
{{22026.46579,22026.46579}, {0,0}},
{{0.367879441,0.367879441}, {0,0}},
{{4.53999E-05,4.53999E-05}, {0,0}} });
operation(input.data(), output.data(), batch_size, memory_size, layer_size, 0, device);
// Test
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < memory_size; ++j) {
for (int k = 0; k < layer_size; ++k) {
//AssertPrint(assert_close(output(i, j, k), test(i, j, k))); //TODO: fixme
}
}
}
}
int main(int argc, char** argv)
{
test_operationfunctionReluTensorOp();
test_operationfunctionReluGradTensorOp();
test_operationfunctionEluTensorOp();
test_operationfunctionEluGradTensorOp();
test_operationfunctionSigmoidTensorOp();
test_operationfunctionSigmoidGradTensorOp();
test_operationfunctionTanHTensorOp();
test_operationfunctionTanHGradTensorOp();
test_operationfunctionReTanHTensorOp();
test_operationfunctionReTanHGradTensorOp();
test_operationfunctionLinearTensorOp();
test_operationfunctionLinearGradTensorOp();
test_operationfunctionInverseTensorOp();
test_operationfunctionInverseGradTensorOp();
test_operationfunctionExponentialTensorOp();
test_operationfunctionExponentialGradTensorOp();
test_operationfunctionLogTensorOp();
test_operationfunctionLogGradTensorOp();
test_operationfunctionPowTensorOp();
test_operationfunctionPowGradTensorOp();
test_operationfunctionLeakyReLUTensorOp();
test_operationfunctionLeakyReLUGradTensorOp();
test_operationfunctionSinTensorOp();
test_operationfunctionSinGradTensorOp();
test_operationfunctionCosTensorOp();
test_operationfunctionCosGradTensorOp();
return 0;
}
#endif |
ecbaded061e03bbd972bb3774ba2157469e80490.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorSort.cuh"
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
THCudaLongTensor* t,
int dim) {
int64_t dims = THCudaLongTensor_nDimension(state, t);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
if (inElements > 0) {
int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
ptrdiff_t numSlices = inElements == 0 ? 0 : inElements / sliceSize;
dim3 grid;
if (!THC_getGridFromTiles(numSlices, grid)) {
THError("Slice to fill with indices is too large");
}
int64_t maxThreads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
int64_t numThreads = sliceSize;
if (numThreads > maxThreads) {
numThreads = maxThreads;
}
dim3 block(numThreads);
#define FILL_INDEX(T, DIM) \
hipLaunchKernelGGL(( fillSliceWithIndex<T, DIM>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
info, numSlices, sliceSize, info.strides[collapseDim])
if (THCTensor_canUse32BitIndexMath(state, t)) {
TensorInfo<int64_t, uint32_t> info =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
if (info.isContiguous()) {
FILL_INDEX(unsigned int, -2);
} else {
if (info.dims == 1) {
FILL_INDEX(unsigned int, 1);
} else if (info.dims == 2) {
FILL_INDEX(unsigned int, 2);
} else {
FILL_INDEX(unsigned int, -1);
}
}
} else {
TensorInfo<int64_t, uint64_t> info =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
// catch-all implementation
FILL_INDEX(uint64_t, -1);
}
#undef FILL_INDEX
THCudaCheck(hipGetLastError());
}
}
| ecbaded061e03bbd972bb3774ba2157469e80490.cu | #include "THCTensorSort.cuh"
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
THCudaLongTensor* t,
int dim) {
int64_t dims = THCudaLongTensor_nDimension(state, t);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
if (inElements > 0) {
int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
ptrdiff_t numSlices = inElements == 0 ? 0 : inElements / sliceSize;
dim3 grid;
if (!THC_getGridFromTiles(numSlices, grid)) {
THError("Slice to fill with indices is too large");
}
int64_t maxThreads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
int64_t numThreads = sliceSize;
if (numThreads > maxThreads) {
numThreads = maxThreads;
}
dim3 block(numThreads);
#define FILL_INDEX(T, DIM) \
fillSliceWithIndex<T, DIM> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
info, numSlices, sliceSize, info.strides[collapseDim])
if (THCTensor_canUse32BitIndexMath(state, t)) {
TensorInfo<int64_t, uint32_t> info =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
if (info.isContiguous()) {
FILL_INDEX(unsigned int, -2);
} else {
if (info.dims == 1) {
FILL_INDEX(unsigned int, 1);
} else if (info.dims == 2) {
FILL_INDEX(unsigned int, 2);
} else {
FILL_INDEX(unsigned int, -1);
}
}
} else {
TensorInfo<int64_t, uint64_t> info =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
info.reduceDim(dim);
int collapseDim = info.collapseDims(dim);
// catch-all implementation
FILL_INDEX(uint64_t, -1);
}
#undef FILL_INDEX
THCudaCheck(cudaGetLastError());
}
}
|
bacaf742c4d5eac3fffba61bebbea53e3b3b3b51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include "amir_cuda_util/cuda_util.h"
namespace amirstan {
namespace cuda {
template <class value_type>
__global__ void copy_permute_kernel(value_type *dst, const value_type *src,
int n, TensorSize ts_src_stride,
TensorSize ts_dst_stride,
TensorSize ts_permute, int src_dim) {
int *src_stride = &(ts_src_stride.size[0]);
int *dst_stride = &(ts_dst_stride.size[0]);
int *permute = &(ts_permute.size[0]);
CUDA_KERNEL_LOOP(index, n) {
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < src_dim; ++i) {
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
src_index += dim_index * src_stride[permute[i]];
}
dst[index] = src[src_index];
}
}
template <class value_type>
void memcpyPermute(value_type *dst, const value_type *src, int *src_size,
int *permute, int src_dim, hipStream_t stream) {
size_t copy_size = 1;
TensorSize ts_permute;
memcpy(&(ts_permute.size[0]), permute, src_dim * sizeof(int));
TensorSize ts_src_stride;
TensorSize ts_dst_stride;
TensorSize ts_dst_size;
int *src_stride = &(ts_src_stride.size[0]);
int *dst_stride = &(ts_dst_stride.size[0]);
int *dst_size = &(ts_dst_size.size[0]);
src_stride[src_dim - 1] = 1;
dst_stride[src_dim - 1] = 1;
for (int i = src_dim - 1; i >= 0; --i) {
dst_size[i] = src_size[permute[i]];
if (i < src_dim - 1) {
src_stride[i] = src_stride[i + 1] * src_size[i + 1];
}
}
for (int i = src_dim - 1; i >= 0; --i) {
copy_size *= dst_size[i];
if (i < src_dim - 1) {
dst_stride[i] = dst_stride[i + 1] * dst_size[i + 1];
}
}
hipLaunchKernelGGL(( copy_permute_kernel<value_type>)
, dim3(GET_BLOCKS(copy_size)), dim3(CUDA_NUM_THREADS), 0, stream,
dst, src, copy_size, ts_src_stride, ts_dst_stride, ts_permute,
src_dim);
}
template void memcpyPermute<float>(float *dst, const float *src, int *src_size,
int *permute, int src_dim,
hipStream_t stream);
} // namespace cuda
} // namespace amirstan
| bacaf742c4d5eac3fffba61bebbea53e3b3b3b51.cu | #include <stdio.h>
#include <algorithm>
#include <iostream>
#include "amir_cuda_util/cuda_util.h"
namespace amirstan {
namespace cuda {
template <class value_type>
__global__ void copy_permute_kernel(value_type *dst, const value_type *src,
int n, TensorSize ts_src_stride,
TensorSize ts_dst_stride,
TensorSize ts_permute, int src_dim) {
int *src_stride = &(ts_src_stride.size[0]);
int *dst_stride = &(ts_dst_stride.size[0]);
int *permute = &(ts_permute.size[0]);
CUDA_KERNEL_LOOP(index, n) {
size_t dst_index = index;
size_t src_index = 0;
for (int i = 0; i < src_dim; ++i) {
int dim_index = dst_index / dst_stride[i];
dst_index = dst_index % dst_stride[i];
src_index += dim_index * src_stride[permute[i]];
}
dst[index] = src[src_index];
}
}
template <class value_type>
void memcpyPermute(value_type *dst, const value_type *src, int *src_size,
int *permute, int src_dim, cudaStream_t stream) {
size_t copy_size = 1;
TensorSize ts_permute;
memcpy(&(ts_permute.size[0]), permute, src_dim * sizeof(int));
TensorSize ts_src_stride;
TensorSize ts_dst_stride;
TensorSize ts_dst_size;
int *src_stride = &(ts_src_stride.size[0]);
int *dst_stride = &(ts_dst_stride.size[0]);
int *dst_size = &(ts_dst_size.size[0]);
src_stride[src_dim - 1] = 1;
dst_stride[src_dim - 1] = 1;
for (int i = src_dim - 1; i >= 0; --i) {
dst_size[i] = src_size[permute[i]];
if (i < src_dim - 1) {
src_stride[i] = src_stride[i + 1] * src_size[i + 1];
}
}
for (int i = src_dim - 1; i >= 0; --i) {
copy_size *= dst_size[i];
if (i < src_dim - 1) {
dst_stride[i] = dst_stride[i + 1] * dst_size[i + 1];
}
}
copy_permute_kernel<value_type>
<<<GET_BLOCKS(copy_size), CUDA_NUM_THREADS, 0, stream>>>(
dst, src, copy_size, ts_src_stride, ts_dst_stride, ts_permute,
src_dim);
}
template void memcpyPermute<float>(float *dst, const float *src, int *src_size,
int *permute, int src_dim,
cudaStream_t stream);
} // namespace cuda
} // namespace amirstan
|
50bb4e5294c3ef27696e8db720257a642a4adfdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Details on this algorithm can be found in:
// Green, O., 2017. "Efficient scalable median filtering using histogram-based operations",
// IEEE Transactions on Image Processing, 27(5), pp.2217-2228.
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
// // namespace imgproc
// {
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
if (1 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if (2 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if (4 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if(tx<7){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
if ( 1 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if ( 2 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if ( 4 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if ( 8 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-8];
__syncthreads();
if ( 16 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-16];
__syncthreads();
if(tx<31){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
// The following code partitions the work to the blocks. Some blocks will do one row more
// than other blocks. This code is responsible for doing that balancing
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
// In the original algorithm an initialization phase was required as part of the window was outside the
// image. In this parallel version, the initializtion is required for all thread blocks that part
// of the median filter is outside the window.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
// For all remaining rows in the median filter, add the values to the the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
// Going through all the elements of a specific row. Foeach histogram, a value is taken out and
// one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
// __syncthreads();
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
if ( luc[firstBin] <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){
histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) );
}
}
else{
for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ),
hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
__syncthreads();
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,hipStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
hipLaunchKernelGGL(( cuMedianFilterMultiBlock), dim3(gridDim),dim3(blockDim),0, stream, src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( hipDeviceSynchronize() );
}
}}}
#endif
| 50bb4e5294c3ef27696e8db720257a642a4adfdc.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// Details on this algorithm can be found in:
// Green, O., 2017. "Efficient scalable median filtering using histogram-based operations",
// IEEE Transactions on Image Processing, 27(5), pp.2217-2228.
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace cuda { namespace device
{
// // namespace imgproc
// {
__device__ void histogramAddAndSub8(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramMultipleAdd8(int* H, const int * hist_col,int histCount){
int tx = threadIdx.x;
if (tx<8){
int temp=H[tx];
for(int i=0; i<histCount; i++)
temp+=hist_col[(i<<3)+tx];
H[tx]=temp;
}
}
__device__ void histogramClear8(int* H){
int tx = threadIdx.x;
if (tx<8){
H[tx]=0;
}
}
__device__ void histogramAdd8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramSub8(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<8){
H[tx]-=hist_col[tx];
}
}
__device__ void histogramAdd32(int* H, const int * hist_col){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_col[tx];
}
}
__device__ void histogramAddAndSub32(int* H, const int * hist_colAdd,const int * hist_colSub){
int tx = threadIdx.x;
if (tx<32){
H[tx]+=hist_colAdd[tx]-hist_colSub[tx];
}
}
__device__ void histogramClear32(int* H){
int tx = threadIdx.x;
if (tx<32){
H[tx]=0;
}
}
__device__ void lucClear8(int* luc){
int tx = threadIdx.x;
if (tx<8)
luc[tx]=0;
}
__device__ void histogramMedianPar8LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<8){
Hscan[tx]=H[tx];
}
__syncthreads();
if (1 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if (2 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if (4 <= tx && tx < 8 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if(tx<7){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__device__ void histogramMedianPar32LookupOnly(int* H,int* Hscan, const int medPos,int* retval, int* countAtMed){
int tx=threadIdx.x;
*retval=*countAtMed=0;
if(tx<32){
Hscan[tx]=H[tx];
}
__syncthreads();
if ( 1 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-1];
__syncthreads();
if ( 2 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-2];
__syncthreads();
if ( 4 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-4];
__syncthreads();
if ( 8 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-8];
__syncthreads();
if ( 16 <= tx && tx < 32 )
Hscan[tx]+=Hscan[tx-16];
__syncthreads();
if(tx<31){
if(Hscan[tx+1] > medPos && Hscan[tx] < medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
else if(Hscan[tx]==medPos){
if(Hscan[tx+1]>medPos){
*retval=tx+1;
*countAtMed=Hscan[tx];
}
}
}
}
__global__ void cuMedianFilterMultiBlock(PtrStepSzb src, PtrStepSzb dest, PtrStepSzi histPar, PtrStepSzi coarseHistGrid,int r, int medPos_)
{
__shared__ int HCoarse[8];
__shared__ int HCoarseScan[32];
__shared__ int HFine[8][32];
__shared__ int luc[8];
__shared__ int firstBin,countAtMed, retval;
int rows = src.rows, cols=src.cols;
int extraRowThread=rows%gridDim.x;
int doExtraRow=blockIdx.x<extraRowThread;
int startRow=0, stopRow=0;
int rowsPerBlock= rows/gridDim.x+doExtraRow;
// The following code partitions the work to the blocks. Some blocks will do one row more
// than other blocks. This code is responsible for doing that balancing
if(doExtraRow){
startRow=rowsPerBlock*blockIdx.x;
stopRow=::min(rows, startRow+rowsPerBlock);
}
else{
startRow=(rowsPerBlock+1)*extraRowThread+(rowsPerBlock)*(blockIdx.x-extraRowThread);
stopRow=::min(rows, startRow+rowsPerBlock);
}
int* hist= histPar.data+cols*256*blockIdx.x;
int* histCoarse=coarseHistGrid.data +cols*8*blockIdx.x;
if (blockIdx.x==(gridDim.x-1))
stopRow=rows;
__syncthreads();
int initNeeded=0, initVal, initStartRow, initStopRow;
if(blockIdx.x==0){
initNeeded=1; initVal=r+2; initStartRow=1; initStopRow=r;
}
else if (startRow<(r+2)){
initNeeded=1; initVal=r+2-startRow; initStartRow=1; initStopRow=r+startRow;
}
else{
initNeeded=0; initVal=0; initStartRow=startRow-(r+1); initStopRow=r+startRow;
}
__syncthreads();
// In the original algorithm an initialization phase was required as part of the window was outside the
// image. In this parallel version, the initializtion is required for all thread blocks that part
// of the median filter is outside the window.
// For all threads in the block the same code will be executed.
if (initNeeded){
for (int j=threadIdx.x; j<(cols); j+=blockDim.x){
hist[j*256+src.ptr(0)[j]]=initVal;
histCoarse[j*8+(src.ptr(0)[j]>>5)]=initVal;
}
}
__syncthreads();
// For all remaining rows in the median filter, add the values to the the histogram
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
for(int i=initStartRow; i<initStopRow; i++){
int pos=::min(i,rows-1);
hist[j*256+src.ptr(pos)[j]]++;
histCoarse[j*8+(src.ptr(pos)[j]>>5)]++;
}
}
__syncthreads();
// Going through all the rows that the block is responsible for.
int inc=blockDim.x*256;
int incCoarse=blockDim.x*8;
for(int i=startRow; i< stopRow; i++){
// For every new row that is started the global histogram for the entire window is restarted.
histogramClear8(HCoarse);
lucClear8(luc);
// Computing some necessary indices
int possub=::max(0,i-r-1),posadd=::min(rows-1,i+r);
int histPos=threadIdx.x*256;
int histCoarsePos=threadIdx.x*8;
// Going through all the elements of a specific row. Foeach histogram, a value is taken out and
// one value is added.
for (int j=threadIdx.x; j<cols; j+=blockDim.x){
hist[histPos+ src.ptr(possub)[j] ]--;
hist[histPos+ src.ptr(posadd)[j] ]++;
histCoarse[histCoarsePos+ (src.ptr(possub)[j]>>5) ]--;
histCoarse[histCoarsePos+ (src.ptr(posadd)[j]>>5) ]++;
histPos+=inc;
histCoarsePos+=incCoarse;
}
__syncthreads();
histogramMultipleAdd8(HCoarse,histCoarse, 2*r+1);
// __syncthreads();
int cols_m_1=cols-1;
for(int j=r;j<cols-r;j++){
int possub=::max(j-r,0);
int posadd=::min(j+1+r,cols_m_1);
int medPos=medPos_;
__syncthreads();
histogramMedianPar8LookupOnly(HCoarse,HCoarseScan,medPos, &firstBin,&countAtMed);
__syncthreads();
if ( luc[firstBin] <= (j-r))
{
histogramClear32(HFine[firstBin]);
for ( luc[firstBin] = j-r; luc[firstBin] < ::min(j+r+1,cols); luc[firstBin]++ ){
histogramAdd32(HFine[firstBin], hist+(luc[firstBin]*256+(firstBin<<5) ) );
}
}
else{
for ( ; luc[firstBin] < (j+r+1);luc[firstBin]++ ) {
histogramAddAndSub32(HFine[firstBin],
hist+(::min(luc[firstBin],cols_m_1)*256+(firstBin<<5) ),
hist+(::max(luc[firstBin]-2*r-1,0)*256+(firstBin<<5) ) );
__syncthreads();
}
}
__syncthreads();
int leftOver=medPos-countAtMed;
if(leftOver>=0){
histogramMedianPar32LookupOnly(HFine[firstBin],HCoarseScan,leftOver,&retval,&countAtMed);
}
else retval=0;
__syncthreads();
if (threadIdx.x==0){
dest.ptr(i)[j]=(firstBin<<5) + retval;
}
histogramAddAndSub8(HCoarse, histCoarse+(int)(posadd<<3),histCoarse+(int)(possub<<3));
__syncthreads();
}
__syncthreads();
}
}
void medianFiltering_gpu(const PtrStepSzb src, PtrStepSzb dst, PtrStepSzi devHist, PtrStepSzi devCoarseHist,int kernel, int partitions,cudaStream_t stream){
int medPos=2*kernel*kernel+2*kernel;
dim3 gridDim; gridDim.x=partitions;
dim3 blockDim; blockDim.x=32;
cuMedianFilterMultiBlock<<<gridDim,blockDim,0, stream>>>(src, dst, devHist,devCoarseHist, kernel, medPos);
if (!stream)
cudaSafeCall( cudaDeviceSynchronize() );
}
}}}
#endif
|
a61b322a6212b657ac80e7c2e5d078d438f976b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_minus_2_front [3][2];
static int dims_update_halo_kernel2_zvel_minus_2_front_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_front_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = -zvel0(0,0,-2);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = -zvel1(0,0,-2);
}
__global__ void ops_update_halo_kernel2_zvel_minus_2_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[0][0] * dims_update_halo_kernel2_zvel_minus_2_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[1][0] * dims_update_halo_kernel2_zvel_minus_2_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_minus_2_front[0][0], dims_update_halo_kernel2_zvel_minus_2_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_minus_2_front[1][0], dims_update_halo_kernel2_zvel_minus_2_front[1][1], arg1);
update_halo_kernel2_zvel_minus_2_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,59)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(59,"update_halo_kernel2_zvel_minus_2_front");
OPS_kernels[59].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_minus_2_front_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_minus_2_front_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_minus_2_front_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_minus_2_front_h[1][1]) {
dims_update_halo_kernel2_zvel_minus_2_front_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_minus_2_front_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_minus_2_front_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_minus_2_front_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_minus_2_front, dims_update_halo_kernel2_zvel_minus_2_front_h, sizeof(dims_update_halo_kernel2_zvel_minus_2_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[59].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_2_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[59].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[59].mpi_time += t2-t1;
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 59;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 59;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(59,"update_halo_kernel2_zvel_minus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
| a61b322a6212b657ac80e7c2e5d078d438f976b1.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_minus_2_front [3][2];
static int dims_update_halo_kernel2_zvel_minus_2_front_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_front_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = -zvel0(0,0,-2);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = -zvel1(0,0,-2);
}
__global__ void ops_update_halo_kernel2_zvel_minus_2_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[0][0] * dims_update_halo_kernel2_zvel_minus_2_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_front[1][0] * dims_update_halo_kernel2_zvel_minus_2_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_minus_2_front[0][0], dims_update_halo_kernel2_zvel_minus_2_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_minus_2_front[1][0], dims_update_halo_kernel2_zvel_minus_2_front[1][1], arg1);
update_halo_kernel2_zvel_minus_2_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,59)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(59,"update_halo_kernel2_zvel_minus_2_front");
OPS_kernels[59].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_minus_2_front_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_minus_2_front_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_minus_2_front_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_minus_2_front_h[1][1]) {
dims_update_halo_kernel2_zvel_minus_2_front_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_minus_2_front_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_minus_2_front_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_minus_2_front_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_minus_2_front, dims_update_halo_kernel2_zvel_minus_2_front_h, sizeof(dims_update_halo_kernel2_zvel_minus_2_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[59].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_minus_2_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[59].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[59].mpi_time += t2-t1;
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 59;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 59;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(59,"update_halo_kernel2_zvel_minus_2_front");
}
ops_enqueue_kernel(desc);
}
#endif
|
8a1f7fbfb0264a528c1b9e27d6af91804f62a053.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <math.h>
// This file was copied from libc++'s test suite, then modified to test CUDA.
// For the most part, this consists of adding __device__ attributes and
// deleting long double.
// This test requires C++11 (it's mostly decltype checks).
#if __cplusplus >= 201103L
#include <math.h>
#include <type_traits>
#include <cassert>
#include <stdio.h>
// See PR21083
// Ambiguous is a user-defined type that defines its own overloads of cmath
// functions. When the std overloads are candidates too (by using or adl),
// they should not interfere.
struct Ambiguous : std::true_type { // ADL
__device__ operator float () { return 0.f; }
__device__ operator double () { return 0.; }
};
__device__ Ambiguous abs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous floor(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); }
__device__ Ambiguous log(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log10(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); }
__device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erf(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous logb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous rint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous round(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); }
// helper function to prevent compiler constant-folding test inputs.
template <typename T>
__device__ T V(T input) {
volatile T tmp = input;
return tmp;
}
__device__ void test_abs()
{
static_assert((std::is_same<decltype(abs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(abs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), "");
assert(abs(V(-1)) == 1);
assert(abs(V(-1.)) == 1);
assert(abs(V(-1.f)) == 1);
}
__device__ void test_acos()
{
static_assert((std::is_same<decltype(acos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acosf(0)), float>::value), "");
static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), "");
assert(acos(V(1)) == 0);
assert(acos(V(1.)) == 0);
assert(acos(V(1.f)) == 0);
}
__device__ void test_asin()
{
static_assert((std::is_same<decltype(asin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinf(0)), float>::value), "");
static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), "");
assert(asin(V(0)) == 0);
assert(asin(V(0.)) == 0);
assert(asin(V(0.f)) == 0);
}
__device__ void test_atan()
{
static_assert((std::is_same<decltype(atan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanf(0)), float>::value), "");
static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), "");
assert(atan(V(0)) == 0);
assert(atan(V(0.)) == 0);
assert(atan(V(0.f)) == 0);
}
__device__ void test_atan2()
{
static_assert((std::is_same<decltype(atan2((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2f(0,0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(atan2(V(0), 1) == 0);
assert(atan2(V(0), 1.) == 0);
assert(atan2(V(0), 1.f) == 0);
assert(atan2(V(0.), 1) == 0);
assert(atan2(V(0.), 1.) == 0);
assert(atan2(V(0.), 1.f) == 0);
assert(atan2(V(0.f), 1) == 0);
assert(atan2(V(0.f), 1.) == 0);
assert(atan2(V(0.f), 1.f) == 0);
}
__device__ void test_ceil()
{
static_assert((std::is_same<decltype(ceil((float)0)), float>::value), "");
static_assert((std::is_same<decltype(ceil((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((double)0)), double>::value), "");
static_assert((std::is_same<decltype(ceilf(0)), float>::value), "");
static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), "");
assert(ceil(V(0)) == 0);
assert(ceil(V(0.)) == 0);
assert(ceil(V(0.f)) == 0);
}
__device__ void test_cos()
{
static_assert((std::is_same<decltype(cos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cosf(0)), float>::value), "");
static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), "");
assert(cos(V(0)) == 1);
assert(cos(V(0.)) == 1);
assert(cos(V(0.f)) == 1);
}
__device__ void test_cosh()
{
static_assert((std::is_same<decltype(cosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(coshf(0)), float>::value), "");
static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), "");
assert(cosh(V(0)) == 1);
assert(cosh(V(0.)) == 1);
assert(cosh(V(0.f)) == 1);
}
__device__ void test_exp()
{
static_assert((std::is_same<decltype(exp((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expf(0)), float>::value), "");
static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), "");
assert(exp(V(0)) == 1);
assert(exp(V(0.)) == 1);
assert(exp(V(0.f)) == 1);
}
__device__ void test_fabs()
{
static_assert((std::is_same<decltype(fabs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(fabs((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(fabsf(0.0f)), float>::value), "");
static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), "");
assert(fabs(V(-1)) == 1);
assert(fabs(V(-1.)) == 1);
assert(fabs(V(-1.f)) == 1);
}
__device__ void test_floor()
{
static_assert((std::is_same<decltype(floor((float)0)), float>::value), "");
static_assert((std::is_same<decltype(floor((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((double)0)), double>::value), "");
static_assert((std::is_same<decltype(floorf(0)), float>::value), "");
static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), "");
assert(floor(V(1)) == 1);
assert(floor(V(1.)) == 1);
assert(floor(V(1.f)) == 1);
}
__device__ void test_fmod()
{
static_assert((std::is_same<decltype(fmod((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmodf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(fmod(V(1.5), 1) == .5);
assert(fmod(V(1.5), 1.) == .5);
assert(fmod(V(1.5), 1.f) == .5);
assert(fmod(V(1.5f), 1) == .5);
assert(fmod(V(1.5f), 1.) == .5);
assert(fmod(V(1.5f), 1.f) == .5);
assert(fmod(V(2), 1) == 0);
assert(fmod(V(2), 1.) == 0);
assert(fmod(V(2), 1.f) == 0);
}
__device__ void test_frexp()
{
int ip;
static_assert((std::is_same<decltype(frexp((float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp((bool)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned short)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexpf(0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), "");
assert(frexp(V(0), &ip) == 0);
assert(frexp(V(0.), &ip) == 0);
assert(frexp(V(0.f), &ip) == 0);
}
__device__ void test_ldexp()
{
int ip = 1;
static_assert((std::is_same<decltype(ldexp((float)0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp((bool)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned short)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((double)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexpf(0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), "");
assert(ldexp(V(1), ip) == 2);
assert(ldexp(V(1.), ip) == 2);
assert(ldexp(V(1.f), ip) == 2);
}
__device__ void test_log()
{
static_assert((std::is_same<decltype(log((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logf(0)), float>::value), "");
static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), "");
assert(log(V(1)) == 0);
assert(log(V(1.)) == 0);
assert(log(V(1.f)) == 0);
}
__device__ void test_log10()
{
static_assert((std::is_same<decltype(log10((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log10((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log10f(0)), float>::value), "");
static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), "");
assert(log10(V(1)) == 0);
assert(log10(V(1.)) == 0);
assert(log10(V(1.f)) == 0);
}
__device__ void test_modf()
{
static_assert((std::is_same<decltype(modf((float)0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf((double)0, (double*)0)), double>::value), "");
static_assert((std::is_same<decltype(modff(0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), "");
double i;
assert(modf(V(1), &i) == 0);
assert(modf(V(1.), &i) == 0);
assert(modf(V(1.f), &i) == 0);
}
__device__ void test_pow()
{
static_assert((std::is_same<decltype(pow((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(pow((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(powf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(pow(V(1), 1) == 1);
assert(pow(V(1.), 1) == 1);
assert(pow(V(1.f), 1) == 1);
assert(pow(V(1), 1.) == 1);
assert(pow(V(1.), 1.) == 1);
assert(pow(V(1.f), 1.) == 1);
assert(pow(V(1), 1.f) == 1);
assert(pow(V(1.), 1.f) == 1);
assert(pow(V(1.f), 1.f) == 1);
}
__device__ void test_sin()
{
static_assert((std::is_same<decltype(sin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinf(0)), float>::value), "");
static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), "");
assert(sin(0) == 0);
assert(sin(0.) == 0);
assert(sin(0.f) == 0);
}
__device__ void test_sinh()
{
static_assert((std::is_same<decltype(sinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), "");
assert(sinh(V(0)) == 0);
assert(sinh(V(0.)) == 0);
assert(sinh(V(0.f)) == 0);
}
__device__ void test_sqrt()
{
static_assert((std::is_same<decltype(sqrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), "");
assert(sqrt(V(4)) == 2);
assert(sqrt(V(4.)) == 2);
assert(sqrt(V(4.f)) == 2);
}
__device__ void test_tan()
{
static_assert((std::is_same<decltype(tan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanf(0)), float>::value), "");
static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), "");
assert(tan(V(0)) == 0);
assert(tan(V(0.)) == 0);
assert(tan(V(0.f)) == 0);
}
__device__ void test_tanh()
{
static_assert((std::is_same<decltype(tanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), "");
assert(tanh(V(0)) == 0);
assert(tanh(V(0.)) == 0);
assert(tanh(V(0.f)) == 0);
}
__device__ void test_signbit()
{
#ifdef signbit
#error signbit defined
#endif
static_assert((std::is_same<decltype(signbit((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), "");
assert(signbit(V(-1)) == true);
assert(signbit(V(-1.)) == true);
assert(signbit(V(-1.f)) == true);
}
__device__ void test_fpclassify()
{
#ifdef fpclassify
#error fpclassify defined
#endif
static_assert((std::is_same<decltype(fpclassify((float)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify((double)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), "");
assert(fpclassify(V(-1)) == FP_NORMAL);
assert(fpclassify(V(-1.)) == FP_NORMAL);
assert(fpclassify(V(-1.f)) == FP_NORMAL);
}
__device__ void test_isfinite()
{
#ifdef isfinite
#error isfinite defined
#endif
static_assert((std::is_same<decltype(isfinite((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), "");
assert(isfinite(V(-1)) == true);
assert(isfinite(V(-1.)) == true);
assert(isfinite(V(-1.f)) == true);
}
__device__ void test_isnormal()
{
#ifdef isnormal
#error isnormal defined
#endif
static_assert((std::is_same<decltype(isnormal((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), "");
assert(std::isnormal(V(-1)) == true);
assert(std::isnormal(V(-1.)) == true);
assert(std::isnormal(V(-1.f)) == true);
}
__device__ void test_isgreater()
{
#ifdef isgreater
#error isgreater defined
#endif
static_assert((std::is_same<decltype(isgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreater(V(-1), 0) == false);
assert(std::isgreater(V(-1), 0.) == false);
assert(std::isgreater(V(-1), 0.f) == false);
assert(std::isgreater(V(-1.), 0) == false);
assert(std::isgreater(V(-1.), 0.) == false);
assert(std::isgreater(V(-1.), 0.f) == false);
assert(std::isgreater(V(-1.f), 0) == false);
assert(std::isgreater(V(-1.f), 0.) == false);
assert(std::isgreater(V(-1.f), 0.f) == false);
}
__device__ void test_isgreaterequal()
{
#ifdef isgreaterequal
#error isgreaterequal defined
#endif
static_assert((std::is_same<decltype(isgreaterequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreaterequal(V(-1), 0) == false);
assert(std::isgreaterequal(V(-1), 0.) == false);
assert(std::isgreaterequal(V(-1), 0.f) == false);
assert(std::isgreaterequal(V(-1.), 0) == false);
assert(std::isgreaterequal(V(-1.), 0.) == false);
assert(std::isgreaterequal(V(-1.), 0.f) == false);
assert(std::isgreaterequal(V(-1.f), 0) == false);
assert(std::isgreaterequal(V(-1.f), 0.) == false);
assert(std::isgreaterequal(V(-1.f), 0.f) == false);
}
__device__ void test_isinf()
{
#ifdef isinf
#error isinf defined
#endif
static_assert((std::is_same<decltype(isinf((float)0)), bool>::value), "");
typedef decltype(isinf((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isinf(0)), bool>::value), "");
assert(std::isinf(V(-1)) == false);
assert(std::isinf(V(-1.)) == false);
assert(std::isinf(V(-1.f)) == false);
}
__device__ void test_isless()
{
#ifdef isless
#error isless defined
#endif
static_assert((std::is_same<decltype(isless((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isless(V(-1), 0) == true);
assert(std::isless(V(-1), 0.) == true);
assert(std::isless(V(-1), 0.f) == true);
assert(std::isless(V(-1.), 0) == true);
assert(std::isless(V(-1.), 0.) == true);
assert(std::isless(V(-1.), 0.f) == true);
assert(std::isless(V(-1.f), 0) == true);
assert(std::isless(V(-1.f), 0.) == true);
assert(std::isless(V(-1.f), 0.f) == true);
}
__device__ void test_islessequal()
{
#ifdef islessequal
#error islessequal defined
#endif
static_assert((std::is_same<decltype(islessequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessequal(V(-1), 0) == true);
assert(std::islessequal(V(-1), 0.) == true);
assert(std::islessequal(V(-1), 0.f) == true);
assert(std::islessequal(V(-1.), 0) == true);
assert(std::islessequal(V(-1.), 0.) == true);
assert(std::islessequal(V(-1.), 0.f) == true);
assert(std::islessequal(V(-1.f), 0) == true);
assert(std::islessequal(V(-1.f), 0.) == true);
assert(std::islessequal(V(-1.f), 0.f) == true);
}
__device__ void test_islessgreater()
{
#ifdef islessgreater
#error islessgreater defined
#endif
static_assert((std::is_same<decltype(islessgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessgreater(V(-1), 0) == true);
assert(std::islessgreater(V(-1), 0.) == true);
assert(std::islessgreater(V(-1), 0.f) == true);
assert(std::islessgreater(V(-1.), 0) == true);
assert(std::islessgreater(V(-1.), 0.) == true);
assert(std::islessgreater(V(-1.), 0.f) == true);
assert(std::islessgreater(V(-1.f), 0) == true);
assert(std::islessgreater(V(-1.f), 0.) == true);
assert(std::islessgreater(V(-1.f), 0.f) == true);
}
__device__ void test_isnan()
{
#ifdef isnan
#error isnan defined
#endif
static_assert((std::is_same<decltype(isnan((float)0)), bool>::value), "");
typedef decltype(isnan((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isnan(0)), bool>::value), "");
assert(std::isnan(V(-1)) == false);
assert(std::isnan(V(-1.)) == false);
assert(std::isnan(V(-1.f)) == false);
}
__device__ void test_isunordered()
{
#ifdef isunordered
#error isunordered defined
#endif
static_assert((std::is_same<decltype(isunordered((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isunordered(V(-1), 0) == false);
assert(std::isunordered(V(-1), 0.) == false);
assert(std::isunordered(V(-1), 0.f) == false);
assert(std::isunordered(V(-1.), 0) == false);
assert(std::isunordered(V(-1.), 0.) == false);
assert(std::isunordered(V(-1.), 0.f) == false);
assert(std::isunordered(V(-1.f), 0) == false);
assert(std::isunordered(V(-1.f), 0.) == false);
assert(std::isunordered(V(-1.f), 0.f) == false);
}
__device__ void test_acosh()
{
static_assert((std::is_same<decltype(acosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acoshf(0)), float>::value), "");
static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), "");
assert(std::acosh(V(1)) == 0);
assert(std::acosh(V(1.)) == 0);
assert(std::acosh(V(1.f)) == 0);
}
__device__ void test_asinh()
{
static_assert((std::is_same<decltype(asinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), "");
assert(asinh(V(0)) == 0);
assert(asinh(V(0.)) == 0);
assert(asinh(V(0.f)) == 0);
}
__device__ void test_atanh()
{
static_assert((std::is_same<decltype(atanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), "");
assert(atanh(V(0)) == 0);
assert(atanh(V(0.)) == 0);
assert(atanh(V(0.f)) == 0);
}
__device__ void test_cbrt()
{
static_assert((std::is_same<decltype(cbrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), "");
assert(cbrt(V(1)) == 1);
assert(cbrt(V(1.)) == 1);
assert(cbrt(V(1.f)) == 1);
}
__device__ void test_copysign()
{
static_assert((std::is_same<decltype(copysign((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (double)0)), double>::value), "");
// CUDA's copysign(float, double) currently returns a float, in violation
// of the spec. We can't easily change this, so accept either one.
static_assert(
(std::is_same<decltype(copysign((float)0, (double)0)), double>::value ||
std::is_same<decltype(copysign((float)0, (double)0)), float>::value),
"");
static_assert((std::is_same<decltype(copysignf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::copysign(V(1), 1) == 1);
assert(std::copysign(V(1.), 1) == 1);
assert(std::copysign(V(1.f), 1) == 1);
assert(std::copysign(V(1), 1.) == 1);
assert(std::copysign(V(1.), 1.) == 1);
assert(std::copysign(V(1.f), 1.) == 1);
assert(std::copysign(V(1), 1.f) == 1);
assert(std::copysign(V(1.), 1.f) == 1);
assert(std::copysign(V(1.f), 1.f) == 1);
}
__device__ void test_erf()
{
static_assert((std::is_same<decltype(erf((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erf((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erff(0)), float>::value), "");
static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
assert(erf(V(0)) == 0);
assert(erf(V(0.)) == 0);
assert(erf(V(0.f)) == 0);
}
__device__ void test_erfc()
{
static_assert((std::is_same<decltype(erfc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erfc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erfcf(0)), float>::value), "");
static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
assert(erfc(V(0)) == 1);
assert(erfc(V(0.)) == 1);
assert(erfc(V(0.f)) == 1);
}
__device__ void test_exp2()
{
static_assert((std::is_same<decltype(exp2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2f(0)), float>::value), "");
static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
assert(exp2(V(1)) == 2);
assert(exp2(V(1.)) == 2);
assert(exp2(V(1.f)) == 2);
}
__device__ void test_expm1()
{
static_assert((std::is_same<decltype(expm1((float)0)), float>::value), "");
static_assert((std::is_same<decltype(expm1((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1f(0)), float>::value), "");
static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
assert(expm1(V(0)) == 0);
assert(expm1(V(0.)) == 0);
assert(expm1(V(0.f)) == 0);
}
__device__ void test_fdim()
{
static_assert((std::is_same<decltype(fdim((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdimf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fdim(V(1), 0) == 1);
assert(std::fdim(V(1.), 0) == 1);
assert(std::fdim(V(1.f), 0) == 1);
assert(std::fdim(V(1), 0.) == 1);
assert(std::fdim(V(1.), 0.) == 1);
assert(std::fdim(V(1.f), 0.) == 1);
assert(std::fdim(V(1), 0.f) == 1);
assert(std::fdim(V(1.), 0.f) == 1);
assert(std::fdim(V(1.f), 0.f) == 1);
}
__device__ void test_fma()
{
static_assert((std::is_same<decltype(fma((bool)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (int)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (long)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fma((bool)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (int)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (long)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaf(0,0,0)), float>::value), "");
static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fma(V(1), 1, 1) == 2);
assert(std::fma(V(1.), 1, 1) == 2);
assert(std::fma(V(1.f), 1, 1) == 2);
assert(std::fma(V(1), 1., 1) == 2);
assert(std::fma(V(1.), 1., 1) == 2);
assert(std::fma(V(1.f), 1., 1) == 2);
assert(std::fma(V(1), 1.f, 1) == 2);
assert(std::fma(V(1.), 1.f, 1) == 2);
assert(std::fma(V(1.f), 1.f, 1) == 2);
assert(std::fma(V(1), 1, 1.) == 2);
assert(std::fma(V(1.), 1, 1.) == 2);
assert(std::fma(V(1.f), 1, 1.) == 2);
assert(std::fma(V(1), 1., 1.) == 2);
assert(std::fma(V(1.), 1., 1.) == 2);
assert(std::fma(V(1.f), 1., 1.) == 2);
assert(std::fma(V(1), 1.f, 1.) == 2);
assert(std::fma(V(1.), 1.f, 1.) == 2);
assert(std::fma(V(1.f), 1.f, 1.) == 2);
assert(std::fma(V(1), 1, 1.f) == 2);
assert(std::fma(V(1.), 1, 1.f) == 2);
assert(std::fma(V(1.f), 1, 1.f) == 2);
assert(std::fma(V(1), 1., 1.f) == 2);
assert(std::fma(V(1.), 1., 1.f) == 2);
assert(std::fma(V(1.f), 1., 1.f) == 2);
assert(std::fma(V(1), 1.f, 1.f) == 2);
assert(std::fma(V(1.), 1.f, 1.f) == 2);
assert(std::fma(V(1.f), 1.f, 1.f) == 2);
}
__device__ void test_fmax()
{
static_assert((std::is_same<decltype(fmax((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaxf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmax(V(1), 0) == 1);
assert(std::fmax(V(1.), 0) == 1);
assert(std::fmax(V(1.f), 0) == 1);
assert(std::fmax(V(1), 0.) == 1);
assert(std::fmax(V(1.), 0.) == 1);
assert(std::fmax(V(1.f), 0.) == 1);
assert(std::fmax(V(1), 0.f) == 1);
assert(std::fmax(V(1.), 0.f) == 1);
assert(std::fmax(V(1.f), 0.f) == 1);
}
__device__ void test_fmin()
{
static_assert((std::is_same<decltype(fmin((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fminf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmin(V(1), 0) == 0);
assert(std::fmin(V(1.), 0) == 0);
assert(std::fmin(V(1.f), 0) == 0);
assert(std::fmin(V(1), 0.) == 0);
assert(std::fmin(V(1.), 0.) == 0);
assert(std::fmin(V(1.f), 0.) == 0);
assert(std::fmin(V(1), 0.f) == 0);
assert(std::fmin(V(1.), 0.f) == 0);
assert(std::fmin(V(1.f), 0.f) == 0);
}
__device__ void test_hypot()
{
static_assert((std::is_same<decltype(hypot((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypotf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(V(3), 4) == 5);
assert(std::hypot(V(3), 4.) == 5);
assert(std::hypot(V(3), 4.f) == 5);
assert(std::hypot(V(3.), 4) == 5);
assert(std::hypot(V(3.), 4.) == 5);
assert(std::hypot(V(3.), 4.f) == 5);
assert(std::hypot(V(3.f), 4) == 5);
assert(std::hypot(V(3.f), 4.) == 5);
assert(std::hypot(V(3.f), 4.f) == 5);
}
__device__ void test_ilogb()
{
static_assert((std::is_same<decltype(ilogb((float)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((bool)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned short)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((double)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogbf(0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
assert(ilogb(V(1)) == 0);
assert(ilogb(V(1.)) == 0);
assert(ilogb(V(1.f)) == 0);
}
__device__ void test_lgamma()
{
static_assert((std::is_same<decltype(lgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(lgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
assert(lgamma(V(1)) == 0);
assert(lgamma(V(1.)) == 0);
assert(lgamma(V(1.f)) == 0);
}
__device__ void test_llrint()
{
static_assert((std::is_same<decltype(llrint((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrintf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
assert(llrint(V(1)) == 1LL);
assert(llrint(V(1.)) == 1LL);
#if TORCH_HIP_VERSION > 7050
assert(llrint(V(1.f)) == 1LL);
#endif
}
__device__ void test_llround()
{
static_assert((std::is_same<decltype(llround((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llroundf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
assert(llround(V(1)) == 1LL);
assert(llround(V(1.)) == 1LL);
assert(llround(V(1.f)) == 1LL);
}
__device__ void test_log1p()
{
static_assert((std::is_same<decltype(log1p((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log1p((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log1pf(0)), float>::value), "");
static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
assert(log1p(V(0)) == 0);
assert(log1p(V(0.)) == 0);
assert(log1p(V(0.f)) == 0);
}
__device__ void test_log2()
{
static_assert((std::is_same<decltype(log2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log2f(0)), float>::value), "");
static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
assert(log2(V(1)) == 0);
assert(log2(V(1.)) == 0);
assert(log2(V(1.f)) == 0);
}
__device__ void test_logb()
{
static_assert((std::is_same<decltype(logb((float)0)), float>::value), "");
static_assert((std::is_same<decltype(logb((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logbf(0)), float>::value), "");
static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
assert(logb(V(1)) == 0);
assert(logb(V(1.)) == 0);
assert(logb(V(1.f)) == 0);
}
__device__ void test_lrint()
{
static_assert((std::is_same<decltype(lrint((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lrintf(0)), long>::value), "");
static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
assert(lrint(V(1)) == 1L);
assert(lrint(V(1.)) == 1L);
#if TORCH_HIP_VERSION > 7050
assert(lrint(V(1.f)) == 1L);
#endif
}
__device__ void test_lround()
{
static_assert((std::is_same<decltype(lround((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lroundf(0)), long>::value), "");
static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
assert(lround(V(1)) == 1L);
assert(lround(V(1.)) == 1L);
assert(lround(V(1.f)) == 1L);
}
__device__ void test_nan()
{
static_assert((std::is_same<decltype(nan("")), double>::value), "");
static_assert((std::is_same<decltype(nanf("")), float>::value), "");
}
__device__ void test_nearbyint()
{
static_assert((std::is_same<decltype(nearbyint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyintf(0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
assert(nearbyint(V(1)) == 1);
assert(nearbyint(V(1.)) == 1);
assert(nearbyint(V(1.f)) == 1);
// There are more checks in test_rint(). rint and nearbyint behave the same
// way on the GPU, so we only test them in one place.
}
__device__ void test_nextafter()
{
static_assert((std::is_same<decltype(nextafter((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafterf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
//assert(nextafter(0,1) == hexfloat<double>(0x1, 0, -1074));
// Invoke all our overloads. Even though we don't check the exact result
// (this is pretty annoying to do for this function), we make sure to *use*
// the results so that these function calls can't be DCE'ed.
assert(nextafter(V(0), 1) != 0);
assert(nextafter(V(0), 1.) != 0);
assert(nextafter(V(0), 1.f) != 0);
assert(nextafter(V(0.), 1) != 0);
assert(nextafter(V(0.), 1.) != 0);
assert(nextafter(V(0.), 1.f) != 0);
assert(nextafter(V(0.f), 1) != 0);
assert(nextafter(V(0.f), 1.) != 0);
assert(nextafter(V(0.f), 1.f) != 0);
}
__device__ void test_remainder()
{
static_assert((std::is_same<decltype(remainder((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainderf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(::remainder(V(1.5), 1) == -.5);
assert(::remainder(V(1.5), 1.) == -.5);
assert(::remainder(V(1.5), 1.f) == -.5);
assert(::remainder(V(1.5f), 1) == -.5);
assert(::remainder(V(1.5f), 1.) == -.5);
assert(::remainder(V(1.5f), 1.f) == -.5);
assert(::remainder(V(2), 1) == 0);
assert(::remainder(V(2), 1.) == 0);
assert(::remainder(V(2), 1.f) == 0);
}
__device__ void test_remquo()
{
int ip;
static_assert((std::is_same<decltype(remquo((float)0, (float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((bool)0, (float)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquof(0,0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::remquo(V(1), 1, &ip) == 0);
assert(std::remquo(V(1), 1., &ip) == 0);
assert(std::remquo(V(1), 1.f, &ip) == 0);
assert(std::remquo(V(0.5), 1, &ip) == 0.5);
assert(std::remquo(V(0.5), 1., &ip) == 0.5);
assert(std::remquo(V(0.5), 1.f, &ip) == 0.5);
assert(std::remquo(V(0.5f), 1, &ip) == 0.5);
assert(std::remquo(V(0.5f), 1., &ip) == 0.5);
assert(std::remquo(V(0.5f), 1.f, &ip) == 0.5);
}
__device__ void test_rint_nearbyint()
{
static_assert((std::is_same<decltype(rint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(rint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(rintf(0)), float>::value), "");
static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");
// Verify that rint/nearbyint produce identical correct results
auto check = [](double input, double fpresult) {
// FP rint()/nearbyint must match the expected result.
assert(rint(V(float(input))) == float(fpresult));
assert(nearbyint(V(float(input))) == float(fpresult));
assert(rint(V(input)) == fpresult);
assert(nearbyint(V(input)) == fpresult);
// for integral types, std::rint(input) == std::rint(double(input))
int iinput = input;
assert(std::rint(V(iinput)) == std::rint(double(V(iinput))));
assert(std::nearbyint(V(iinput)) == std::nearbyint(double(V(iinput))));
};
// Whole values round to themselves and do not change sign.
check(0.0, 0.0);
check(-0.0, -0.0);
check(1.0, 1.0);
check(-1.0, -1.0);
// Half-way values round towards nearest even number.
check(2.5, 2.0);
check(-2.5, -2.0);
check(3.5, 4.0);
check(-3.5, -4.0);
// Everything else is rounded towards nearest integer.
check(2.1, 2.0);
check(-2.1, -2.0);
check(2.7, 3.0);
check(-2.7, -3.0);
check(3.9, 4.0);
check(-3.9, -4.0);
}
__device__ void test_round()
{
static_assert((std::is_same<decltype(round((float)0)), float>::value), "");
static_assert((std::is_same<decltype(round((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(round((int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((double)0)), double>::value), "");
static_assert((std::is_same<decltype(roundf(0)), float>::value), "");
static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
assert(round(V(1)) == 1);
assert(round(V(1.)) == 1);
assert(round(V(1.f)) == 1);
}
__device__ void test_scalbln()
{
static_assert((std::is_same<decltype(scalbln((float)0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln((bool)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned short)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalblnf(0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbln(V(1), 1) == 2);
assert(std::scalbln(V(1), 1.) == 2);
assert(std::scalbln(V(1), 1.f) == 2);
assert(std::scalbln(V(1.), 1) == 2);
assert(std::scalbln(V(1.), 1.) == 2);
assert(std::scalbln(V(1.), 1.f) == 2);
assert(std::scalbln(V(1.f), 1) == 2);
assert(std::scalbln(V(1.f), 1.) == 2);
assert(std::scalbln(V(1.f), 1.f) == 2);
}
__device__ void test_scalbn()
{
static_assert((std::is_same<decltype(scalbn((float)0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn((bool)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned short)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((double)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbnf(0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbn(V(1), 1) == 2);
assert(std::scalbn(V(1), 1.) == 2);
assert(std::scalbn(V(1), 1.f) == 2);
assert(std::scalbn(V(1.), 1) == 2);
assert(std::scalbn(V(1.), 1.) == 2);
assert(std::scalbn(V(1.), 1.f) == 2);
assert(std::scalbn(V(1.f), 1) == 2);
assert(std::scalbn(V(1.f), 1.) == 2);
assert(std::scalbn(V(1.f), 1.f) == 2);
}
__device__ void test_tgamma()
{
static_assert((std::is_same<decltype(tgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
assert(tgamma(V(1)) == 1);
assert(tgamma(V(1.)) == 1);
assert(tgamma(V(1.f)) == 1);
}
__device__ void test_trunc()
{
static_assert((std::is_same<decltype(trunc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(trunc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(truncf(0)), float>::value), "");
static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
assert(trunc(V(1)) == 1);
assert(trunc(V(1.)) == 1);
assert(trunc(V(1.f)) == 1);
}
__global__ void tests()
{
test_abs();
test_acos();
test_asin();
test_atan();
test_atan2();
test_ceil();
test_cos();
test_cosh();
test_exp();
test_fabs();
test_floor();
test_fmod();
test_frexp();
test_ldexp();
test_log();
test_log10();
test_modf();
test_pow();
test_sin();
test_sinh();
test_sqrt();
test_tan();
test_tanh();
test_signbit();
test_fpclassify();
test_isfinite();
test_isnormal();
test_isgreater();
test_isgreaterequal();
test_isinf();
test_isless();
test_islessequal();
test_islessgreater();
test_isnan();
test_isunordered();
test_acosh();
test_asinh();
test_atanh();
test_cbrt();
test_copysign();
test_erf();
test_erfc();
test_exp2();
test_expm1();
test_fdim();
test_fma();
test_fmax();
test_fmin();
test_hypot();
test_ilogb();
test_lgamma();
test_llrint();
test_llround();
test_log1p();
test_log2();
test_logb();
test_lrint();
test_lround();
test_nan();
test_nearbyint();
test_nextafter();
test_remainder();
test_remquo();
test_rint_nearbyint();
test_round();
test_scalbln();
test_scalbn();
test_tgamma();
test_trunc();
}
int main() {
hipLaunchKernelGGL(( tests), dim3(1),dim3(1), 0, 0, );
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess) {
printf("CUDA error %d\n", (int)err);
return 1;
}
printf("Success!\n");
return 0;
}
#else
#include <stdio.h>
// No C++11; test is a nop.
int main() {
printf("Success!\n");
return 0;
}
#endif // __cplusplus < 201103L
| 8a1f7fbfb0264a528c1b9e27d6af91804f62a053.cu | //===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <math.h>
// This file was copied from libc++'s test suite, then modified to test CUDA.
// For the most part, this consists of adding __device__ attributes and
// deleting long double.
// This test requires C++11 (it's mostly decltype checks).
#if __cplusplus >= 201103L
#include <math.h>
#include <type_traits>
#include <cassert>
#include <stdio.h>
// See PR21083
// Ambiguous is a user-defined type that defines its own overloads of cmath
// functions. When the std overloads are candidates too (by using or adl),
// they should not interfere.
struct Ambiguous : std::true_type { // ADL
__device__ operator float () { return 0.f; }
__device__ operator double () { return 0.; }
};
__device__ Ambiguous abs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous floor(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); }
__device__ Ambiguous log(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log10(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); }
__device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erf(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous logb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous rint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous round(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); }
// helper function to prevent compiler constant-folding test inputs.
template <typename T>
__device__ T V(T input) {
volatile T tmp = input;
return tmp;
}
__device__ void test_abs()
{
static_assert((std::is_same<decltype(abs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(abs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), "");
assert(abs(V(-1)) == 1);
assert(abs(V(-1.)) == 1);
assert(abs(V(-1.f)) == 1);
}
__device__ void test_acos()
{
static_assert((std::is_same<decltype(acos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acosf(0)), float>::value), "");
static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), "");
assert(acos(V(1)) == 0);
assert(acos(V(1.)) == 0);
assert(acos(V(1.f)) == 0);
}
__device__ void test_asin()
{
static_assert((std::is_same<decltype(asin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinf(0)), float>::value), "");
static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), "");
assert(asin(V(0)) == 0);
assert(asin(V(0.)) == 0);
assert(asin(V(0.f)) == 0);
}
__device__ void test_atan()
{
static_assert((std::is_same<decltype(atan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanf(0)), float>::value), "");
static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), "");
assert(atan(V(0)) == 0);
assert(atan(V(0.)) == 0);
assert(atan(V(0.f)) == 0);
}
__device__ void test_atan2()
{
static_assert((std::is_same<decltype(atan2((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2f(0,0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(atan2(V(0), 1) == 0);
assert(atan2(V(0), 1.) == 0);
assert(atan2(V(0), 1.f) == 0);
assert(atan2(V(0.), 1) == 0);
assert(atan2(V(0.), 1.) == 0);
assert(atan2(V(0.), 1.f) == 0);
assert(atan2(V(0.f), 1) == 0);
assert(atan2(V(0.f), 1.) == 0);
assert(atan2(V(0.f), 1.f) == 0);
}
__device__ void test_ceil()
{
static_assert((std::is_same<decltype(ceil((float)0)), float>::value), "");
static_assert((std::is_same<decltype(ceil((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((double)0)), double>::value), "");
static_assert((std::is_same<decltype(ceilf(0)), float>::value), "");
static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), "");
assert(ceil(V(0)) == 0);
assert(ceil(V(0.)) == 0);
assert(ceil(V(0.f)) == 0);
}
__device__ void test_cos()
{
static_assert((std::is_same<decltype(cos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cosf(0)), float>::value), "");
static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), "");
assert(cos(V(0)) == 1);
assert(cos(V(0.)) == 1);
assert(cos(V(0.f)) == 1);
}
__device__ void test_cosh()
{
static_assert((std::is_same<decltype(cosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(coshf(0)), float>::value), "");
static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), "");
assert(cosh(V(0)) == 1);
assert(cosh(V(0.)) == 1);
assert(cosh(V(0.f)) == 1);
}
__device__ void test_exp()
{
static_assert((std::is_same<decltype(exp((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expf(0)), float>::value), "");
static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), "");
assert(exp(V(0)) == 1);
assert(exp(V(0.)) == 1);
assert(exp(V(0.f)) == 1);
}
__device__ void test_fabs()
{
static_assert((std::is_same<decltype(fabs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(fabs((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(fabsf(0.0f)), float>::value), "");
static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), "");
assert(fabs(V(-1)) == 1);
assert(fabs(V(-1.)) == 1);
assert(fabs(V(-1.f)) == 1);
}
__device__ void test_floor()
{
static_assert((std::is_same<decltype(floor((float)0)), float>::value), "");
static_assert((std::is_same<decltype(floor((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((double)0)), double>::value), "");
static_assert((std::is_same<decltype(floorf(0)), float>::value), "");
static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), "");
assert(floor(V(1)) == 1);
assert(floor(V(1.)) == 1);
assert(floor(V(1.f)) == 1);
}
__device__ void test_fmod()
{
static_assert((std::is_same<decltype(fmod((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmodf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(fmod(V(1.5), 1) == .5);
assert(fmod(V(1.5), 1.) == .5);
assert(fmod(V(1.5), 1.f) == .5);
assert(fmod(V(1.5f), 1) == .5);
assert(fmod(V(1.5f), 1.) == .5);
assert(fmod(V(1.5f), 1.f) == .5);
assert(fmod(V(2), 1) == 0);
assert(fmod(V(2), 1.) == 0);
assert(fmod(V(2), 1.f) == 0);
}
__device__ void test_frexp()
{
int ip;
static_assert((std::is_same<decltype(frexp((float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp((bool)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned short)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexpf(0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), "");
assert(frexp(V(0), &ip) == 0);
assert(frexp(V(0.), &ip) == 0);
assert(frexp(V(0.f), &ip) == 0);
}
__device__ void test_ldexp()
{
int ip = 1;
static_assert((std::is_same<decltype(ldexp((float)0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp((bool)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned short)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((double)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexpf(0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), "");
assert(ldexp(V(1), ip) == 2);
assert(ldexp(V(1.), ip) == 2);
assert(ldexp(V(1.f), ip) == 2);
}
__device__ void test_log()
{
static_assert((std::is_same<decltype(log((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logf(0)), float>::value), "");
static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), "");
assert(log(V(1)) == 0);
assert(log(V(1.)) == 0);
assert(log(V(1.f)) == 0);
}
__device__ void test_log10()
{
static_assert((std::is_same<decltype(log10((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log10((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log10f(0)), float>::value), "");
static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), "");
assert(log10(V(1)) == 0);
assert(log10(V(1.)) == 0);
assert(log10(V(1.f)) == 0);
}
__device__ void test_modf()
{
static_assert((std::is_same<decltype(modf((float)0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf((double)0, (double*)0)), double>::value), "");
static_assert((std::is_same<decltype(modff(0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), "");
double i;
assert(modf(V(1), &i) == 0);
assert(modf(V(1.), &i) == 0);
assert(modf(V(1.f), &i) == 0);
}
__device__ void test_pow()
{
static_assert((std::is_same<decltype(pow((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(pow((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(powf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(pow(V(1), 1) == 1);
assert(pow(V(1.), 1) == 1);
assert(pow(V(1.f), 1) == 1);
assert(pow(V(1), 1.) == 1);
assert(pow(V(1.), 1.) == 1);
assert(pow(V(1.f), 1.) == 1);
assert(pow(V(1), 1.f) == 1);
assert(pow(V(1.), 1.f) == 1);
assert(pow(V(1.f), 1.f) == 1);
}
__device__ void test_sin()
{
static_assert((std::is_same<decltype(sin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinf(0)), float>::value), "");
static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), "");
assert(sin(0) == 0);
assert(sin(0.) == 0);
assert(sin(0.f) == 0);
}
__device__ void test_sinh()
{
static_assert((std::is_same<decltype(sinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), "");
assert(sinh(V(0)) == 0);
assert(sinh(V(0.)) == 0);
assert(sinh(V(0.f)) == 0);
}
__device__ void test_sqrt()
{
static_assert((std::is_same<decltype(sqrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), "");
assert(sqrt(V(4)) == 2);
assert(sqrt(V(4.)) == 2);
assert(sqrt(V(4.f)) == 2);
}
__device__ void test_tan()
{
static_assert((std::is_same<decltype(tan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanf(0)), float>::value), "");
static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), "");
assert(tan(V(0)) == 0);
assert(tan(V(0.)) == 0);
assert(tan(V(0.f)) == 0);
}
__device__ void test_tanh()
{
static_assert((std::is_same<decltype(tanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), "");
assert(tanh(V(0)) == 0);
assert(tanh(V(0.)) == 0);
assert(tanh(V(0.f)) == 0);
}
__device__ void test_signbit()
{
#ifdef signbit
#error signbit defined
#endif
static_assert((std::is_same<decltype(signbit((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), "");
assert(signbit(V(-1)) == true);
assert(signbit(V(-1.)) == true);
assert(signbit(V(-1.f)) == true);
}
__device__ void test_fpclassify()
{
#ifdef fpclassify
#error fpclassify defined
#endif
static_assert((std::is_same<decltype(fpclassify((float)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify((double)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), "");
assert(fpclassify(V(-1)) == FP_NORMAL);
assert(fpclassify(V(-1.)) == FP_NORMAL);
assert(fpclassify(V(-1.f)) == FP_NORMAL);
}
__device__ void test_isfinite()
{
#ifdef isfinite
#error isfinite defined
#endif
static_assert((std::is_same<decltype(isfinite((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), "");
assert(isfinite(V(-1)) == true);
assert(isfinite(V(-1.)) == true);
assert(isfinite(V(-1.f)) == true);
}
__device__ void test_isnormal()
{
#ifdef isnormal
#error isnormal defined
#endif
static_assert((std::is_same<decltype(isnormal((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), "");
assert(std::isnormal(V(-1)) == true);
assert(std::isnormal(V(-1.)) == true);
assert(std::isnormal(V(-1.f)) == true);
}
__device__ void test_isgreater()
{
#ifdef isgreater
#error isgreater defined
#endif
static_assert((std::is_same<decltype(isgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreater(V(-1), 0) == false);
assert(std::isgreater(V(-1), 0.) == false);
assert(std::isgreater(V(-1), 0.f) == false);
assert(std::isgreater(V(-1.), 0) == false);
assert(std::isgreater(V(-1.), 0.) == false);
assert(std::isgreater(V(-1.), 0.f) == false);
assert(std::isgreater(V(-1.f), 0) == false);
assert(std::isgreater(V(-1.f), 0.) == false);
assert(std::isgreater(V(-1.f), 0.f) == false);
}
__device__ void test_isgreaterequal()
{
#ifdef isgreaterequal
#error isgreaterequal defined
#endif
static_assert((std::is_same<decltype(isgreaterequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreaterequal(V(-1), 0) == false);
assert(std::isgreaterequal(V(-1), 0.) == false);
assert(std::isgreaterequal(V(-1), 0.f) == false);
assert(std::isgreaterequal(V(-1.), 0) == false);
assert(std::isgreaterequal(V(-1.), 0.) == false);
assert(std::isgreaterequal(V(-1.), 0.f) == false);
assert(std::isgreaterequal(V(-1.f), 0) == false);
assert(std::isgreaterequal(V(-1.f), 0.) == false);
assert(std::isgreaterequal(V(-1.f), 0.f) == false);
}
__device__ void test_isinf()
{
#ifdef isinf
#error isinf defined
#endif
static_assert((std::is_same<decltype(isinf((float)0)), bool>::value), "");
typedef decltype(isinf((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isinf(0)), bool>::value), "");
assert(std::isinf(V(-1)) == false);
assert(std::isinf(V(-1.)) == false);
assert(std::isinf(V(-1.f)) == false);
}
__device__ void test_isless()
{
#ifdef isless
#error isless defined
#endif
static_assert((std::is_same<decltype(isless((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isless(V(-1), 0) == true);
assert(std::isless(V(-1), 0.) == true);
assert(std::isless(V(-1), 0.f) == true);
assert(std::isless(V(-1.), 0) == true);
assert(std::isless(V(-1.), 0.) == true);
assert(std::isless(V(-1.), 0.f) == true);
assert(std::isless(V(-1.f), 0) == true);
assert(std::isless(V(-1.f), 0.) == true);
assert(std::isless(V(-1.f), 0.f) == true);
}
__device__ void test_islessequal()
{
#ifdef islessequal
#error islessequal defined
#endif
static_assert((std::is_same<decltype(islessequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessequal(V(-1), 0) == true);
assert(std::islessequal(V(-1), 0.) == true);
assert(std::islessequal(V(-1), 0.f) == true);
assert(std::islessequal(V(-1.), 0) == true);
assert(std::islessequal(V(-1.), 0.) == true);
assert(std::islessequal(V(-1.), 0.f) == true);
assert(std::islessequal(V(-1.f), 0) == true);
assert(std::islessequal(V(-1.f), 0.) == true);
assert(std::islessequal(V(-1.f), 0.f) == true);
}
__device__ void test_islessgreater()
{
#ifdef islessgreater
#error islessgreater defined
#endif
static_assert((std::is_same<decltype(islessgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessgreater(V(-1), 0) == true);
assert(std::islessgreater(V(-1), 0.) == true);
assert(std::islessgreater(V(-1), 0.f) == true);
assert(std::islessgreater(V(-1.), 0) == true);
assert(std::islessgreater(V(-1.), 0.) == true);
assert(std::islessgreater(V(-1.), 0.f) == true);
assert(std::islessgreater(V(-1.f), 0) == true);
assert(std::islessgreater(V(-1.f), 0.) == true);
assert(std::islessgreater(V(-1.f), 0.f) == true);
}
__device__ void test_isnan()
{
#ifdef isnan
#error isnan defined
#endif
static_assert((std::is_same<decltype(isnan((float)0)), bool>::value), "");
typedef decltype(isnan((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isnan(0)), bool>::value), "");
assert(std::isnan(V(-1)) == false);
assert(std::isnan(V(-1.)) == false);
assert(std::isnan(V(-1.f)) == false);
}
__device__ void test_isunordered()
{
#ifdef isunordered
#error isunordered defined
#endif
static_assert((std::is_same<decltype(isunordered((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isunordered(V(-1), 0) == false);
assert(std::isunordered(V(-1), 0.) == false);
assert(std::isunordered(V(-1), 0.f) == false);
assert(std::isunordered(V(-1.), 0) == false);
assert(std::isunordered(V(-1.), 0.) == false);
assert(std::isunordered(V(-1.), 0.f) == false);
assert(std::isunordered(V(-1.f), 0) == false);
assert(std::isunordered(V(-1.f), 0.) == false);
assert(std::isunordered(V(-1.f), 0.f) == false);
}
__device__ void test_acosh()
{
static_assert((std::is_same<decltype(acosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acoshf(0)), float>::value), "");
static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), "");
assert(std::acosh(V(1)) == 0);
assert(std::acosh(V(1.)) == 0);
assert(std::acosh(V(1.f)) == 0);
}
__device__ void test_asinh()
{
static_assert((std::is_same<decltype(asinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), "");
assert(asinh(V(0)) == 0);
assert(asinh(V(0.)) == 0);
assert(asinh(V(0.f)) == 0);
}
__device__ void test_atanh()
{
static_assert((std::is_same<decltype(atanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), "");
assert(atanh(V(0)) == 0);
assert(atanh(V(0.)) == 0);
assert(atanh(V(0.f)) == 0);
}
__device__ void test_cbrt()
{
static_assert((std::is_same<decltype(cbrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), "");
assert(cbrt(V(1)) == 1);
assert(cbrt(V(1.)) == 1);
assert(cbrt(V(1.f)) == 1);
}
__device__ void test_copysign()
{
static_assert((std::is_same<decltype(copysign((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (double)0)), double>::value), "");
// CUDA's copysign(float, double) currently returns a float, in violation
// of the spec. We can't easily change this, so accept either one.
static_assert(
(std::is_same<decltype(copysign((float)0, (double)0)), double>::value ||
std::is_same<decltype(copysign((float)0, (double)0)), float>::value),
"");
static_assert((std::is_same<decltype(copysignf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::copysign(V(1), 1) == 1);
assert(std::copysign(V(1.), 1) == 1);
assert(std::copysign(V(1.f), 1) == 1);
assert(std::copysign(V(1), 1.) == 1);
assert(std::copysign(V(1.), 1.) == 1);
assert(std::copysign(V(1.f), 1.) == 1);
assert(std::copysign(V(1), 1.f) == 1);
assert(std::copysign(V(1.), 1.f) == 1);
assert(std::copysign(V(1.f), 1.f) == 1);
}
__device__ void test_erf()
{
static_assert((std::is_same<decltype(erf((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erf((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erff(0)), float>::value), "");
static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
assert(erf(V(0)) == 0);
assert(erf(V(0.)) == 0);
assert(erf(V(0.f)) == 0);
}
__device__ void test_erfc()
{
static_assert((std::is_same<decltype(erfc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erfc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erfcf(0)), float>::value), "");
static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
assert(erfc(V(0)) == 1);
assert(erfc(V(0.)) == 1);
assert(erfc(V(0.f)) == 1);
}
__device__ void test_exp2()
{
static_assert((std::is_same<decltype(exp2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2f(0)), float>::value), "");
static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
assert(exp2(V(1)) == 2);
assert(exp2(V(1.)) == 2);
assert(exp2(V(1.f)) == 2);
}
__device__ void test_expm1()
{
static_assert((std::is_same<decltype(expm1((float)0)), float>::value), "");
static_assert((std::is_same<decltype(expm1((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1f(0)), float>::value), "");
static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
assert(expm1(V(0)) == 0);
assert(expm1(V(0.)) == 0);
assert(expm1(V(0.f)) == 0);
}
__device__ void test_fdim()
{
static_assert((std::is_same<decltype(fdim((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdimf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fdim(V(1), 0) == 1);
assert(std::fdim(V(1.), 0) == 1);
assert(std::fdim(V(1.f), 0) == 1);
assert(std::fdim(V(1), 0.) == 1);
assert(std::fdim(V(1.), 0.) == 1);
assert(std::fdim(V(1.f), 0.) == 1);
assert(std::fdim(V(1), 0.f) == 1);
assert(std::fdim(V(1.), 0.f) == 1);
assert(std::fdim(V(1.f), 0.f) == 1);
}
__device__ void test_fma()
{
static_assert((std::is_same<decltype(fma((bool)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (int)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (long)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fma((bool)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (int)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (long)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaf(0,0,0)), float>::value), "");
static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fma(V(1), 1, 1) == 2);
assert(std::fma(V(1.), 1, 1) == 2);
assert(std::fma(V(1.f), 1, 1) == 2);
assert(std::fma(V(1), 1., 1) == 2);
assert(std::fma(V(1.), 1., 1) == 2);
assert(std::fma(V(1.f), 1., 1) == 2);
assert(std::fma(V(1), 1.f, 1) == 2);
assert(std::fma(V(1.), 1.f, 1) == 2);
assert(std::fma(V(1.f), 1.f, 1) == 2);
assert(std::fma(V(1), 1, 1.) == 2);
assert(std::fma(V(1.), 1, 1.) == 2);
assert(std::fma(V(1.f), 1, 1.) == 2);
assert(std::fma(V(1), 1., 1.) == 2);
assert(std::fma(V(1.), 1., 1.) == 2);
assert(std::fma(V(1.f), 1., 1.) == 2);
assert(std::fma(V(1), 1.f, 1.) == 2);
assert(std::fma(V(1.), 1.f, 1.) == 2);
assert(std::fma(V(1.f), 1.f, 1.) == 2);
assert(std::fma(V(1), 1, 1.f) == 2);
assert(std::fma(V(1.), 1, 1.f) == 2);
assert(std::fma(V(1.f), 1, 1.f) == 2);
assert(std::fma(V(1), 1., 1.f) == 2);
assert(std::fma(V(1.), 1., 1.f) == 2);
assert(std::fma(V(1.f), 1., 1.f) == 2);
assert(std::fma(V(1), 1.f, 1.f) == 2);
assert(std::fma(V(1.), 1.f, 1.f) == 2);
assert(std::fma(V(1.f), 1.f, 1.f) == 2);
}
__device__ void test_fmax()
{
static_assert((std::is_same<decltype(fmax((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaxf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmax(V(1), 0) == 1);
assert(std::fmax(V(1.), 0) == 1);
assert(std::fmax(V(1.f), 0) == 1);
assert(std::fmax(V(1), 0.) == 1);
assert(std::fmax(V(1.), 0.) == 1);
assert(std::fmax(V(1.f), 0.) == 1);
assert(std::fmax(V(1), 0.f) == 1);
assert(std::fmax(V(1.), 0.f) == 1);
assert(std::fmax(V(1.f), 0.f) == 1);
}
__device__ void test_fmin()
{
static_assert((std::is_same<decltype(fmin((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fminf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmin(V(1), 0) == 0);
assert(std::fmin(V(1.), 0) == 0);
assert(std::fmin(V(1.f), 0) == 0);
assert(std::fmin(V(1), 0.) == 0);
assert(std::fmin(V(1.), 0.) == 0);
assert(std::fmin(V(1.f), 0.) == 0);
assert(std::fmin(V(1), 0.f) == 0);
assert(std::fmin(V(1.), 0.f) == 0);
assert(std::fmin(V(1.f), 0.f) == 0);
}
__device__ void test_hypot()
{
static_assert((std::is_same<decltype(hypot((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypotf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(V(3), 4) == 5);
assert(std::hypot(V(3), 4.) == 5);
assert(std::hypot(V(3), 4.f) == 5);
assert(std::hypot(V(3.), 4) == 5);
assert(std::hypot(V(3.), 4.) == 5);
assert(std::hypot(V(3.), 4.f) == 5);
assert(std::hypot(V(3.f), 4) == 5);
assert(std::hypot(V(3.f), 4.) == 5);
assert(std::hypot(V(3.f), 4.f) == 5);
}
__device__ void test_ilogb()
{
static_assert((std::is_same<decltype(ilogb((float)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((bool)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned short)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((double)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogbf(0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
assert(ilogb(V(1)) == 0);
assert(ilogb(V(1.)) == 0);
assert(ilogb(V(1.f)) == 0);
}
__device__ void test_lgamma()
{
static_assert((std::is_same<decltype(lgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(lgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
assert(lgamma(V(1)) == 0);
assert(lgamma(V(1.)) == 0);
assert(lgamma(V(1.f)) == 0);
}
__device__ void test_llrint()
{
static_assert((std::is_same<decltype(llrint((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrintf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
assert(llrint(V(1)) == 1LL);
assert(llrint(V(1.)) == 1LL);
#if CUDA_VERSION > 7050
assert(llrint(V(1.f)) == 1LL);
#endif
}
__device__ void test_llround()
{
static_assert((std::is_same<decltype(llround((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llroundf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
assert(llround(V(1)) == 1LL);
assert(llround(V(1.)) == 1LL);
assert(llround(V(1.f)) == 1LL);
}
__device__ void test_log1p()
{
static_assert((std::is_same<decltype(log1p((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log1p((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log1pf(0)), float>::value), "");
static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
assert(log1p(V(0)) == 0);
assert(log1p(V(0.)) == 0);
assert(log1p(V(0.f)) == 0);
}
__device__ void test_log2()
{
static_assert((std::is_same<decltype(log2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log2f(0)), float>::value), "");
static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
assert(log2(V(1)) == 0);
assert(log2(V(1.)) == 0);
assert(log2(V(1.f)) == 0);
}
__device__ void test_logb()
{
static_assert((std::is_same<decltype(logb((float)0)), float>::value), "");
static_assert((std::is_same<decltype(logb((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logbf(0)), float>::value), "");
static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
assert(logb(V(1)) == 0);
assert(logb(V(1.)) == 0);
assert(logb(V(1.f)) == 0);
}
__device__ void test_lrint()
{
static_assert((std::is_same<decltype(lrint((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lrintf(0)), long>::value), "");
static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
assert(lrint(V(1)) == 1L);
assert(lrint(V(1.)) == 1L);
#if CUDA_VERSION > 7050
assert(lrint(V(1.f)) == 1L);
#endif
}
__device__ void test_lround()
{
static_assert((std::is_same<decltype(lround((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lroundf(0)), long>::value), "");
static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
assert(lround(V(1)) == 1L);
assert(lround(V(1.)) == 1L);
assert(lround(V(1.f)) == 1L);
}
__device__ void test_nan()
{
static_assert((std::is_same<decltype(nan("")), double>::value), "");
static_assert((std::is_same<decltype(nanf("")), float>::value), "");
}
__device__ void test_nearbyint()
{
static_assert((std::is_same<decltype(nearbyint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyintf(0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
assert(nearbyint(V(1)) == 1);
assert(nearbyint(V(1.)) == 1);
assert(nearbyint(V(1.f)) == 1);
// There are more checks in test_rint(). rint and nearbyint behave the same
// way on the GPU, so we only test them in one place.
}
__device__ void test_nextafter()
{
static_assert((std::is_same<decltype(nextafter((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafterf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
//assert(nextafter(0,1) == hexfloat<double>(0x1, 0, -1074));
// Invoke all our overloads. Even though we don't check the exact result
// (this is pretty annoying to do for this function), we make sure to *use*
// the results so that these function calls can't be DCE'ed.
assert(nextafter(V(0), 1) != 0);
assert(nextafter(V(0), 1.) != 0);
assert(nextafter(V(0), 1.f) != 0);
assert(nextafter(V(0.), 1) != 0);
assert(nextafter(V(0.), 1.) != 0);
assert(nextafter(V(0.), 1.f) != 0);
assert(nextafter(V(0.f), 1) != 0);
assert(nextafter(V(0.f), 1.) != 0);
assert(nextafter(V(0.f), 1.f) != 0);
}
__device__ void test_remainder()
{
static_assert((std::is_same<decltype(remainder((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainderf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::remainder(V(1.5), 1) == -.5);
assert(std::remainder(V(1.5), 1.) == -.5);
assert(std::remainder(V(1.5), 1.f) == -.5);
assert(std::remainder(V(1.5f), 1) == -.5);
assert(std::remainder(V(1.5f), 1.) == -.5);
assert(std::remainder(V(1.5f), 1.f) == -.5);
assert(std::remainder(V(2), 1) == 0);
assert(std::remainder(V(2), 1.) == 0);
assert(std::remainder(V(2), 1.f) == 0);
}
__device__ void test_remquo()
{
int ip;
static_assert((std::is_same<decltype(remquo((float)0, (float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((bool)0, (float)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquof(0,0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::remquo(V(1), 1, &ip) == 0);
assert(std::remquo(V(1), 1., &ip) == 0);
assert(std::remquo(V(1), 1.f, &ip) == 0);
assert(std::remquo(V(0.5), 1, &ip) == 0.5);
assert(std::remquo(V(0.5), 1., &ip) == 0.5);
assert(std::remquo(V(0.5), 1.f, &ip) == 0.5);
assert(std::remquo(V(0.5f), 1, &ip) == 0.5);
assert(std::remquo(V(0.5f), 1., &ip) == 0.5);
assert(std::remquo(V(0.5f), 1.f, &ip) == 0.5);
}
__device__ void test_rint_nearbyint()
{
static_assert((std::is_same<decltype(rint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(rint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(rintf(0)), float>::value), "");
static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");
// Verify that rint/nearbyint produce identical correct results
auto check = [](double input, double fpresult) {
// FP rint()/nearbyint must match the expected result.
assert(rint(V(float(input))) == float(fpresult));
assert(nearbyint(V(float(input))) == float(fpresult));
assert(rint(V(input)) == fpresult);
assert(nearbyint(V(input)) == fpresult);
// for integral types, std::rint(input) == std::rint(double(input))
int iinput = input;
assert(std::rint(V(iinput)) == std::rint(double(V(iinput))));
assert(std::nearbyint(V(iinput)) == std::nearbyint(double(V(iinput))));
};
// Whole values round to themselves and do not change sign.
check(0.0, 0.0);
check(-0.0, -0.0);
check(1.0, 1.0);
check(-1.0, -1.0);
// Half-way values round towards nearest even number.
check(2.5, 2.0);
check(-2.5, -2.0);
check(3.5, 4.0);
check(-3.5, -4.0);
// Everything else is rounded towards nearest integer.
check(2.1, 2.0);
check(-2.1, -2.0);
check(2.7, 3.0);
check(-2.7, -3.0);
check(3.9, 4.0);
check(-3.9, -4.0);
}
__device__ void test_round()
{
static_assert((std::is_same<decltype(round((float)0)), float>::value), "");
static_assert((std::is_same<decltype(round((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(round((int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((double)0)), double>::value), "");
static_assert((std::is_same<decltype(roundf(0)), float>::value), "");
static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
assert(round(V(1)) == 1);
assert(round(V(1.)) == 1);
assert(round(V(1.f)) == 1);
}
__device__ void test_scalbln()
{
static_assert((std::is_same<decltype(scalbln((float)0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln((bool)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned short)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalblnf(0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbln(V(1), 1) == 2);
assert(std::scalbln(V(1), 1.) == 2);
assert(std::scalbln(V(1), 1.f) == 2);
assert(std::scalbln(V(1.), 1) == 2);
assert(std::scalbln(V(1.), 1.) == 2);
assert(std::scalbln(V(1.), 1.f) == 2);
assert(std::scalbln(V(1.f), 1) == 2);
assert(std::scalbln(V(1.f), 1.) == 2);
assert(std::scalbln(V(1.f), 1.f) == 2);
}
__device__ void test_scalbn()
{
static_assert((std::is_same<decltype(scalbn((float)0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn((bool)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned short)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((double)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbnf(0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbn(V(1), 1) == 2);
assert(std::scalbn(V(1), 1.) == 2);
assert(std::scalbn(V(1), 1.f) == 2);
assert(std::scalbn(V(1.), 1) == 2);
assert(std::scalbn(V(1.), 1.) == 2);
assert(std::scalbn(V(1.), 1.f) == 2);
assert(std::scalbn(V(1.f), 1) == 2);
assert(std::scalbn(V(1.f), 1.) == 2);
assert(std::scalbn(V(1.f), 1.f) == 2);
}
__device__ void test_tgamma()
{
static_assert((std::is_same<decltype(tgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
assert(tgamma(V(1)) == 1);
assert(tgamma(V(1.)) == 1);
assert(tgamma(V(1.f)) == 1);
}
__device__ void test_trunc()
{
static_assert((std::is_same<decltype(trunc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(trunc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(truncf(0)), float>::value), "");
static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
assert(trunc(V(1)) == 1);
assert(trunc(V(1.)) == 1);
assert(trunc(V(1.f)) == 1);
}
__global__ void tests()
{
test_abs();
test_acos();
test_asin();
test_atan();
test_atan2();
test_ceil();
test_cos();
test_cosh();
test_exp();
test_fabs();
test_floor();
test_fmod();
test_frexp();
test_ldexp();
test_log();
test_log10();
test_modf();
test_pow();
test_sin();
test_sinh();
test_sqrt();
test_tan();
test_tanh();
test_signbit();
test_fpclassify();
test_isfinite();
test_isnormal();
test_isgreater();
test_isgreaterequal();
test_isinf();
test_isless();
test_islessequal();
test_islessgreater();
test_isnan();
test_isunordered();
test_acosh();
test_asinh();
test_atanh();
test_cbrt();
test_copysign();
test_erf();
test_erfc();
test_exp2();
test_expm1();
test_fdim();
test_fma();
test_fmax();
test_fmin();
test_hypot();
test_ilogb();
test_lgamma();
test_llrint();
test_llround();
test_log1p();
test_log2();
test_logb();
test_lrint();
test_lround();
test_nan();
test_nearbyint();
test_nextafter();
test_remainder();
test_remquo();
test_rint_nearbyint();
test_round();
test_scalbln();
test_scalbn();
test_tgamma();
test_trunc();
}
int main() {
tests<<<1,1>>>();
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("CUDA error %d\n", (int)err);
return 1;
}
printf("Success!\n");
return 0;
}
#else
#include <stdio.h>
// No C++11; test is a nop.
int main() {
printf("Success!\n");
return 0;
}
#endif // __cplusplus < 201103L
|
bb13ba3d56eac632b90dd3c90f161bbbc545f0ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/quant_conv_layer.hpp"
#define SHIFT_MAX 13
#define EPSILON 1e-6
namespace caffe {
template <typename Dtype>
__global__ void ConvForward(const int nthreads, const Dtype* const bottom_data, const int num,
const int channels, const int height, const int width, const int conved_height,
const int conved_width, const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data,
const Dtype* const weight, const Dtype* const bias, const bool bias_term_) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % conved_width;
const int ph = (index / conved_width) % conved_height;
const int c = (index / conved_width / conved_height) % channels;
const int n = index / conved_width / conved_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;
int khstart = hend < kernel_h ? kernel_h - hend : 0;
int kwstart = wend < kernel_w ? kernel_w - wend : 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w] * weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)];
}
}
if(bias_term_) {
aveval += bias[c];
}
top_data[index] = aveval;
}
}
template <typename Dtype>
__global__ void ConvBackward(const int nthreads, const Dtype* const top_diff, const int num,
const int channels, const int height, const int width, const int conved_height,
const int conved_width, const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff,
const Dtype* const weight) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, conved_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, conved_width);
const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h;
const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w;
Dtype gradient = 0;
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
gradient += top_diff_slice[ph * conved_width + pw] * weight_slice[kh * kernel_w + kw];
}
}
bottom_diff[index] = gradient;
}
}
__device__ float atomicAddme(float* address, float val) {
return atomicAdd(address,val);
}
__device__ double atomicAddme(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#define DIVIDE_CEIL(a,b) a / b + ((a / b * b) < a)
template <typename Dtype>
__global__ void ConvBackwardWeight(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height, const int width,
const int conved_height, const int conved_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const weight_diff, const Dtype* const bottom_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int kw = index % kernel_w;
const int kh = (index / kernel_w) % kernel_h;
const int c = index / kernel_w / kernel_h;
Dtype gradient = 0;
for (int n = 0; n < num; n++) {
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
const Dtype* const bottom_data_slice = bottom_data + (n * channels + c) * height * width;
const int phstart = max(DIVIDE_CEIL((pad_h - kh), stride_h), 0);
const int phend = min(DIVIDE_CEIL((height + pad_h - kh), stride_h), conved_height);
const int pwstart = max(DIVIDE_CEIL((pad_w - kw), stride_w), 0);
const int pwend = min(DIVIDE_CEIL((width + pad_w - kw), stride_w), conved_width);
for (int ph = phstart; ph < phend; ph++) {
for (int pw = pwstart; pw < pwend; pw++) {
const int h = ph * stride_h + kh - pad_h;
const int w = pw * stride_w + kw - pad_w;
gradient += top_diff_slice[ph * conved_width + pw] * bottom_data_slice[h * width + w];
}
}
}
weight_diff[c * kernel_h * kernel_w + kh * kernel_w + kw] += gradient;
}
}
template <typename Dtype>
__global__ void ConvBackwardBias(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height, const int width,
const int conved_height, const int conved_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bias_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index;
Dtype gradient=0;
for(int n = 0; n < num; n++) {
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
for(int ph = 0; ph < conved_height; ph++) {
for (int pw = 0; pw < conved_width; pw++) {
gradient += top_diff_slice[ph * conved_width + pw];
}
}
}
bias_diff[c] += gradient;
}
}
// Dynamic fixed-point model: linearly quantize a block of data between {-max, +max}
// Shift is determined by weight (bias is represented by 20-bit, no need to consider its shift)
template <typename Dtype>
void quantize_block_data_dynamic_fxp(Dtype *dst_weight, Dtype *src_weight, int count_weight, int bw_param, bool shift_enable, Dtype *dst_bias=NULL, Dtype *src_bias=NULL, int count_bias=0)
{
int num_steps = (1<<(bw_param-1)) - 1; // half side quantize steps, +/-127 for 8-bits
// 1: search abs max:
double abs_data, abs_max_w;
double grid, shift_decimal;
abs_max_w = 0;
for (int i = 0; i < count_weight; i++) {
abs_data = fabs(src_weight[i]);
abs_max_w = (abs_max_w < abs_data) ? abs_data : abs_max_w;
}
int shift_max_value = SHIFT_MAX;
if (bw_param == 8) shift_max_value = 15;
// 2: quantize weight and bias
if (shift_enable == true) {
// Shift_based quantization
int shift_value = floor(::log(double(num_steps) / abs_max_w) / ::log(2.0));
shift_value = ::max(shift_value, 0);
shift_value = ::min(shift_value, shift_max_value);
shift_decimal = double(1 << shift_value);
for (int i = 0; i < count_weight; i++) {
Dtype data = double(src_weight[i]);
if (data >= 0)
dst_weight[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_weight[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
if((dst_bias) && (src_bias)){
for (int i = 0; i < count_bias; i ++) {
Dtype data = double(src_bias[i]);
if (data >= 0)
dst_bias[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_bias[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
}
}
else {
// Direct quantization using max_weight
grid = double(abs_max_w/num_steps);
for (int i = 0; i < count_weight; i++) {
//Dtype data = ::min(::max(double(src_weight[i]), -abs_max_w), abs_max_w);
Dtype data = double(src_weight[i]);
if (data >= 0)
dst_weight[i] = floor(data/grid + 0.5) * grid;
else
dst_weight[i] = ceil(data/grid - 0.5) * grid;
}
// bias not quantized here
if((dst_bias) && (src_bias)){
for (int i = 0; i < count_bias; i ++) {
dst_bias[i] = src_bias[i];
}
}
}
}
// Fixed-point model: quantize the data of the whole layer using 1/2/3/5-bit:
template <typename Dtype>
void quantize_block_data_shift_quant(Dtype *dst_ptr, const Dtype *src_ptr, const int out_ch_num, const int in_ch_num, const int kernel_size, const int bw_param, bool shift_enable, const int quant_bit, Dtype *dst_bias=NULL, const Dtype * src_bias=NULL)
{
int bw_bias = 20;
if (bw_param == 12){
if (quant_bit == QuantConvolutionParameter_Precision_ONE_BIT) bw_bias = 12;
else if (quant_bit == QuantConvolutionParameter_Precision_TWO_BITS) bw_bias = 12;
else if (quant_bit == QuantConvolutionParameter_Precision_THREE_BITS) bw_bias = 18;
else if (quant_bit == QuantConvolutionParameter_Precision_FIVE_BITS) bw_bias = 16;
}
// Step 1a: calculate max scalar/max weight/max bias per layer:
double abs_max = 0;
double abs_data;
int offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
for (int i = 0; i < kernel_size; i++) {
abs_data = fabs(*(src_ptr + offset + i));
abs_max = (abs_max < abs_data) ? abs_data : abs_max;
}
offset += kernel_size;
}
}
// step 1b: calculate shift
int shift_max_value = SHIFT_MAX;
if (bw_param == 8) shift_max_value = 15;
int shift_w, shift;
int shift_b = shift_max_value;
double shift_decimal;
int num_steps = (1<<(bw_param-1)) - 1;
int num_steps_bias = (1<<(bw_bias-1)) - 1;
shift_w = floor(::log(double(num_steps)/abs_max)/::log(2.0));
if((dst_bias) && (src_bias)){
double abs_max_bias = 0;
for (int i = 0; i < out_ch_num; i ++) {
abs_data = double(fabs(src_bias[i]));
abs_max_bias = (abs_max_bias < abs_data) ? abs_data : abs_max_bias;
}
shift_b = floor(::log(double(num_steps_bias)/(abs_max_bias+EPSILON))/::log(2.0));
}
shift = ::min(shift_w, shift_b);
shift = ::min(::max(shift, 0), shift_max_value);
shift_decimal = double(1 << shift);
double grid_scalar, quant_scalar, var;
Dtype coef, qcoef;
if (quant_bit == QuantConvolutionParameter_Precision_ONE_BIT) {
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
var /= kernel_size;
grid_scalar = (var + EPSILON)/1.f;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
qcoef = (coef >= 0) ? quant_scalar : -quant_scalar;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_TWO_BITS) {
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
grid_scalar = var/kernel_size;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
qcoef = (fabs(coef) < 0.25*quant_scalar) ? 0.0 : (coef >= 0) ? quant_scalar : -quant_scalar;
*(dst_ptr +offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_THREE_BITS) {
Dtype abs_coef;
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
var /= kernel_size;
grid_scalar = (var + EPSILON)/4.f;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
int abs_coefInt = fabs(coef)/grid_scalar;
abs_coefInt = (abs_coefInt < 3) ? abs_coefInt : 4;
abs_coef = abs_coefInt*quant_scalar;
qcoef = (coef >= 0) ? abs_coef : -abs_coef;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_FIVE_BITS) {
// step 2: use shift to quantize scalar
int n_bit = 5;
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var = (var < abs_data) ? abs_data : var;
}
grid_scalar = (var + EPSILON)/double((1 << (n_bit - 1)) - 1);
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset +i);
if (coef >= 0)
qcoef = floor(coef/grid_scalar + 0.5)*quant_scalar;
else
qcoef = ceil(coef/grid_scalar - 0.5)*quant_scalar;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else {
printf("Error QuantConvolutionParameter: Precision\n");
exit(0);
}
if((dst_bias) && (src_bias)){
if(shift_enable == true)
for (int i = 0; i < out_ch_num; i ++) {
Dtype data = double(src_bias[i]);
if (data >= 0)
dst_bias[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_bias[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
else
// bias not quantized here
for (int i = 0; i < out_ch_num; i ++) {
dst_bias[i] = src_bias[i];
}
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::forward_conv_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top, const Dtype* weight, const Dtype* bias) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, top_data + n * this->top_dim_);
if (this->bias_term_) {
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::forward_dw_conv_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top, const Dtype* weight, const Dtype* bias) {
int* kernel_shape_data = this->kernel_shape_.mutable_cpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
int* pad_data = this->pad_.mutable_cpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const int count = top[i]->count();
vector<int> shape_ = bottom[i]->shape();
const int channels_ = shape_[1];
const int height_ = shape_[2];
const int width_ = shape_[3];
const int kernel_h_ = kernel_shape_data[0];
const int kernel_w_ = kernel_shape_data[1];
const int stride_h_ = stride_data[0];
const int stride_w_ = stride_data[1];
const int pad_h_ = pad_data[0];
const int pad_w_ = pad_data[1];
const int conved_height = this->output_shape_[0];
const int conved_weight = this->output_shape_[1];
const bool bias_term_ = this->bias_term_;
if (bias_term_) {
hipLaunchKernelGGL(( ConvForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[i]->num(), channels_,
height_, width_,conved_height,conved_weight,kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,weight,bias,bias_term_);
} else {
hipLaunchKernelGGL(( ConvForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[i]->num(), channels_,
height_, width_,conved_height,conved_weight,kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,weight,0,bias_term_);
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::backward_conv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const Dtype* weight) {
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
bottom_diff + n * this->bottom_dim_);
}
}
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::backward_dw_conv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const Dtype* weight) {
int* kernel_shape_data = this->kernel_shape_.mutable_cpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
int* pad_data = this->pad_.mutable_cpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const bool bias_term_ = this->bias_term_;
Dtype* bias_diff = bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : 0;
const bool bias_propagate_down_ = this->param_propagate_down_[1];
const bool weight_propagate_down_ = this->param_propagate_down_[0];
const int kernel_h_ = kernel_shape_data[0];
const int kernel_w_ = kernel_shape_data[1];
const int stride_h_ = stride_data[0];
const int stride_w_ = stride_data[1];
const int pad_h_ = pad_data[0];
const int pad_w_ = pad_data[1];
const int conved_height = this->output_shape_[0];
const int conved_weight = this->output_shape_[1];
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
vector<int> shape_ = bottom[i]->shape();
const int channels_ = shape_[1];
const int height_ = shape_[2];
const int width_ = shape_[3];
// Bias gradient, if necessary.
if (bias_term_ && bias_propagate_down_) {
const int count_bias = channels_;
hipLaunchKernelGGL(( ConvBackwardBias<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bias)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count_bias, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height,
conved_weight, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bias_diff);
}
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (weight_propagate_down_) {
const int count_weight = channels_ * kernel_h_ * kernel_w_;
hipLaunchKernelGGL(( ConvBackwardWeight<Dtype>), dim3(CAFFE_GET_BLOCKS(count_weight)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count_weight, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height,
conved_weight, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, weight_diff, bottom_data);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
const int count_bottom=bottom[i]->count();
hipLaunchKernelGGL(( ConvBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count_bottom)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count_bottom, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height, conved_weight,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, weight);
}
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
//quantize coef and run forward process:
int CoefPrecision = this->layer_param_.quant_convolution_param().coef_precision();
bool shift_enable = this->layer_param_.quant_convolution_param().shift_enable();
int bw_param = this->layer_param_.quant_convolution_param().bw_params();
int num_output = this->layer_param_.convolution_param().num_output();
int group = this->layer_param_.convolution_param().group();
bool dw = num_output == group;
if (CoefPrecision == QuantConvolutionParameter_Precision_FLOATING_POINT) { // floating point coef scheme:
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = (this->bias_term_) ? this->blobs_[1]->gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else if ((CoefPrecision == QuantConvolutionParameter_Precision_ONE_BIT) || // one bit coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_TWO_BITS)|| // two bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_THREE_BITS)|| // three bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_FIVE_BITS)){ // five bits coef scheme:
// coef quantization: one or two bits
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
// convert coefficeints:
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
int Kernel_Size = Kernel_Height*Kernel_Width;
const Dtype* pt_origin_coef = this->blobs_[0]->cpu_data(); // in cpu
Dtype* pt_binary_coef = blob_binary_coef.mutable_cpu_data();
// weight quantization with a floating scalar
Blob<Dtype> blob_binary_bias;
if (this->bias_term_){
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
const Dtype* pt_origin_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* pt_binary_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision, pt_binary_bias, pt_origin_bias);
}
else {
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision);
}
// run convolution:
const Dtype* weight = blob_binary_coef.gpu_data();
const Dtype* bias = (this->bias_term_) ? blob_binary_bias.gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else if (CoefPrecision == QuantConvolutionParameter_Precision_DYNAMIC_FIXED_POINT) {
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
Dtype* src_weight = (Dtype*) this->blobs_[0]->cpu_data();
Dtype* dst_weight = (Dtype*)blob_binary_coef.mutable_cpu_data();
int count_weight = Output_Channels*Input_Channels*Kernel_Height*Kernel_Width;
Blob<Dtype> blob_binary_bias;
if (this->bias_term_){
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
Dtype* src_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* dst_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
int count_bias = Output_Channels;
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable, dst_bias, src_bias, count_bias);
}
else {
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable);
}
// run convolution:
const Dtype* weight = blob_binary_coef.gpu_data();
const Dtype* bias = (this->bias_term_) ? blob_binary_bias.gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else {
printf("Error quantization scheme !!!\n");
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int CoefPrecision = this->layer_param_.quant_convolution_param().coef_precision();
bool shift_enable = this->layer_param_.quant_convolution_param().shift_enable();
int bw_param = this->layer_param_.quant_convolution_param().bw_params();
int num_output = this->layer_param_.convolution_param().num_output();
int group = this->layer_param_.convolution_param().group();
bool dw = num_output == group;
if (CoefPrecision == QuantConvolutionParameter_Precision_FLOATING_POINT) { // floating point coef scheme:
const Dtype* weight = this->blobs_[0]->gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else if ((CoefPrecision == QuantConvolutionParameter_Precision_ONE_BIT) || // one bit coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_TWO_BITS)|| // two bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_THREE_BITS)|| // three bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_FIVE_BITS)) { // five bits coef scheme:
//std::cout << "======= Binary_Backward_cpu ============\n";
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
// convert coefficeints:
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
int Kernel_Size = Kernel_Height*Kernel_Width;
const Dtype* pt_origin_coef = this->blobs_[0]->cpu_data(); // in cpu
Dtype* pt_binary_coef = blob_binary_coef.mutable_cpu_data();
if (this->bias_term_){
Blob<Dtype> blob_binary_bias;
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
const Dtype* pt_origin_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* pt_binary_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision, pt_binary_bias, pt_origin_bias);
}
else {
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision);
}
const Dtype* weight = blob_binary_coef.gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else if (CoefPrecision == QuantConvolutionParameter_Precision_DYNAMIC_FIXED_POINT) {
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
Dtype* src_weight = (Dtype*) this->blobs_[0]->cpu_data();
Dtype* dst_weight = (Dtype*) blob_binary_coef.mutable_cpu_data();
int count_weight = Output_Channels*Input_Channels*Kernel_Height*Kernel_Width;
if (this->bias_term_){
Blob<Dtype> blob_binary_bias;
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
Dtype* src_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* dst_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
int count_bias = Output_Channels;
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable, dst_bias, src_bias, count_bias);
}
else {
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable);
}
// backward process
const Dtype* weight = blob_binary_coef.gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else {
printf("Error quantization scheme !!!\n");
}
}
INSTANTIATE_LAYER_GPU_FUNCS(QuantConvolutionLayer);
//need these lines to instantiate static functions from hpp that are not precisely forward_gpu and backward_gpu
//as instantiate_layer_gpu_funcs does this under the hood.
//regular forward conv
template void QuantConvolutionLayer<float>::forward_conv_gpu(const vector<Blob<float>*>& bottom,
const vector<Blob<float>*>& top, const float* weight, const float* bias);
template void QuantConvolutionLayer<double>::forward_conv_gpu(const vector<Blob<double>*>& bottom,
const vector<Blob<double>*>& top, const double* weight, const double* bias);
//dw forward conv
template void QuantConvolutionLayer<float>::forward_dw_conv_gpu(const vector<Blob<float>*>& bottom,
const vector<Blob<float>*>& top, const float* weight, const float* bias);
template void QuantConvolutionLayer<double>::forward_dw_conv_gpu(const vector<Blob<double>*>& bottom,
const vector<Blob<double>*>& top, const double* weight, const double* bias);
//regular backward conv
template void QuantConvolutionLayer<float>::backward_conv_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom, const float* weight);
template void QuantConvolutionLayer<double>::backward_conv_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom, const double* weight);
//dw backward conv
template void QuantConvolutionLayer<float>::backward_dw_conv_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom, const float* weight);
template void QuantConvolutionLayer<double>::backward_dw_conv_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom, const double* weight);
} // namespace caffe
| bb13ba3d56eac632b90dd3c90f161bbbc545f0ba.cu | #include <vector>
#include "caffe/layers/quant_conv_layer.hpp"
#define SHIFT_MAX 13
#define EPSILON 1e-6
namespace caffe {
template <typename Dtype>
__global__ void ConvForward(const int nthreads, const Dtype* const bottom_data, const int num,
const int channels, const int height, const int width, const int conved_height,
const int conved_width, const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data,
const Dtype* const weight, const Dtype* const bias, const bool bias_term_) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % conved_width;
const int ph = (index / conved_width) % conved_height;
const int c = (index / conved_width / conved_height) % channels;
const int n = index / conved_width / conved_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;
int khstart = hend < kernel_h ? kernel_h - hend : 0;
int kwstart = wend < kernel_w ? kernel_w - wend : 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w] * weight_slice[(khstart + h - hstart) * kernel_w + (kwstart + w - wstart)];
}
}
if(bias_term_) {
aveval += bias[c];
}
top_data[index] = aveval;
}
}
template <typename Dtype>
__global__ void ConvBackward(const int nthreads, const Dtype* const top_diff, const int num,
const int channels, const int height, const int width, const int conved_height,
const int conved_width, const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff,
const Dtype* const weight) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, conved_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, conved_width);
const int khstart = (h >= kernel_h) ? ((h - kernel_h) % stride_h) + (kernel_h - stride_h) : h;
const int kwstart = (w >= kernel_w) ? ((w - kernel_w) % stride_w) + (kernel_w - stride_w) : w;
Dtype gradient = 0;
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
const Dtype* const weight_slice = weight + c * kernel_h * kernel_w;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
int kh = khstart - (ph - phstart) * stride_h;
int kw = kwstart - (pw - pwstart) * stride_w;
gradient += top_diff_slice[ph * conved_width + pw] * weight_slice[kh * kernel_w + kw];
}
}
bottom_diff[index] = gradient;
}
}
__device__ float atomicAddme(float* address, float val) {
return atomicAdd(address,val);
}
__device__ double atomicAddme(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#define DIVIDE_CEIL(a,b) a / b + ((a / b * b) < a)
template <typename Dtype>
__global__ void ConvBackwardWeight(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height, const int width,
const int conved_height, const int conved_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const weight_diff, const Dtype* const bottom_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int kw = index % kernel_w;
const int kh = (index / kernel_w) % kernel_h;
const int c = index / kernel_w / kernel_h;
Dtype gradient = 0;
for (int n = 0; n < num; n++) {
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
const Dtype* const bottom_data_slice = bottom_data + (n * channels + c) * height * width;
const int phstart = max(DIVIDE_CEIL((pad_h - kh), stride_h), 0);
const int phend = min(DIVIDE_CEIL((height + pad_h - kh), stride_h), conved_height);
const int pwstart = max(DIVIDE_CEIL((pad_w - kw), stride_w), 0);
const int pwend = min(DIVIDE_CEIL((width + pad_w - kw), stride_w), conved_width);
for (int ph = phstart; ph < phend; ph++) {
for (int pw = pwstart; pw < pwend; pw++) {
const int h = ph * stride_h + kh - pad_h;
const int w = pw * stride_w + kw - pad_w;
gradient += top_diff_slice[ph * conved_width + pw] * bottom_data_slice[h * width + w];
}
}
}
weight_diff[c * kernel_h * kernel_w + kh * kernel_w + kw] += gradient;
}
}
template <typename Dtype>
__global__ void ConvBackwardBias(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height, const int width,
const int conved_height, const int conved_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bias_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index;
Dtype gradient=0;
for(int n = 0; n < num; n++) {
const Dtype* const top_diff_slice = top_diff + (n * channels + c) * conved_height * conved_width;
for(int ph = 0; ph < conved_height; ph++) {
for (int pw = 0; pw < conved_width; pw++) {
gradient += top_diff_slice[ph * conved_width + pw];
}
}
}
bias_diff[c] += gradient;
}
}
// Dynamic fixed-point model: linearly quantize a block of data between {-max, +max}
// Shift is determined by weight (bias is represented by 20-bit, no need to consider its shift)
template <typename Dtype>
void quantize_block_data_dynamic_fxp(Dtype *dst_weight, Dtype *src_weight, int count_weight, int bw_param, bool shift_enable, Dtype *dst_bias=NULL, Dtype *src_bias=NULL, int count_bias=0)
{
int num_steps = (1<<(bw_param-1)) - 1; // half side quantize steps, +/-127 for 8-bits
// 1: search abs max:
double abs_data, abs_max_w;
double grid, shift_decimal;
abs_max_w = 0;
for (int i = 0; i < count_weight; i++) {
abs_data = fabs(src_weight[i]);
abs_max_w = (abs_max_w < abs_data) ? abs_data : abs_max_w;
}
int shift_max_value = SHIFT_MAX;
if (bw_param == 8) shift_max_value = 15;
// 2: quantize weight and bias
if (shift_enable == true) {
// Shift_based quantization
int shift_value = floor(std::log(double(num_steps) / abs_max_w) / std::log(2.0));
shift_value = std::max(shift_value, 0);
shift_value = std::min(shift_value, shift_max_value);
shift_decimal = double(1 << shift_value);
for (int i = 0; i < count_weight; i++) {
Dtype data = double(src_weight[i]);
if (data >= 0)
dst_weight[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_weight[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
if((dst_bias) && (src_bias)){
for (int i = 0; i < count_bias; i ++) {
Dtype data = double(src_bias[i]);
if (data >= 0)
dst_bias[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_bias[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
}
}
else {
// Direct quantization using max_weight
grid = double(abs_max_w/num_steps);
for (int i = 0; i < count_weight; i++) {
//Dtype data = std::min(std::max(double(src_weight[i]), -abs_max_w), abs_max_w);
Dtype data = double(src_weight[i]);
if (data >= 0)
dst_weight[i] = floor(data/grid + 0.5) * grid;
else
dst_weight[i] = ceil(data/grid - 0.5) * grid;
}
// bias not quantized here
if((dst_bias) && (src_bias)){
for (int i = 0; i < count_bias; i ++) {
dst_bias[i] = src_bias[i];
}
}
}
}
// Fixed-point model: quantize the data of the whole layer using 1/2/3/5-bit:
template <typename Dtype>
void quantize_block_data_shift_quant(Dtype *dst_ptr, const Dtype *src_ptr, const int out_ch_num, const int in_ch_num, const int kernel_size, const int bw_param, bool shift_enable, const int quant_bit, Dtype *dst_bias=NULL, const Dtype * src_bias=NULL)
{
int bw_bias = 20;
if (bw_param == 12){
if (quant_bit == QuantConvolutionParameter_Precision_ONE_BIT) bw_bias = 12;
else if (quant_bit == QuantConvolutionParameter_Precision_TWO_BITS) bw_bias = 12;
else if (quant_bit == QuantConvolutionParameter_Precision_THREE_BITS) bw_bias = 18;
else if (quant_bit == QuantConvolutionParameter_Precision_FIVE_BITS) bw_bias = 16;
}
// Step 1a: calculate max scalar/max weight/max bias per layer:
double abs_max = 0;
double abs_data;
int offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
for (int i = 0; i < kernel_size; i++) {
abs_data = fabs(*(src_ptr + offset + i));
abs_max = (abs_max < abs_data) ? abs_data : abs_max;
}
offset += kernel_size;
}
}
// step 1b: calculate shift
int shift_max_value = SHIFT_MAX;
if (bw_param == 8) shift_max_value = 15;
int shift_w, shift;
int shift_b = shift_max_value;
double shift_decimal;
int num_steps = (1<<(bw_param-1)) - 1;
int num_steps_bias = (1<<(bw_bias-1)) - 1;
shift_w = floor(std::log(double(num_steps)/abs_max)/std::log(2.0));
if((dst_bias) && (src_bias)){
double abs_max_bias = 0;
for (int i = 0; i < out_ch_num; i ++) {
abs_data = double(fabs(src_bias[i]));
abs_max_bias = (abs_max_bias < abs_data) ? abs_data : abs_max_bias;
}
shift_b = floor(std::log(double(num_steps_bias)/(abs_max_bias+EPSILON))/std::log(2.0));
}
shift = std::min(shift_w, shift_b);
shift = std::min(std::max(shift, 0), shift_max_value);
shift_decimal = double(1 << shift);
double grid_scalar, quant_scalar, var;
Dtype coef, qcoef;
if (quant_bit == QuantConvolutionParameter_Precision_ONE_BIT) {
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
var /= kernel_size;
grid_scalar = (var + EPSILON)/1.f;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
qcoef = (coef >= 0) ? quant_scalar : -quant_scalar;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_TWO_BITS) {
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
grid_scalar = var/kernel_size;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
qcoef = (fabs(coef) < 0.25*quant_scalar) ? 0.0 : (coef >= 0) ? quant_scalar : -quant_scalar;
*(dst_ptr +offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_THREE_BITS) {
Dtype abs_coef;
// step 2: use shift to quantize scalar
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var += abs_data;
}
var /= kernel_size;
grid_scalar = (var + EPSILON)/4.f;
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset + i);
int abs_coefInt = fabs(coef)/grid_scalar;
abs_coefInt = (abs_coefInt < 3) ? abs_coefInt : 4;
abs_coef = abs_coefInt*quant_scalar;
qcoef = (coef >= 0) ? abs_coef : -abs_coef;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else if (quant_bit == QuantConvolutionParameter_Precision_FIVE_BITS) {
// step 2: use shift to quantize scalar
int n_bit = 5;
offset = 0;
for (int out_ch = 0; out_ch < out_ch_num; out_ch++) {
for (int in_ch = 0; in_ch < in_ch_num; in_ch++) {
var = 0;
for (int i = 0; i< kernel_size; i++){
abs_data = fabs(*(src_ptr + offset + i));
var = (var < abs_data) ? abs_data : var;
}
grid_scalar = (var + EPSILON)/double((1 << (n_bit - 1)) - 1);
if(shift_enable == true)
quant_scalar = floor(grid_scalar*shift_decimal + 0.5) / shift_decimal;
else
quant_scalar = grid_scalar;
for (int i = 0; i < kernel_size; i++) {
coef = *(src_ptr + offset +i);
if (coef >= 0)
qcoef = floor(coef/grid_scalar + 0.5)*quant_scalar;
else
qcoef = ceil(coef/grid_scalar - 0.5)*quant_scalar;
*(dst_ptr + offset + i) = qcoef;
}
offset += kernel_size;
}
}
}
else {
printf("Error QuantConvolutionParameter: Precision\n");
exit(0);
}
if((dst_bias) && (src_bias)){
if(shift_enable == true)
for (int i = 0; i < out_ch_num; i ++) {
Dtype data = double(src_bias[i]);
if (data >= 0)
dst_bias[i] = floor(data * shift_decimal + 0.5) / shift_decimal;
else
dst_bias[i] = ceil(data * shift_decimal - 0.5) / shift_decimal;
}
else
// bias not quantized here
for (int i = 0; i < out_ch_num; i ++) {
dst_bias[i] = src_bias[i];
}
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::forward_conv_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top, const Dtype* weight, const Dtype* bias) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, top_data + n * this->top_dim_);
if (this->bias_term_) {
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
}
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::forward_dw_conv_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top, const Dtype* weight, const Dtype* bias) {
int* kernel_shape_data = this->kernel_shape_.mutable_cpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
int* pad_data = this->pad_.mutable_cpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const int count = top[i]->count();
vector<int> shape_ = bottom[i]->shape();
const int channels_ = shape_[1];
const int height_ = shape_[2];
const int width_ = shape_[3];
const int kernel_h_ = kernel_shape_data[0];
const int kernel_w_ = kernel_shape_data[1];
const int stride_h_ = stride_data[0];
const int stride_w_ = stride_data[1];
const int pad_h_ = pad_data[0];
const int pad_w_ = pad_data[1];
const int conved_height = this->output_shape_[0];
const int conved_weight = this->output_shape_[1];
const bool bias_term_ = this->bias_term_;
if (bias_term_) {
ConvForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[i]->num(), channels_,
height_, width_,conved_height,conved_weight,kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,weight,bias,bias_term_);
} else {
ConvForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[i]->num(), channels_,
height_, width_,conved_height,conved_weight,kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,weight,0,bias_term_);
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::backward_conv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const Dtype* weight) {
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (this->param_propagate_down_[0]) {
this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_,
top_diff + n * this->top_dim_, weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight,
bottom_diff + n * this->bottom_dim_);
}
}
}
}
}
template<typename Dtype>
void QuantConvolutionLayer<Dtype>::backward_dw_conv_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const Dtype* weight) {
int* kernel_shape_data = this->kernel_shape_.mutable_cpu_data();
int* stride_data = this->stride_.mutable_cpu_data();
int* pad_data = this->pad_.mutable_cpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const bool bias_term_ = this->bias_term_;
Dtype* bias_diff = bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : 0;
const bool bias_propagate_down_ = this->param_propagate_down_[1];
const bool weight_propagate_down_ = this->param_propagate_down_[0];
const int kernel_h_ = kernel_shape_data[0];
const int kernel_w_ = kernel_shape_data[1];
const int stride_h_ = stride_data[0];
const int stride_w_ = stride_data[1];
const int pad_h_ = pad_data[0];
const int pad_w_ = pad_data[1];
const int conved_height = this->output_shape_[0];
const int conved_weight = this->output_shape_[1];
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
vector<int> shape_ = bottom[i]->shape();
const int channels_ = shape_[1];
const int height_ = shape_[2];
const int width_ = shape_[3];
// Bias gradient, if necessary.
if (bias_term_ && bias_propagate_down_) {
const int count_bias = channels_;
ConvBackwardBias<Dtype><<<CAFFE_GET_BLOCKS(count_bias), CAFFE_CUDA_NUM_THREADS>>>(
count_bias, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height,
conved_weight, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bias_diff);
}
// gradient w.r.t. weight. Note that we will accumulate diffs.
if (weight_propagate_down_) {
const int count_weight = channels_ * kernel_h_ * kernel_w_;
ConvBackwardWeight<Dtype><<<CAFFE_GET_BLOCKS(count_weight), CAFFE_CUDA_NUM_THREADS>>>(
count_weight, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height,
conved_weight, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, weight_diff, bottom_data);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[i]) {
const int count_bottom=bottom[i]->count();
ConvBackward<Dtype><<<CAFFE_GET_BLOCKS(count_bottom), CAFFE_CUDA_NUM_THREADS>>>(
count_bottom, top_diff, bottom[i]->num(), channels_, height_, width_, conved_height, conved_weight,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, weight);
}
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
//quantize coef and run forward process:
int CoefPrecision = this->layer_param_.quant_convolution_param().coef_precision();
bool shift_enable = this->layer_param_.quant_convolution_param().shift_enable();
int bw_param = this->layer_param_.quant_convolution_param().bw_params();
int num_output = this->layer_param_.convolution_param().num_output();
int group = this->layer_param_.convolution_param().group();
bool dw = num_output == group;
if (CoefPrecision == QuantConvolutionParameter_Precision_FLOATING_POINT) { // floating point coef scheme:
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = (this->bias_term_) ? this->blobs_[1]->gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else if ((CoefPrecision == QuantConvolutionParameter_Precision_ONE_BIT) || // one bit coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_TWO_BITS)|| // two bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_THREE_BITS)|| // three bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_FIVE_BITS)){ // five bits coef scheme:
// coef quantization: one or two bits
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
// convert coefficeints:
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
int Kernel_Size = Kernel_Height*Kernel_Width;
const Dtype* pt_origin_coef = this->blobs_[0]->cpu_data(); // in cpu
Dtype* pt_binary_coef = blob_binary_coef.mutable_cpu_data();
// weight quantization with a floating scalar
Blob<Dtype> blob_binary_bias;
if (this->bias_term_){
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
const Dtype* pt_origin_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* pt_binary_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision, pt_binary_bias, pt_origin_bias);
}
else {
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision);
}
// run convolution:
const Dtype* weight = blob_binary_coef.gpu_data();
const Dtype* bias = (this->bias_term_) ? blob_binary_bias.gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else if (CoefPrecision == QuantConvolutionParameter_Precision_DYNAMIC_FIXED_POINT) {
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
Dtype* src_weight = (Dtype*) this->blobs_[0]->cpu_data();
Dtype* dst_weight = (Dtype*)blob_binary_coef.mutable_cpu_data();
int count_weight = Output_Channels*Input_Channels*Kernel_Height*Kernel_Width;
Blob<Dtype> blob_binary_bias;
if (this->bias_term_){
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
Dtype* src_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* dst_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
int count_bias = Output_Channels;
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable, dst_bias, src_bias, count_bias);
}
else {
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable);
}
// run convolution:
const Dtype* weight = blob_binary_coef.gpu_data();
const Dtype* bias = (this->bias_term_) ? blob_binary_bias.gpu_data() : 0;
dw ? forward_dw_conv_gpu(bottom, top, weight, bias) : forward_conv_gpu(bottom, top, weight, bias);
}
else {
printf("Error quantization scheme !!!\n");
}
}
template <typename Dtype>
void QuantConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int CoefPrecision = this->layer_param_.quant_convolution_param().coef_precision();
bool shift_enable = this->layer_param_.quant_convolution_param().shift_enable();
int bw_param = this->layer_param_.quant_convolution_param().bw_params();
int num_output = this->layer_param_.convolution_param().num_output();
int group = this->layer_param_.convolution_param().group();
bool dw = num_output == group;
if (CoefPrecision == QuantConvolutionParameter_Precision_FLOATING_POINT) { // floating point coef scheme:
const Dtype* weight = this->blobs_[0]->gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else if ((CoefPrecision == QuantConvolutionParameter_Precision_ONE_BIT) || // one bit coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_TWO_BITS)|| // two bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_THREE_BITS)|| // three bits coef scheme:
(CoefPrecision == QuantConvolutionParameter_Precision_FIVE_BITS)) { // five bits coef scheme:
//std::cout << "======= Binary_Backward_cpu ============\n";
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
// convert coefficeints:
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
int Kernel_Size = Kernel_Height*Kernel_Width;
const Dtype* pt_origin_coef = this->blobs_[0]->cpu_data(); // in cpu
Dtype* pt_binary_coef = blob_binary_coef.mutable_cpu_data();
if (this->bias_term_){
Blob<Dtype> blob_binary_bias;
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
const Dtype* pt_origin_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* pt_binary_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision, pt_binary_bias, pt_origin_bias);
}
else {
quantize_block_data_shift_quant(pt_binary_coef, pt_origin_coef, Output_Channels, Input_Channels, Kernel_Size, bw_param, shift_enable, CoefPrecision);
}
const Dtype* weight = blob_binary_coef.gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else if (CoefPrecision == QuantConvolutionParameter_Precision_DYNAMIC_FIXED_POINT) {
Blob<Dtype> blob_binary_coef;
blob_binary_coef.ReshapeLike(*this->blobs_[0]);
int Output_Channels = blob_binary_coef.shape(0);
int Input_Channels = blob_binary_coef.shape(1);
int Kernel_Height = blob_binary_coef.shape(2);
int Kernel_Width = blob_binary_coef.shape(3);
Dtype* src_weight = (Dtype*) this->blobs_[0]->cpu_data();
Dtype* dst_weight = (Dtype*) blob_binary_coef.mutable_cpu_data();
int count_weight = Output_Channels*Input_Channels*Kernel_Height*Kernel_Width;
if (this->bias_term_){
Blob<Dtype> blob_binary_bias;
blob_binary_bias.ReshapeLike(*this->blobs_[1]);
Dtype* src_bias = (Dtype*) this->blobs_[1]->cpu_data();
Dtype* dst_bias = (Dtype*)blob_binary_bias.mutable_cpu_data();
int count_bias = Output_Channels;
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable, dst_bias, src_bias, count_bias);
}
else {
quantize_block_data_dynamic_fxp(dst_weight, src_weight, count_weight, bw_param, shift_enable);
}
// backward process
const Dtype* weight = blob_binary_coef.gpu_data();
dw ? backward_dw_conv_gpu(top, propagate_down, bottom, weight) : backward_conv_gpu(top, propagate_down, bottom, weight);
}
else {
printf("Error quantization scheme !!!\n");
}
}
INSTANTIATE_LAYER_GPU_FUNCS(QuantConvolutionLayer);
//need these lines to instantiate static functions from hpp that are not precisely forward_gpu and backward_gpu
//as instantiate_layer_gpu_funcs does this under the hood.
//regular forward conv
template void QuantConvolutionLayer<float>::forward_conv_gpu(const vector<Blob<float>*>& bottom,
const vector<Blob<float>*>& top, const float* weight, const float* bias);
template void QuantConvolutionLayer<double>::forward_conv_gpu(const vector<Blob<double>*>& bottom,
const vector<Blob<double>*>& top, const double* weight, const double* bias);
//dw forward conv
template void QuantConvolutionLayer<float>::forward_dw_conv_gpu(const vector<Blob<float>*>& bottom,
const vector<Blob<float>*>& top, const float* weight, const float* bias);
template void QuantConvolutionLayer<double>::forward_dw_conv_gpu(const vector<Blob<double>*>& bottom,
const vector<Blob<double>*>& top, const double* weight, const double* bias);
//regular backward conv
template void QuantConvolutionLayer<float>::backward_conv_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom, const float* weight);
template void QuantConvolutionLayer<double>::backward_conv_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom, const double* weight);
//dw backward conv
template void QuantConvolutionLayer<float>::backward_dw_conv_gpu(
const vector<Blob<float>*>& top, const vector<bool>& propagate_down,
const vector<Blob<float>*>& bottom, const float* weight);
template void QuantConvolutionLayer<double>::backward_dw_conv_gpu(
const vector<Blob<double>*>& top, const vector<bool>& propagate_down,
const vector<Blob<double>*>& bottom, const double* weight);
} // namespace caffe
|
1ea366308478232869d1c69cb6498933f511ccfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_cuda_funcs.cuh"
#include "common_hip_defs.cuh"
enum class ScaleType
{
SINGLE_SCALE,
PER_WEIGHT_CHANNEL,
PER_ACTIVATION_CHANNEL
};
ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range)
{
TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality");
uint64_t scale_dim = input_range.dim();
for (int i = 0; i < scale_dim; i++)
{
TORCH_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes");
}
uint64_t scale_count = input_range.numel();
if (scale_dim > 0)
{
// For (NxCxHxW) input/output tensors, it is assumed that input_range is
// either (1) for single-scale quantization, or (Nx1x1x1) for
// per-channel scale weights quantization, or (1xCx1x1) for per-channel
// activation quantization
if (input_range.size(0) > 1)
{
TORCH_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different");
TORCH_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat");
return ScaleType::PER_WEIGHT_CHANNEL;
}
else if (scale_dim >= 2 and input_range.size(1) > 1)
{
TORCH_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different");
TORCH_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat");
return ScaleType::PER_ACTIVATION_CHANNEL;
}
}
return ScaleType::SINGLE_SCALE;
}
namespace {
template <typename scalar_t>
__device__ void fakeQuantize(
scalar_t* __restrict__ output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels
) {
scalar_t s = (levels - 1) / (*input_range);
(*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low);
}
template <typename scalar_t>
__global__ void q_cuda_forward_kernel(
scalar_t* __restrict__ output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const uint64_t size,
const uint64_t contiguous_elements_per_scale,
const uint64_t scale_count) {
const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
// "Scales" are derived from input_low/input_range
uint64_t scale_idx = static_cast<uint64_t>(idx / contiguous_elements_per_scale) % scale_count;
fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels);
}
}
template <typename scalar_t>
__device__ void calcGrad(
scalar_t* __restrict__ val_grad_input,
scalar_t* __restrict__ val_grad_input_low,
scalar_t* __restrict__ val_grad_input_range,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ output,
const scalar_t range_low,
const scalar_t range_high,
const scalar_t reverted_range,
const scalar_t val_low_grad) {
*val_grad_input_range = 0;
*val_grad_input_low = 0;
*val_grad_input = 0;
if ((*input) < range_low) {
(*val_grad_input_range) = val_low_grad * (*grad_output);
(*val_grad_input_low) = (*grad_output);
} else if ((*input) > range_high) {
(*val_grad_input_range) = (*grad_output);
(*val_grad_input_low) = (*grad_output);
} else {
(*val_grad_input_range) = (*grad_output) * (((*output) - (*input)) * reverted_range);
(*val_grad_input) = (*grad_output);
}
}
template <typename scalar_t>
__global__ void q_single_scale_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const size_t size) {
const uint16_t tidx = threadIdx.x;
const uint32_t bidx = blockIdx.x;
const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x;
scalar_t sum_range = 0, sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
for (size_t i = gtidx; i < size; i += grid_size) {
fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
(input + i), &output, range_low, range_high, reverted_range, alpha);
sum_range += val_grad_input_range;
sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, gridDim.x);
reduce_with_shared_memory<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, gridDim.x);
}
template <typename scalar_t>
__global__ void q_scale_per_weight_channel_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const size_t elements_per_scale) {
const uint16_t tidx = threadIdx.x;
const uint32_t scale_idx = blockIdx.x;
const uint32_t per_scale_block_idx = blockIdx.y;
const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint32_t total_blocks_per_scale = gridDim.y;
const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
// Applying scale data offsets
input_low += scale_idx;
input_range += scale_idx;
dev_tmp_low += scale_idx * total_blocks_per_scale;
dev_tmp_range += scale_idx * total_blocks_per_scale;
dev_last_block_counter_low += scale_idx;
dev_last_block_counter_range += scale_idx;
grad_input_low += scale_idx;
grad_input_range += scale_idx;
const size_t offset_for_scaled_quantized_elements = scale_idx * elements_per_scale;
input += offset_for_scaled_quantized_elements;
grad_input += offset_for_scaled_quantized_elements;
grad_output += offset_for_scaled_quantized_elements;
scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
for (size_t i = per_scale_tidx; i < elements_per_scale; i += total_threads_per_scale) {
fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
(input + i), &output, range_low, range_high, reverted_range, alpha);
per_thread_grad_sum_range += val_grad_input_range;
per_thread_grad_sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
template <typename scalar_t>
__global__ void q_scale_per_activation_channel_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const int64_t total_elements_per_scale,
const int64_t contiguous_elements_per_scale,
const int64_t scale_count,
const int64_t leading_channel_offset) {
const uint16_t tidx = threadIdx.x;
const uint32_t scale_idx = blockIdx.x;
const uint32_t per_scale_block_idx = blockIdx.y;
const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint32_t total_blocks_per_scale = gridDim.y;
const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
// Applying scale data offsets
input_low += scale_idx;
input_range += scale_idx;
dev_tmp_low += scale_idx * total_blocks_per_scale;
dev_tmp_range += scale_idx * total_blocks_per_scale;
dev_last_block_counter_low += scale_idx;
dev_last_block_counter_range += scale_idx;
grad_input_low += scale_idx;
grad_input_range += scale_idx;
scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
// The blocks of values belonging to one and the same scale here are interleaved with a period
// equal to contiguous_elements_per_scale. Will apply an offset to the beginning of the first
// block of values belonging to the current scale of the thread block, and then, in the for loop, map
// a contiguously changing loop iteration index into a value-block-skipping offset calculation pattern.
const size_t initial_offset = scale_idx * contiguous_elements_per_scale;
input += initial_offset;
grad_input += initial_offset;
grad_output += initial_offset;
for (uint64_t i = per_scale_tidx; i < total_elements_per_scale; i += total_threads_per_scale) {
size_t additional_offset = (i / contiguous_elements_per_scale) * leading_channel_offset + (i % contiguous_elements_per_scale);
fakeQuantize<scalar_t>(&output, (input + additional_offset), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + additional_offset), &val_grad_input_low, &val_grad_input_range, (grad_output + additional_offset),
(input + additional_offset), &output, range_low, range_high, reverted_range, alpha);
per_thread_grad_sum_range += val_grad_input_range;
per_thread_grad_sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
}
at::Tensor q_cuda_forward(
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels) {
at::DeviceGuard guard(input.device());
const auto quantized_elements_count = input.numel();
ScaleType scale_type = get_scale_type(input, input_low, input_range);
uint64_t contiguous_elements_per_scale = 0;
uint64_t scale_count = input_range.numel();
switch (scale_type)
{
case ScaleType::PER_ACTIVATION_CHANNEL:
// Scale count should be equal to 1-st input tensor dimension
contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count);
break;
case ScaleType::PER_WEIGHT_CHANNEL:
// Scale count should be equal to 0-th input tensor dimension
contiguous_elements_per_scale = quantized_elements_count / scale_count;
break;
default:
contiguous_elements_per_scale = quantized_elements_count;
break;
}
auto output = at::empty_like(input);
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_cuda_forward", ([&] {
hipLaunchKernelGGL(( q_cuda_forward_kernel<scalar_t>), dim3(GET_BLOCKS(quantized_elements_count)), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
quantized_elements_count,
contiguous_elements_per_scale,
scale_count);
}));)
return output;
}
std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto size = input.numel();
auto grad_input = at::empty_like(grad_output);
auto grad_input_range = at::empty({1}, grad_output.options());
auto grad_input_low = at::empty({1}, grad_output.options());
auto grid_size = ::min(GET_BLOCKS(size), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE);
auto dev_tmp_range = at::empty({grid_size}, grad_output.options());
auto dev_tmp_low = at::empty({grid_size}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] {
hipLaunchKernelGGL(( q_single_scale_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
size);
}));)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto scale_count = input_range.size(0);
const auto elements_per_scale = input.numel() / scale_count;
auto grad_input = at::empty_like(grad_output);
auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] {
hipLaunchKernelGGL(( q_scale_per_weight_channel_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
elements_per_scale);
}));
)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto scale_count = input_range.size(1);
const auto total_elements_per_scale = input.numel() / scale_count;
const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0));
const auto leading_channel_offset = input.numel() / input.size(0);
auto grad_input = at::empty_like(grad_output);
auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_activation_channel_cuda_backward", ([&] {
hipLaunchKernelGGL(( q_scale_per_activation_channel_cuda_backward_kernel<scalar_t>), dim3(grid_size), dim3(CUDA_MAX_NUM_THREADS_PER_BLOCK), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
total_elements_per_scale,
contiguous_elements_per_scale,
scale_count,
leading_channel_offset);
}));
)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_cuda_backward(
at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
ScaleType scale_type = get_scale_type(input, input_low, input_range);
switch (scale_type)
{
case ScaleType::PER_ACTIVATION_CHANNEL:
return q_scale_per_activation_channel_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
case ScaleType::PER_WEIGHT_CHANNEL:
return q_scale_per_weight_channel_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
case ScaleType::SINGLE_SCALE:
default:
return q_single_scale_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
};
}
| 1ea366308478232869d1c69cb6498933f511ccfd.cu | #include "common_cuda_funcs.cuh"
#include "common_cuda_defs.cuh"
enum class ScaleType
{
SINGLE_SCALE,
PER_WEIGHT_CHANNEL,
PER_ACTIVATION_CHANNEL
};
ScaleType get_scale_type(const at::Tensor& input, const at::Tensor& input_low, const at::Tensor& input_range)
{
TORCH_CHECK(input_low.dim() == input_range.dim(), "input_low and input_range have different dimensionality");
uint64_t scale_dim = input_range.dim();
for (int i = 0; i < scale_dim; i++)
{
TORCH_CHECK(input_low.size(i) == input_range.size(i), "input_low and input_range have different dimension sizes");
}
uint64_t scale_count = input_range.numel();
if (scale_dim > 0)
{
// For (NxCxHxW) input/output tensors, it is assumed that input_range is
// either (1) for single-scale quantization, or (Nx1x1x1) for
// per-channel scale weights quantization, or (1xCx1x1) for per-channel
// activation quantization
if (input_range.size(0) > 1)
{
TORCH_CHECK(input_range.size(0) == input.size(0), "Scale count and weights input channel count is different");
TORCH_CHECK(input_range.size(0) == scale_count, "Scale shape is not flat");
return ScaleType::PER_WEIGHT_CHANNEL;
}
else if (scale_dim >= 2 and input_range.size(1) > 1)
{
TORCH_CHECK(input_range.size(1) == input.size(1), "Scale count and activations channel count is different");
TORCH_CHECK(input_range.size(1) == scale_count, "Scale shape is not flat");
return ScaleType::PER_ACTIVATION_CHANNEL;
}
}
return ScaleType::SINGLE_SCALE;
}
namespace {
template <typename scalar_t>
__device__ void fakeQuantize(
scalar_t* __restrict__ output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels
) {
scalar_t s = (levels - 1) / (*input_range);
(*output) = round((min(max((*input), (*input_low)), (*input_low) + (*input_range)) - (*input_low)) * s) / s + (*input_low);
}
template <typename scalar_t>
__global__ void q_cuda_forward_kernel(
scalar_t* __restrict__ output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const uint64_t size,
const uint64_t contiguous_elements_per_scale,
const uint64_t scale_count) {
const uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
// "Scales" are derived from input_low/input_range
uint64_t scale_idx = static_cast<uint64_t>(idx / contiguous_elements_per_scale) % scale_count;
fakeQuantize<scalar_t>((output + idx), (input + idx), input_low + scale_idx, input_range + scale_idx, levels);
}
}
template <typename scalar_t>
__device__ void calcGrad(
scalar_t* __restrict__ val_grad_input,
scalar_t* __restrict__ val_grad_input_low,
scalar_t* __restrict__ val_grad_input_range,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ output,
const scalar_t range_low,
const scalar_t range_high,
const scalar_t reverted_range,
const scalar_t val_low_grad) {
*val_grad_input_range = 0;
*val_grad_input_low = 0;
*val_grad_input = 0;
if ((*input) < range_low) {
(*val_grad_input_range) = val_low_grad * (*grad_output);
(*val_grad_input_low) = (*grad_output);
} else if ((*input) > range_high) {
(*val_grad_input_range) = (*grad_output);
(*val_grad_input_low) = (*grad_output);
} else {
(*val_grad_input_range) = (*grad_output) * (((*output) - (*input)) * reverted_range);
(*val_grad_input) = (*grad_output);
}
}
template <typename scalar_t>
__global__ void q_single_scale_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const size_t size) {
const uint16_t tidx = threadIdx.x;
const uint32_t bidx = blockIdx.x;
const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x;
scalar_t sum_range = 0, sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
for (size_t i = gtidx; i < size; i += grid_size) {
fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
(input + i), &output, range_low, range_high, reverted_range, alpha);
sum_range += val_grad_input_range;
sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, sum_range, tidx, bidx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, gridDim.x);
reduce_with_shared_memory<scalar_t>(sh_grad_low, sum_low, tidx, bidx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, gridDim.x);
}
template <typename scalar_t>
__global__ void q_scale_per_weight_channel_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const size_t elements_per_scale) {
const uint16_t tidx = threadIdx.x;
const uint32_t scale_idx = blockIdx.x;
const uint32_t per_scale_block_idx = blockIdx.y;
const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint32_t total_blocks_per_scale = gridDim.y;
const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
// Applying scale data offsets
input_low += scale_idx;
input_range += scale_idx;
dev_tmp_low += scale_idx * total_blocks_per_scale;
dev_tmp_range += scale_idx * total_blocks_per_scale;
dev_last_block_counter_low += scale_idx;
dev_last_block_counter_range += scale_idx;
grad_input_low += scale_idx;
grad_input_range += scale_idx;
const size_t offset_for_scaled_quantized_elements = scale_idx * elements_per_scale;
input += offset_for_scaled_quantized_elements;
grad_input += offset_for_scaled_quantized_elements;
grad_output += offset_for_scaled_quantized_elements;
scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
for (size_t i = per_scale_tidx; i < elements_per_scale; i += total_threads_per_scale) {
fakeQuantize<scalar_t>(&output, (input + i), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + i), &val_grad_input_low, &val_grad_input_range, (grad_output + i),
(input + i), &output, range_low, range_high, reverted_range, alpha);
per_thread_grad_sum_range += val_grad_input_range;
per_thread_grad_sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
template <typename scalar_t>
__global__ void q_scale_per_activation_channel_cuda_backward_kernel(
scalar_t* __restrict__ grad_input,
scalar_t* __restrict__ grad_input_low,
scalar_t* __restrict__ grad_input_range,
scalar_t* __restrict__ dev_tmp_range,
scalar_t* __restrict__ dev_tmp_low,
int32_t* __restrict__ dev_last_block_counter_range,
int32_t* __restrict__ dev_last_block_counter_low,
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ input_low,
const scalar_t* __restrict__ input_range,
const scalar_t levels,
const scalar_t level_low,
const scalar_t level_high,
const int64_t total_elements_per_scale,
const int64_t contiguous_elements_per_scale,
const int64_t scale_count,
const int64_t leading_channel_offset) {
const uint16_t tidx = threadIdx.x;
const uint32_t scale_idx = blockIdx.x;
const uint32_t per_scale_block_idx = blockIdx.y;
const uint64_t per_scale_tidx = per_scale_block_idx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx;
const uint32_t total_blocks_per_scale = gridDim.y;
const uint64_t total_threads_per_scale = total_blocks_per_scale * CUDA_MAX_NUM_THREADS_PER_BLOCK;
// Applying scale data offsets
input_low += scale_idx;
input_range += scale_idx;
dev_tmp_low += scale_idx * total_blocks_per_scale;
dev_tmp_range += scale_idx * total_blocks_per_scale;
dev_last_block_counter_low += scale_idx;
dev_last_block_counter_range += scale_idx;
grad_input_low += scale_idx;
grad_input_range += scale_idx;
scalar_t per_thread_grad_sum_range = 0, per_thread_grad_sum_low = 0;
scalar_t output, val_grad_input_range, val_grad_input_low;
scalar_t alpha = level_low / level_high;
scalar_t range_low = (*input_low);
scalar_t range_high = (*input_low) + (*input_range);
scalar_t reverted_range = 1 / (*input_range);
// The blocks of values belonging to one and the same scale here are interleaved with a period
// equal to contiguous_elements_per_scale. Will apply an offset to the beginning of the first
// block of values belonging to the current scale of the thread block, and then, in the for loop, map
// a contiguously changing loop iteration index into a value-block-skipping offset calculation pattern.
const size_t initial_offset = scale_idx * contiguous_elements_per_scale;
input += initial_offset;
grad_input += initial_offset;
grad_output += initial_offset;
for (uint64_t i = per_scale_tidx; i < total_elements_per_scale; i += total_threads_per_scale) {
size_t additional_offset = (i / contiguous_elements_per_scale) * leading_channel_offset + (i % contiguous_elements_per_scale);
fakeQuantize<scalar_t>(&output, (input + additional_offset), input_low, input_range, levels);
calcGrad<scalar_t>((grad_input + additional_offset), &val_grad_input_low, &val_grad_input_range, (grad_output + additional_offset),
(input + additional_offset), &output, range_low, range_high, reverted_range, alpha);
per_thread_grad_sum_range += val_grad_input_range;
per_thread_grad_sum_low += val_grad_input_low;
}
__shared__ scalar_t sh_grad_range[CUDA_MAX_NUM_THREADS_PER_BLOCK];
__shared__ scalar_t sh_grad_low[CUDA_MAX_NUM_THREADS_PER_BLOCK];
reduce_with_shared_memory<scalar_t>(sh_grad_range, per_thread_grad_sum_range, tidx, per_scale_block_idx, dev_tmp_range, dev_last_block_counter_range, grad_input_range, total_blocks_per_scale);
reduce_with_shared_memory<scalar_t>(sh_grad_low, per_thread_grad_sum_low, tidx, per_scale_block_idx, dev_tmp_low, dev_last_block_counter_low, grad_input_low, total_blocks_per_scale);
}
}
at::Tensor q_cuda_forward(
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels) {
at::DeviceGuard guard(input.device());
const auto quantized_elements_count = input.numel();
ScaleType scale_type = get_scale_type(input, input_low, input_range);
uint64_t contiguous_elements_per_scale = 0;
uint64_t scale_count = input_range.numel();
switch (scale_type)
{
case ScaleType::PER_ACTIVATION_CHANNEL:
// Scale count should be equal to 1-st input tensor dimension
contiguous_elements_per_scale = quantized_elements_count / (input.size(0) * scale_count);
break;
case ScaleType::PER_WEIGHT_CHANNEL:
// Scale count should be equal to 0-th input tensor dimension
contiguous_elements_per_scale = quantized_elements_count / scale_count;
break;
default:
contiguous_elements_per_scale = quantized_elements_count;
break;
}
auto output = at::empty_like(input);
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_cuda_forward", ([&] {
q_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(quantized_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
quantized_elements_count,
contiguous_elements_per_scale,
scale_count);
}));)
return output;
}
std::vector<at::Tensor> q_single_scale_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto size = input.numel();
auto grad_input = at::empty_like(grad_output);
auto grad_input_range = at::empty({1}, grad_output.options());
auto grad_input_low = at::empty({1}, grad_output.options());
auto grid_size = std::min(GET_BLOCKS(size), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE);
auto dev_tmp_range = at::empty({grid_size}, grad_output.options());
auto dev_tmp_low = at::empty({grid_size}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] {
q_single_scale_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
size);
}));)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_scale_per_weight_channel_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto scale_count = input_range.size(0);
const auto elements_per_scale = input.numel() / scale_count;
auto grad_input = at::empty_like(grad_output);
auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_single_scale_cuda_backward", ([&] {
q_scale_per_weight_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
elements_per_scale);
}));
)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_scale_per_activation_channel_cuda_backward(at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
const auto scale_count = input_range.size(1);
const auto total_elements_per_scale = input.numel() / scale_count;
const auto contiguous_elements_per_scale = input.numel() / (scale_count * input.size(0));
const auto leading_channel_offset = input.numel() / input.size(0);
auto grad_input = at::empty_like(grad_output);
auto grad_input_low = at::empty(input_range.sizes(), grad_output.options());
auto grad_input_range = at::empty(input_range.sizes(), grad_output.options());
dim3 grid_size = get_2d_grid_size_for_per_channel(scale_count);
auto dev_tmp_range = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_tmp_low = at::zeros({grid_size.x, grid_size.y}, grad_output.options());
auto dev_last_block_counter_range = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
auto dev_last_block_counter_low = at::zeros({grid_size.x, 1}, at::device(grad_output.options().device()).dtype(at::kInt));
PROFILE(
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "q_scale_per_activation_channel_cuda_backward", ([&] {
q_scale_per_activation_channel_cuda_backward_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_input_low.data_ptr<scalar_t>(),
grad_input_range.data_ptr<scalar_t>(),
dev_tmp_range.data_ptr<scalar_t>(),
dev_tmp_low.data_ptr<scalar_t>(),
dev_last_block_counter_range.data_ptr<int32_t>(),
dev_last_block_counter_low.data_ptr<int32_t>(),
grad_output.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
input_low.data_ptr<scalar_t>(),
input_range.data_ptr<scalar_t>(),
levels,
level_low,
level_high,
total_elements_per_scale,
contiguous_elements_per_scale,
scale_count,
leading_channel_offset);
}));
)
return {grad_input, grad_input_low, grad_input_range};
}
std::vector<at::Tensor> q_cuda_backward(
at::Tensor grad_output,
at::Tensor input,
at::Tensor input_low,
at::Tensor input_range,
int levels,
int level_low,
int level_high) {
at::DeviceGuard guard(input.device());
ScaleType scale_type = get_scale_type(input, input_low, input_range);
switch (scale_type)
{
case ScaleType::PER_ACTIVATION_CHANNEL:
return q_scale_per_activation_channel_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
case ScaleType::PER_WEIGHT_CHANNEL:
return q_scale_per_weight_channel_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
case ScaleType::SINGLE_SCALE:
default:
return q_single_scale_cuda_backward(
grad_output,
input,
input_low,
input_range,
levels,
level_low,
level_high);
};
}
|
08d0a1aa19d0ce0f0f965a15e2fbee13ba3a38bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "computeTMatrix.h"
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
if (result) {
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), _cudaGetErrorEnum(result), func);
exit(EXIT_FAILURE);
}
}
#define chkCudaErr(val) check((val), #val, __FILE__, __LINE__)
static const char *_cudaGetErrorEnum(hipError_t error) {
return hipGetErrorName(error);
}
void computeTMatrixCUBLAS(hipDoubleComplex* T_d,
hipDoubleComplex* F_d,
int matLength, int TLabLength,
hipblasStatus_t status,
hipblasHandle_t handle) {
using microseconds = std::chrono::microseconds;
// Host variables
hipDoubleComplex** Fptr_array_h;
hipDoubleComplex** Tptr_array_h;
Fptr_array_h = (hipDoubleComplex**)malloc(TLabLength * sizeof(hipDoubleComplex*));
Tptr_array_h = (hipDoubleComplex**)malloc(TLabLength * sizeof(hipDoubleComplex*));
// Device variables
hipDoubleComplex** Fptr_array_d;
hipDoubleComplex** Tptr_array_d;
int* pivotArray_d;
int* trfInfo_d;
int trsInfo_d;
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("> ERROR: cuBLAS initialization failed\n");
}
// Allocate memory for device variables
chkCudaErr(hipMalloc((void**)&pivotArray_d, matLength * TLabLength * sizeof(int)));
chkCudaErr(hipMalloc((void**)&trfInfo_d, TLabLength * sizeof(int)));
chkCudaErr(hipMalloc((void**)&Fptr_array_d, TLabLength * sizeof(hipDoubleComplex*)));
chkCudaErr(hipMalloc((void**)&Tptr_array_d, TLabLength * sizeof(hipDoubleComplex*)));
// Create pointer array for matrices
for (int i = 0; i < TLabLength; i++) {
Fptr_array_h[i] = F_d + (i * matLength * matLength);
Tptr_array_h[i] = T_d + (i * matLength);
}
// Copy pointer array to device memory
chkCudaErr(hipMemcpy(Fptr_array_d, Fptr_array_h,
TLabLength * sizeof(hipDoubleComplex*),
hipMemcpyHostToDevice));
chkCudaErr(hipMemcpy(Tptr_array_d, Tptr_array_h,
TLabLength * sizeof(hipDoubleComplex*),
hipMemcpyHostToDevice));
// Perform LU decomposition
status = hipblasZgetrfBatched(handle, matLength, Fptr_array_d, matLength, pivotArray_d,
trfInfo_d, TLabLength);
// Calculate the T matrix
status = hipblasZgetrsBatched(handle, HIPBLAS_OP_N, matLength, 1, Fptr_array_d,
matLength, pivotArray_d, Tptr_array_d, matLength, &trsInfo_d,
TLabLength);
// Copy data to host from device
// chkCudaErr(hipMemcpy(T_d, V_d, TLabLength*matLength*matLength *
// sizeof(hipDoubleComplex), hipMemcpyDeviceToDevice));
// Free device variables
chkCudaErr(hipFree(Fptr_array_d));
chkCudaErr(hipFree(Tptr_array_d));
chkCudaErr(hipFree(trfInfo_d));
chkCudaErr(hipFree(pivotArray_d));
chkCudaErr(hipFree(F_d));
// chkCudaErr(hipFree(V_d));
// Destroy cuBLAS handle
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("> ERROR: cuBLAS uninitialization failed...\n");
}
}
| 08d0a1aa19d0ce0f0f965a15e2fbee13ba3a38bb.cu | #include "computeTMatrix.h"
template <typename T>
void check(T result, char const *const func, const char *const file,
int const line) {
if (result) {
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
static_cast<unsigned int>(result), _cudaGetErrorEnum(result), func);
exit(EXIT_FAILURE);
}
}
#define chkCudaErr(val) check((val), #val, __FILE__, __LINE__)
static const char *_cudaGetErrorEnum(cudaError_t error) {
return cudaGetErrorName(error);
}
void computeTMatrixCUBLAS(cuDoubleComplex* T_d,
cuDoubleComplex* F_d,
int matLength, int TLabLength,
cublasStatus_t status,
cublasHandle_t handle) {
using microseconds = std::chrono::microseconds;
// Host variables
cuDoubleComplex** Fptr_array_h;
cuDoubleComplex** Tptr_array_h;
Fptr_array_h = (cuDoubleComplex**)malloc(TLabLength * sizeof(cuDoubleComplex*));
Tptr_array_h = (cuDoubleComplex**)malloc(TLabLength * sizeof(cuDoubleComplex*));
// Device variables
cuDoubleComplex** Fptr_array_d;
cuDoubleComplex** Tptr_array_d;
int* pivotArray_d;
int* trfInfo_d;
int trsInfo_d;
if (status != CUBLAS_STATUS_SUCCESS) {
printf("> ERROR: cuBLAS initialization failed\n");
}
// Allocate memory for device variables
chkCudaErr(cudaMalloc((void**)&pivotArray_d, matLength * TLabLength * sizeof(int)));
chkCudaErr(cudaMalloc((void**)&trfInfo_d, TLabLength * sizeof(int)));
chkCudaErr(cudaMalloc((void**)&Fptr_array_d, TLabLength * sizeof(cuDoubleComplex*)));
chkCudaErr(cudaMalloc((void**)&Tptr_array_d, TLabLength * sizeof(cuDoubleComplex*)));
// Create pointer array for matrices
for (int i = 0; i < TLabLength; i++) {
Fptr_array_h[i] = F_d + (i * matLength * matLength);
Tptr_array_h[i] = T_d + (i * matLength);
}
// Copy pointer array to device memory
chkCudaErr(cudaMemcpy(Fptr_array_d, Fptr_array_h,
TLabLength * sizeof(cuDoubleComplex*),
cudaMemcpyHostToDevice));
chkCudaErr(cudaMemcpy(Tptr_array_d, Tptr_array_h,
TLabLength * sizeof(cuDoubleComplex*),
cudaMemcpyHostToDevice));
// Perform LU decomposition
status = cublasZgetrfBatched(handle, matLength, Fptr_array_d, matLength, pivotArray_d,
trfInfo_d, TLabLength);
// Calculate the T matrix
status = cublasZgetrsBatched(handle, CUBLAS_OP_N, matLength, 1, Fptr_array_d,
matLength, pivotArray_d, Tptr_array_d, matLength, &trsInfo_d,
TLabLength);
// Copy data to host from device
// chkCudaErr(cudaMemcpy(T_d, V_d, TLabLength*matLength*matLength *
// sizeof(cuDoubleComplex), cudaMemcpyDeviceToDevice));
// Free device variables
chkCudaErr(cudaFree(Fptr_array_d));
chkCudaErr(cudaFree(Tptr_array_d));
chkCudaErr(cudaFree(trfInfo_d));
chkCudaErr(cudaFree(pivotArray_d));
chkCudaErr(cudaFree(F_d));
// chkCudaErr(cudaFree(V_d));
// Destroy cuBLAS handle
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
printf("> ERROR: cuBLAS uninitialization failed...\n");
}
}
|
a597c422e4a3e883d185b12cc8ca466774b05e85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example of program using the interval_gpu<T> template class and operators:
* Search for roots of a function using an interval Newton method.
*
* Use the command-line argument "--n=<N>" to select which GPU implementation to use,
* otherwise the naive implementation will be used by default.
* 0: the naive implementation
* 1: the optimized implementation
* 2: the recursive implementation
*
*/
const static char *sSDKsample = "Interval Computing";
#include <iostream>
#include <stdio.h>
#include <shrQATest.h>
#include "cutil_inline.h"
#include "interval.h"
#include "cuda_interval.h"
#include "cpu_interval.h"
int main(int argc,char *argv[])
{
int implementation_choice = 0;
shrQAStart(argc, argv);
printf("[%s] starting ...\n\n", sSDKsample);
if (argc > 1)
{
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int *) &implementation_choice);
}
// Pick the best GPU available, or if the developer selects one at the command line
int devID = cutilChooseCudaDevice(argc, argv);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, devID);
printf("> GPU Device has Compute Capabilities SM %d.%d\n\n", deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x13) {
printf("%s: requires minimum of Compute Capability 1.3 or higher, waiving test...\n", sSDKsample);
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
}
switch (implementation_choice)
{
case 0:
printf ("GPU naive implementation\n");
break;
case 1:
printf ("GPU optimized implementation\n");
break;
case 2:
if (deviceProp.major >= 2)
printf ("GPU recursive implementation (requires Compute SM 2.0+)\n");
else
{
printf ("GPU naive implementation is used instead of the recursive implementation, which requires a GPU with CUDA capability 2.0+\n");
implementation_choice = 0;
}
break;
default:
printf ("GPU naive implementation\n");
}
interval_gpu<T> * d_result;
int * d_nresults;
int h_nresults[THREADS];
hipEvent_t start, stop;
CHECKED_CALL(hipSetDevice(devID));
CHECKED_CALL(hipMalloc((void**)&d_result, THREADS * DEPTH_RESULT * sizeof(*d_result)));
CHECKED_CALL(hipMalloc((void**)&d_nresults, THREADS * sizeof(*d_nresults)));
CHECKED_CALL(hipEventCreate(&start));
CHECKED_CALL(hipEventCreate(&stop));
if (deviceProp.major >= 2)
{
// We need L1 cache to store the stack (only applicable to sm_20 and higher)
CHECKED_CALL(hipFuncSetCacheConfig(test_interval_newton<T>, hipFuncCachePreferL1));
// Increase the stack size large enought for the non-inlined and recursive function calls (only applicable to sm_20 and higher)
#if CUDART_VERSION >= 4000
CHECKED_CALL(hipDeviceSetLimit (hipLimitStackSize, 8192));
#else
CHECKED_CALL(hipThreadSetLimit (hipLimitStackSize, 8192));
#endif
}
interval_gpu<T> i(0.01f, 4.0f);
std::cout << "Searching for roots in [" << i.lower() << ", " << i.upper() << "]...\n";
CHECKED_CALL(hipEventRecord(start, 0));
for(int it = 0; it < NUM_RUNS; ++it)
{
hipLaunchKernelGGL(( test_interval_newton<T>), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_result, d_nresults, i, implementation_choice);
CHECKED_CALL(hipGetLastError());
}
CHECKED_CALL(hipEventRecord(stop, 0));
CHECKED_CALL(cutilDeviceSynchronize());
I_CPU * h_result = new I_CPU[THREADS * DEPTH_RESULT];
CHECKED_CALL(hipMemcpy(h_result, d_result, THREADS * DEPTH_RESULT * sizeof(*d_result), hipMemcpyDeviceToHost));
CHECKED_CALL(hipMemcpy(h_nresults, d_nresults, THREADS * sizeof(*d_nresults), hipMemcpyDeviceToHost));
std::cout << "Found " << h_nresults[0] << " intervals that may contain the root(s)\n";
std::cout.precision(15);
for(int i = 0; i != h_nresults[0]; ++i)
{
std::cout << " i[" << i << "] ="
<< " [" << h_result[THREADS * i + 0].lower()
<< ", " << h_result[THREADS * i + 0].upper() << "]\n";
}
float time;
CHECKED_CALL(hipEventElapsedTime(&time, start, stop));
std::cout << "Number of equations solved: " << THREADS << "\n";
std::cout << "Time per equation: " << 1000000.0f * (time / (float)(THREADS)) / NUM_RUNS << " us\n";
CHECKED_CALL(hipEventDestroy(start));
CHECKED_CALL(hipEventDestroy(stop));
CHECKED_CALL(hipFree(d_result));
CHECKED_CALL(hipFree(d_nresults));
// Compute the results using a CPU implementation based on the Boost library
I_CPU i_cpu(0.01f, 4.0f);
I_CPU * h_result_cpu = new I_CPU[THREADS * DEPTH_RESULT];
int h_nresults_cpu[THREADS];
test_interval_newton_cpu<I_CPU>(h_result_cpu, h_nresults_cpu, i_cpu);
// Compare the CPU and GPU results
bool bTestResult = checkAgainstHost(h_nresults, h_nresults_cpu, h_result, h_result_cpu);
delete [] h_result_cpu;
delete [] h_result;
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (bTestResult ? QA_PASSED : QA_FAILED));
}
| a597c422e4a3e883d185b12cc8ca466774b05e85.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example of program using the interval_gpu<T> template class and operators:
* Search for roots of a function using an interval Newton method.
*
* Use the command-line argument "--n=<N>" to select which GPU implementation to use,
* otherwise the naive implementation will be used by default.
* 0: the naive implementation
* 1: the optimized implementation
* 2: the recursive implementation
*
*/
const static char *sSDKsample = "Interval Computing";
#include <iostream>
#include <stdio.h>
#include <shrQATest.h>
#include "cutil_inline.h"
#include "interval.h"
#include "cuda_interval.h"
#include "cpu_interval.h"
int main(int argc,char *argv[])
{
int implementation_choice = 0;
shrQAStart(argc, argv);
printf("[%s] starting ...\n\n", sSDKsample);
if (argc > 1)
{
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", (int *) &implementation_choice);
}
// Pick the best GPU available, or if the developer selects one at the command line
int devID = cutilChooseCudaDevice(argc, argv);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, devID);
printf("> GPU Device has Compute Capabilities SM %d.%d\n\n", deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x13) {
printf("%s: requires minimum of Compute Capability 1.3 or higher, waiving test...\n", sSDKsample);
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
}
switch (implementation_choice)
{
case 0:
printf ("GPU naive implementation\n");
break;
case 1:
printf ("GPU optimized implementation\n");
break;
case 2:
if (deviceProp.major >= 2)
printf ("GPU recursive implementation (requires Compute SM 2.0+)\n");
else
{
printf ("GPU naive implementation is used instead of the recursive implementation, which requires a GPU with CUDA capability 2.0+\n");
implementation_choice = 0;
}
break;
default:
printf ("GPU naive implementation\n");
}
interval_gpu<T> * d_result;
int * d_nresults;
int h_nresults[THREADS];
cudaEvent_t start, stop;
CHECKED_CALL(cudaSetDevice(devID));
CHECKED_CALL(cudaMalloc((void**)&d_result, THREADS * DEPTH_RESULT * sizeof(*d_result)));
CHECKED_CALL(cudaMalloc((void**)&d_nresults, THREADS * sizeof(*d_nresults)));
CHECKED_CALL(cudaEventCreate(&start));
CHECKED_CALL(cudaEventCreate(&stop));
if (deviceProp.major >= 2)
{
// We need L1 cache to store the stack (only applicable to sm_20 and higher)
CHECKED_CALL(cudaFuncSetCacheConfig(test_interval_newton<T>, cudaFuncCachePreferL1));
// Increase the stack size large enought for the non-inlined and recursive function calls (only applicable to sm_20 and higher)
#if CUDART_VERSION >= 4000
CHECKED_CALL(cudaDeviceSetLimit (cudaLimitStackSize, 8192));
#else
CHECKED_CALL(cudaThreadSetLimit (cudaLimitStackSize, 8192));
#endif
}
interval_gpu<T> i(0.01f, 4.0f);
std::cout << "Searching for roots in [" << i.lower() << ", " << i.upper() << "]...\n";
CHECKED_CALL(cudaEventRecord(start, 0));
for(int it = 0; it < NUM_RUNS; ++it)
{
test_interval_newton<T><<<GRID_SIZE, BLOCK_SIZE>>>(d_result, d_nresults, i, implementation_choice);
CHECKED_CALL(cudaGetLastError());
}
CHECKED_CALL(cudaEventRecord(stop, 0));
CHECKED_CALL(cutilDeviceSynchronize());
I_CPU * h_result = new I_CPU[THREADS * DEPTH_RESULT];
CHECKED_CALL(cudaMemcpy(h_result, d_result, THREADS * DEPTH_RESULT * sizeof(*d_result), cudaMemcpyDeviceToHost));
CHECKED_CALL(cudaMemcpy(h_nresults, d_nresults, THREADS * sizeof(*d_nresults), cudaMemcpyDeviceToHost));
std::cout << "Found " << h_nresults[0] << " intervals that may contain the root(s)\n";
std::cout.precision(15);
for(int i = 0; i != h_nresults[0]; ++i)
{
std::cout << " i[" << i << "] ="
<< " [" << h_result[THREADS * i + 0].lower()
<< ", " << h_result[THREADS * i + 0].upper() << "]\n";
}
float time;
CHECKED_CALL(cudaEventElapsedTime(&time, start, stop));
std::cout << "Number of equations solved: " << THREADS << "\n";
std::cout << "Time per equation: " << 1000000.0f * (time / (float)(THREADS)) / NUM_RUNS << " us\n";
CHECKED_CALL(cudaEventDestroy(start));
CHECKED_CALL(cudaEventDestroy(stop));
CHECKED_CALL(cudaFree(d_result));
CHECKED_CALL(cudaFree(d_nresults));
// Compute the results using a CPU implementation based on the Boost library
I_CPU i_cpu(0.01f, 4.0f);
I_CPU * h_result_cpu = new I_CPU[THREADS * DEPTH_RESULT];
int h_nresults_cpu[THREADS];
test_interval_newton_cpu<I_CPU>(h_result_cpu, h_nresults_cpu, i_cpu);
// Compare the CPU and GPU results
bool bTestResult = checkAgainstHost(h_nresults, h_nresults_cpu, h_result, h_result_cpu);
delete [] h_result_cpu;
delete [] h_result;
cutilDeviceReset();
shrQAFinishExit(argc, (const char **)argv, (bTestResult ? QA_PASSED : QA_FAILED));
}
|
02d81f1bad9bdb9f7f2ccefa6c514e83659178a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by fenix on 6/8/20.
//
#include <cuda_impl/cuda_integrate.cuh>
double cuda_integrate(size_t steps, const ConfigFileOpt &config) {
double res = 0.0;
// dxy - the length of the side of one integration square
double dxy = (config.get_y().second - config.get_y().first) / static_cast<double>(steps);
size_t steps_per_thread = steps / config.get_flow_num();
double *d_res, *d_c, *d_a1, *d_a2; //! Device
gpuErrorCheck(hipMalloc((void **) &d_res, config.get_flow_num() * sizeof(double)))\
gpuErrorCheck(hipMalloc((void **) &d_c, COEF_NUM * sizeof(double)))\
gpuErrorCheck(hipMalloc((void **) &d_a1, COEF_NUM * sizeof(double)))\
gpuErrorCheck(hipMalloc((void **) &d_a2, COEF_NUM * sizeof(double)))\
gpuErrorCheck(hipMemcpy(d_c, &(config.get_c()[0]), COEF_NUM * sizeof(double), hipMemcpyHostToDevice));\
gpuErrorCheck(hipMemcpy(d_a1, &(config.get_a1()[0]), COEF_NUM * sizeof(double), hipMemcpyHostToDevice));\
gpuErrorCheck(hipMemcpy(d_a2, &(config.get_a2()[0]), COEF_NUM * sizeof(double), hipMemcpyHostToDevice));\
hipLaunchKernelGGL(( cuda_thread_integrate), dim3(1), dim3(config.get_flow_num()), 0, 0, config.get_x().first, config.get_x().second,
config.get_y().first, config.get_y().second,
dxy, steps_per_thread, d_res, d_c, d_a1, d_a2);
/////////////////////////////////// Finalize result ///////////////////////////////////
double h_res[config.get_flow_num()]; // output buffer
gpuErrorCheck(hipMemcpy(h_res, d_res, config.get_flow_num() * sizeof(double), hipMemcpyDeviceToHost));
for (ptrdiff_t i = 0; i < config.get_flow_num(); ++i) {
res += h_res[i];
}
gpuErrorCheck(hipFree(d_res))\
gpuErrorCheck(hipFree(d_c))\
gpuErrorCheck(hipFree(d_a1))\
gpuErrorCheck(hipFree(d_a2))\
return res;
}
__global__ void
cuda_thread_integrate(const double start_x, const double end_x, double start_y, double end_y, double dxy,
size_t steps_per_thread, double *res, const double *d_c, const double *d_a1,
const double *d_a2) {
// cashed_device "array name" [size]
__shared__ double ch_d_c[COEF_NUM], ch_d_a1[COEF_NUM], ch_d_a2[COEF_NUM];
if (threadIdx.x == 0)
for (int i = 0; i < COEF_NUM; ++i) {
ch_d_c[i] = d_c[i];
ch_d_a1[i] = d_a1[i];
ch_d_a2[i] = d_a2[i];
}
// __shared__ double local_res[MAX_THREAD_NUM]; // local result
double diag, l_res = 0.0; // local result
start_y += dxy * steps_per_thread * threadIdx.x;
end_y = start_y + dxy * steps_per_thread;
double x = start_x, y = start_y;
while (y < end_y) {
while (x < end_x) {
for (uint8_t i = 0; i < COEF_NUM; ++i) {
diag = (x - ch_d_a1[i]) * (x - ch_d_a1[i]) + (y - ch_d_a2[i]) * (y - ch_d_a2[i]);
l_res += ch_d_c[i] * exp(-diag / static_cast<double>(M_PI)) * cos(static_cast<double>(M_PI) * diag);
}
x += dxy;
}
x = start_x;
y += dxy;
}
res[threadIdx.x] = -l_res * dxy * dxy;
}
| 02d81f1bad9bdb9f7f2ccefa6c514e83659178a5.cu | //
// Created by fenix on 6/8/20.
//
#include <cuda_impl/cuda_integrate.cuh>
double cuda_integrate(size_t steps, const ConfigFileOpt &config) {
double res = 0.0;
// dxy - the length of the side of one integration square
double dxy = (config.get_y().second - config.get_y().first) / static_cast<double>(steps);
size_t steps_per_thread = steps / config.get_flow_num();
double *d_res, *d_c, *d_a1, *d_a2; //! Device
gpuErrorCheck(cudaMalloc((void **) &d_res, config.get_flow_num() * sizeof(double)))\
gpuErrorCheck(cudaMalloc((void **) &d_c, COEF_NUM * sizeof(double)))\
gpuErrorCheck(cudaMalloc((void **) &d_a1, COEF_NUM * sizeof(double)))\
gpuErrorCheck(cudaMalloc((void **) &d_a2, COEF_NUM * sizeof(double)))\
gpuErrorCheck(cudaMemcpy(d_c, &(config.get_c()[0]), COEF_NUM * sizeof(double), cudaMemcpyHostToDevice));\
gpuErrorCheck(cudaMemcpy(d_a1, &(config.get_a1()[0]), COEF_NUM * sizeof(double), cudaMemcpyHostToDevice));\
gpuErrorCheck(cudaMemcpy(d_a2, &(config.get_a2()[0]), COEF_NUM * sizeof(double), cudaMemcpyHostToDevice));\
cuda_thread_integrate<<<1, config.get_flow_num()>>>(config.get_x().first, config.get_x().second,
config.get_y().first, config.get_y().second,
dxy, steps_per_thread, d_res, d_c, d_a1, d_a2);
/////////////////////////////////// Finalize result ///////////////////////////////////
double h_res[config.get_flow_num()]; // output buffer
gpuErrorCheck(cudaMemcpy(h_res, d_res, config.get_flow_num() * sizeof(double), cudaMemcpyDeviceToHost));
for (ptrdiff_t i = 0; i < config.get_flow_num(); ++i) {
res += h_res[i];
}
gpuErrorCheck(cudaFree(d_res))\
gpuErrorCheck(cudaFree(d_c))\
gpuErrorCheck(cudaFree(d_a1))\
gpuErrorCheck(cudaFree(d_a2))\
return res;
}
__global__ void
cuda_thread_integrate(const double start_x, const double end_x, double start_y, double end_y, double dxy,
size_t steps_per_thread, double *res, const double *d_c, const double *d_a1,
const double *d_a2) {
// cashed_device "array name" [size]
__shared__ double ch_d_c[COEF_NUM], ch_d_a1[COEF_NUM], ch_d_a2[COEF_NUM];
if (threadIdx.x == 0)
for (int i = 0; i < COEF_NUM; ++i) {
ch_d_c[i] = d_c[i];
ch_d_a1[i] = d_a1[i];
ch_d_a2[i] = d_a2[i];
}
// __shared__ double local_res[MAX_THREAD_NUM]; // local result
double diag, l_res = 0.0; // local result
start_y += dxy * steps_per_thread * threadIdx.x;
end_y = start_y + dxy * steps_per_thread;
double x = start_x, y = start_y;
while (y < end_y) {
while (x < end_x) {
for (uint8_t i = 0; i < COEF_NUM; ++i) {
diag = (x - ch_d_a1[i]) * (x - ch_d_a1[i]) + (y - ch_d_a2[i]) * (y - ch_d_a2[i]);
l_res += ch_d_c[i] * exp(-diag / static_cast<double>(M_PI)) * cos(static_cast<double>(M_PI) * diag);
}
x += dxy;
}
x = start_x;
y += dxy;
}
res[threadIdx.x] = -l_res * dxy * dxy;
}
|
shift.hip | // !!! This is a file automatically generated by hipify!!!
/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
// This file is a part of ChASE.
// Copyright (c) 2015-2021, Simulation and Data Laboratory Quantum Materials,
// Forschungszentrum Juelich GmbH, Germany. All rights reserved.
// License is 3-clause BSD:
// https://github.com/ChASE-library/ChASE
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
#include <omp.h>
#include <complex>
#include "rocblas.h"
#define BLOCKDIM 256
__global__ void zshift_matrix(hipDoubleComplex* A, int n, double shift) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) A[(idx)*n + idx].x += shift;
}
__global__ void zshift_mpi_matrix(hipDoubleComplex* A, std::size_t* off,
std::size_t n, std::size_t m, double shift) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (off[0] + j == (i + off[1]) && i < n && j < m) {
A[i * m + j].x += shift;
}
}
void chase_zshift_matrix(std::complex<double>* A, int n, double shift,
hipStream_t* stream_) {
int num_blocks = (n + (BLOCKDIM - 1)) / BLOCKDIM;
hipLaunchKernelGGL(( zshift_matrix), dim3(num_blocks), dim3(BLOCKDIM), 0, *stream_,
reinterpret_cast<hipDoubleComplex*>(A), n, shift);
}
void chase_zshift_mpi_matrix(std::complex<double>* A, std::size_t* off,
std::size_t n, std::size_t m, double shift,
hipStream_t* stream_) {
// x ^= i \in [0,n_]
// y ^= j \in [0,m_]
//dim3 threadsPerBlock(16, 16);
// dim3 numBlocks(n + (threadsPerBlock.x - 1) / threadsPerBlock.x,
// m + (threadsPerBlock.y - 1) / threadsPerBlock.y);
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(1, 1);
hipLaunchKernelGGL(( zshift_mpi_matrix), dim3(numBlocks), dim3(threadsPerBlock), 0, *stream_, //
reinterpret_cast<hipDoubleComplex*>(A), off, n, m, shift);
}
| shift.cu | /* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
// This file is a part of ChASE.
// Copyright (c) 2015-2021, Simulation and Data Laboratory Quantum Materials,
// Forschungszentrum Juelich GmbH, Germany. All rights reserved.
// License is 3-clause BSD:
// https://github.com/ChASE-library/ChASE
#include <cuComplex.h>
#include <cuda_runtime.h>
#include <omp.h>
#include <complex>
#include "cublas_v2.h"
#define BLOCKDIM 256
__global__ void zshift_matrix(cuDoubleComplex* A, int n, double shift) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) A[(idx)*n + idx].x += shift;
}
__global__ void zshift_mpi_matrix(cuDoubleComplex* A, std::size_t* off,
std::size_t n, std::size_t m, double shift) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (off[0] + j == (i + off[1]) && i < n && j < m) {
A[i * m + j].x += shift;
}
}
void chase_zshift_matrix(std::complex<double>* A, int n, double shift,
cudaStream_t* stream_) {
int num_blocks = (n + (BLOCKDIM - 1)) / BLOCKDIM;
zshift_matrix<<<num_blocks, BLOCKDIM, 0, *stream_>>>(
reinterpret_cast<cuDoubleComplex*>(A), n, shift);
}
void chase_zshift_mpi_matrix(std::complex<double>* A, std::size_t* off,
std::size_t n, std::size_t m, double shift,
cudaStream_t* stream_) {
// x ^= i \in [0,n_]
// y ^= j \in [0,m_]
//dim3 threadsPerBlock(16, 16);
// dim3 numBlocks(n + (threadsPerBlock.x - 1) / threadsPerBlock.x,
// m + (threadsPerBlock.y - 1) / threadsPerBlock.y);
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(1, 1);
zshift_mpi_matrix<<<numBlocks, threadsPerBlock, 0, *stream_>>>( //
reinterpret_cast<cuDoubleComplex*>(A), off, n, m, shift);
}
|
b7aa0001f1d11cae60ab52cb97e0bc99151241d0.hip | // !!! This is a file automatically generated by hipify!!!
//-nvcc -arch=sm_11 -m64 -O3 main.cu -o atomic.bin
#include<iostream>
#include<cstdlib>
#include <hip/hip_runtime.h>
#include <cassert>
#include <vector>
#define CHECK_ERROR(call) do { \
if( hipSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
hipGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
__global__
void kernel (int *vet, int *flag){
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
vet[index] = index + 1;
if (threadIdx.x == 0)
atomicAdd(&flag[0], 1);
}
using namespace std;
int main(int argc, char *argv[]){
int dominio = 32,
threads = 4;
vector <int> h_vet;
int *d_Vet = NULL,
*d_Flag = NULL;
cout << "\nOperacao atomica\n";
//Reset no device
CHECK_ERROR(hipDeviceReset());
//Alocando memria
h_vet.resize(dominio);
hipMalloc(reinterpret_cast<void**> (&d_Vet), dominio * sizeof(int));
hipMalloc(reinterpret_cast<void**> (&d_Flag), 1 * sizeof(int));
//Inicializando variveis
bzero(&(h_vet[0]), dominio * sizeof(float));
CHECK_ERROR(hipMemset(d_Vet, 0, dominio * sizeof(int)));
CHECK_ERROR(hipMemset(d_Flag, 0, 1 * sizeof(int)));
int blocos = dominio / threads;
cout << "Blocos: " << blocos << endl;
cout << "Threads: " << threads << endl;
hipLaunchKernelGGL(( kernel), dim3(blocos), dim3(threads), 0, 0, d_Vet, d_Flag);
CHECK_ERROR(hipDeviceSynchronize());
hipMemcpy(&(h_vet[0]), d_Vet, dominio * sizeof(int), hipMemcpyDeviceToHost);
for (int k = 0; k < dominio; k++)
cout << h_vet[k] << endl;
cout << endl;
hipMemcpy(&(h_vet[0]), d_Flag, 1 * sizeof(int), hipMemcpyDeviceToHost);
cout << "cada thread[0] soma 1: " << h_vet[0] << endl;
hipFree(d_Vet);
hipFree(d_Flag);
return EXIT_SUCCESS;
}
| b7aa0001f1d11cae60ab52cb97e0bc99151241d0.cu | //-nvcc -arch=sm_11 -m64 -O3 main.cu -o atomic.bin
#include<iostream>
#include<cstdlib>
#include <cuda_runtime.h>
#include <cassert>
#include <vector>
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
__global__
void kernel (int *vet, int *flag){
unsigned int index = blockDim.x * blockIdx.x + threadIdx.x;
vet[index] = index + 1;
if (threadIdx.x == 0)
atomicAdd(&flag[0], 1);
}
using namespace std;
int main(int argc, char *argv[]){
int dominio = 32,
threads = 4;
vector <int> h_vet;
int *d_Vet = NULL,
*d_Flag = NULL;
cout << "\nOperacao atomica\n";
//Reset no device
CHECK_ERROR(cudaDeviceReset());
//Alocando memória
h_vet.resize(dominio);
cudaMalloc(reinterpret_cast<void**> (&d_Vet), dominio * sizeof(int));
cudaMalloc(reinterpret_cast<void**> (&d_Flag), 1 * sizeof(int));
//Inicializando variáveis
bzero(&(h_vet[0]), dominio * sizeof(float));
CHECK_ERROR(cudaMemset(d_Vet, 0, dominio * sizeof(int)));
CHECK_ERROR(cudaMemset(d_Flag, 0, 1 * sizeof(int)));
int blocos = dominio / threads;
cout << "Blocos: " << blocos << endl;
cout << "Threads: " << threads << endl;
kernel<<<blocos, threads>>> (d_Vet, d_Flag);
CHECK_ERROR(cudaDeviceSynchronize());
cudaMemcpy(&(h_vet[0]), d_Vet, dominio * sizeof(int), cudaMemcpyDeviceToHost);
for (int k = 0; k < dominio; k++)
cout << h_vet[k] << endl;
cout << endl;
cudaMemcpy(&(h_vet[0]), d_Flag, 1 * sizeof(int), cudaMemcpyDeviceToHost);
cout << "cada thread[0] soma 1: " << h_vet[0] << endl;
cudaFree(d_Vet);
cudaFree(d_Flag);
return EXIT_SUCCESS;
}
|
2071cc2027cf5b2967931435a4d7a446732c1912.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
namespace {
/* __global__: ;;,3.2
;void;,
;,
gridblock,(<<< >>>);
a kernel,(GPUCUDAkernel(
),__global__);*/
__global__ void histogram(const unsigned char* src, int length, int* dst)
{
/* __shared__: __shared____device__
blockblock
block__shared____constant__
__shared__extern
__shared__CUDA C
__shared__CUDA C
*/
// clear out the accumulation buffer called temp since we are launched with
// 256 threads, it is easy to clear that memory with one write per thread
__shared__ int temp[256]; //
temp[threadIdx.x] = 0;
/* __syncthreads: CUDA
__syncthreads()
__syncthreads();block(shared
memory)(kernel
__syncthreads())clock()
clock()
__syncthreads()block
threadblock
thread */
__syncthreads();
/* gridDim: ,,,
,,.
dim3
blockDim: ,block.dim3,
block;,,
;
blockIdx: ,;
threadblockgrid,blockIdx.x
[0,gridDim.x-1],blockIdx.y[0, gridDim.y-1].uint3,
blockgrid;
threadIdx: ,;
threadblock;threadIdx.x,
threadIdx.y,threadIdx.z;uint3
,threadblock */
// calculate the starting index and the offset to the next block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < length) {
/* atomicAdd: ,
addr(atomic
function)3264
read-modify-write
atomicAdd(addr,y)
addryaddr */
atomicAdd(&temp[src[i]], 1);
i += stride;
}
// sync the data from the above writes to shared memory then add the shared memory values to the values from
// the other thread blocks using global memory atomic adds same as before, since we have 256 threads,
// updating the global histogram is just one write per thread!
__syncthreads();
//
atomicAdd(&(dst[threadIdx.x]), temp[threadIdx.x]);
}
__global__ void equalization(const unsigned char* src, int length, unsigned char* dst)
{
}
} // namespace
int histogram_equalization_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int hist_sz{ 256 }, length{ width * height }, byte_sz{ (int)sizeof(unsigned char) * length};
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
int* dev_hist{ nullptr };
// hipMalloc:
hipMalloc(&dev_src, byte_sz);
hipMalloc(&dev_dst, byte_sz);
hipMalloc(&dev_hist, hist_sz * sizeof(int));
/* hipMemcpy: ,:
(1). hipMemcpyHostToHost:
(2). hipMemcpyHostToDevice:
(3). hipMemcpyDeviceToHost:
(4). hipMemcpyDeviceToDevice:
(5). hipMemcpyDefault: ,
(CUDA6.0)
cudaMemcpy */
hipMemcpy(dev_src, src, byte_sz, hipMemcpyHostToDevice);
/* hipMemset: ,GPU
*/
hipMemset(dev_hist, 0, hist_sz * sizeof(int));
// hipDeviceProp_t: cuda
// kernel launch - 2x the number of mps gave best timing
hipDeviceProp_t prop;
// hipGetDeviceProperties: GPU
hipGetDeviceProperties(&prop, 0);
// hipDeviceProp_t::multiProcessorCount:
int blocks = prop.multiProcessorCount;
TIME_START_GPU
/* <<< >>>: CUDA,,
CUDA,,
;,
,,
;;
kernel,kernel,
GPU,;
API,<<<Dg,Db,Ns,S>>>
,Dgdim3,grid
.Dg,gridDg.x*Dg.y*Dg.zblock;Db
dim3,block.Db,
blockDb.x*Db.y*Db.zthread;Nsunsigned int,
,
(extern __shared__);Ns,0;S
cudaStream_t,.S,0. */
// GPU2
// Note: vectordata()cudaMalloccudaMemcpyvector
histogram << <blocks * 2, 256 >> >(dev_src, length, dev_hist);
TIME_END_GPU
hipMemcpy(dst, dev_dst, byte_sz, hipMemcpyDeviceToHost);
// hipFree: cudaMalloc
hipFree(dev_src);
hipFree(dev_hist);
hipFree(dev_dst);
return 0;
}
| 2071cc2027cf5b2967931435a4d7a446732c1912.cu | #include "funset.hpp"
#include <iostream>
#include <chrono>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
namespace {
/* __global__: 函数类型限定符;在设备上运行;在主机端调用,计算能力3.2及以上可以在
设备端调用;声明的函数的返回值必须是void类型;对此类型函数的调用是异步的,即在
设备完全完成它的运行之前就返回了;对此类型函数的调用必须指定执行配置,即用于在
设备上执行函数时的grid和block的维度,以及相关的流(即插入<<< >>>运算符);
a kernel,表示此函数为内核函数(运行在GPU上的CUDA并行计算函数称为kernel(内核函
数),内核函数必须通过__global__函数类型限定符定义);*/
__global__ void histogram(const unsigned char* src, int length, int* dst)
{
/* __shared__: 变量类型限定符;使用__shared__限定符,或者与__device__限
定符连用,此时声明的变量位于block中的共享存储器空间中,与block具有相同
的生命周期,仅可通过block内的所有线程访问;__shared__和__constant__变量
默认为是静态存储;在__shared__前可以加extern关键字,但表示的是变量大小
由执行参数确定;__shared__变量在声明时不能初始化;可以将CUDA C的关键字
__shared__添加到变量声明中,这将使这个变量驻留在共享内存中;CUDA C编译
器对共享内存中的变量与普通变量将分别采取不同的处理方式 */
// clear out the accumulation buffer called temp since we are launched with
// 256 threads, it is easy to clear that memory with one write per thread
__shared__ int temp[256]; // 共享内存缓冲区
temp[threadIdx.x] = 0;
/* __syncthreads: 对线程块中的线程进行同步;CUDA架构将确保,除非线程块
中的每个线程都执行了__syncthreads(),否则没有任何线程能执行
__syncthreads()之后的指令;在同一个block中的线程通过共享存储器(shared
memory)交换数据,并通过栅栏同步(可以在kernel函数中需要同步的位置调用
__syncthreads()函数)保证线程间能够正确地共享数据;使用clock()函数计时,
在内核函数中要测量的一段代码的开始和结束的位置分别调用一次clock()函数,
并将结果记录下来。由于调用__syncthreads()函数后,一个block中的所有
thread需要的时间是相同的,因此只需要记录每个block执行需要的时间就行了,
而不需要记录每个thread的时间 */
__syncthreads();
/* gridDim: 内置变量,用于描述线程网格的维度,对于所有线程块来说,这个
变量是一个常数,用来保存线程格每一维的大小,即每个线程格中线程块的数量.
为dim3类型;
blockDim: 内置变量,用于说明每个block的维度与尺寸.为dim3类型,包含
了block在三个维度上的尺寸信息;对于所有线程块来说,这个变量是一个常数,
保存的是线程块中每一维的线程数量;
blockIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程块的索引;用
于说明当前thread所在的block在整个grid中的位置,blockIdx.x取值范围是
[0,gridDim.x-1],blockIdx.y取值范围是[0, gridDim.y-1].为uint3类型,
包含了一个block在grid中各个维度上的索引信息;
threadIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程索引;用于
说明当前thread在block中的位置;如果线程是一维的可获取threadIdx.x,如果
是二维的还可获取threadIdx.y,如果是三维的还可获取threadIdx.z;为uint3类
型,包含了一个thread在block中各个维度的索引信息 */
// calculate the starting index and the offset to the next block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < length) {
/* atomicAdd: 原子操作,底层硬件将确保当执行这些原子操作时,其
它任何线程都不会读取或写入地址addr上的值。原子函数(atomic
function)对位于全局或共享存储器的一个32位或64位字执行
read-modify-write的原子操作。也就是说,当多个线程同时访问全局或
共享存储器的同一位置时,保证每个线程能够实现对共享可写数据的互
斥操作:在一个操作完成之前,其它任何线程都无法访问此地址。之所
以将这一过程称为原子操作,是因为每个线程的操作都不会影响到其它
线程。换句话说,原子操作能够保证对一个地址的当前操作完成之前,
其它线程都不能访问这个地址。
atomicAdd(addr,y):将生成一个原子的操作序列,这个操作序列包括读
取地址addr处的值,将y增加到这个值,以及将结果保存回地址addr。 */
atomicAdd(&temp[src[i]], 1);
i += stride;
}
// sync the data from the above writes to shared memory then add the shared memory values to the values from
// the other thread blocks using global memory atomic adds same as before, since we have 256 threads,
// updating the global histogram is just one write per thread!
__syncthreads();
// 将每个线程块的直方图合并为单个最终的直方图
atomicAdd(&(dst[threadIdx.x]), temp[threadIdx.x]);
}
__global__ void equalization(const unsigned char* src, int length, unsigned char* dst)
{
}
} // namespace
int histogram_equalization_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int hist_sz{ 256 }, length{ width * height }, byte_sz{ (int)sizeof(unsigned char) * length};
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
int* dev_hist{ nullptr };
// cudaMalloc: 在设备端分配内存
cudaMalloc(&dev_src, byte_sz);
cudaMalloc(&dev_dst, byte_sz);
cudaMalloc(&dev_hist, hist_sz * sizeof(int));
/* cudaMemcpy: 在主机端和设备端拷贝数据,此函数第四个参数仅能是下面之一:
(1). cudaMemcpyHostToHost: 拷贝数据从主机端到主机端
(2). cudaMemcpyHostToDevice: 拷贝数据从主机端到设备端
(3). cudaMemcpyDeviceToHost: 拷贝数据从设备端到主机端
(4). cudaMemcpyDeviceToDevice: 拷贝数据从设备端到设备端
(5). cudaMemcpyDefault: 从指针值自动推断拷贝数据方向,需要支持
统一虚拟寻址(CUDA6.0及以上版本)
cudaMemcpy函数对于主机是同步的 */
cudaMemcpy(dev_src, src, byte_sz, cudaMemcpyHostToDevice);
/* cudaMemset: 存储器初始化函数,在GPU内存上执行。用指定的值初始化或设置
设备内存 */
cudaMemset(dev_hist, 0, hist_sz * sizeof(int));
// cudaDeviceProp: cuda设备属性结构体
// kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
// cudaGetDeviceProperties: 获取GPU设备相关信息
cudaGetDeviceProperties(&prop, 0);
// cudaDeviceProp::multiProcessorCount: 设备上多处理器的数量
int blocks = prop.multiProcessorCount;
TIME_START_GPU
/* <<< >>>: 为CUDA引入的运算符,指定线程网格和线程块维度等,传递执行参
数给CUDA编译器和运行时系统,用于说明内核函数中的线程数量,以及线程是如何
组织的;尖括号中这些参数并不是传递给设备代码的参数,而是告诉运行时如何
启动设备代码,传递给设备代码本身的参数是放在圆括号中传递的,就像标准的函
数调用一样;不同计算能力的设备对线程的总数和组织方式有不同的约束;必须
先为kernel中用到的数组或变量分配好足够的空间,再调用kernel函数,否则在
GPU计算时会发生错误,例如越界等;
使用运行时API时,需要在调用的内核函数名与参数列表直接以<<<Dg,Db,Ns,S>>>
的形式设置执行配置,其中:Dg是一个dim3型变量,用于设置grid的维度和各个
维度上的尺寸.设置好Dg后,grid中将有Dg.x*Dg.y*Dg.z个block;Db是
一个dim3型变量,用于设置block的维度和各个维度上的尺寸.设置好Db后,每个
block中将有Db.x*Db.y*Db.z个thread;Ns是一个unsigned int型变量,指定各块为此调
用动态分配的共享存储器大小,这些动态分配的存储器可供声明为外部数组
(extern __shared__)的其他任何变量使用;Ns是一个可选参数,默认值为0;S为
cudaStream_t类型,用于设置与内核函数关联的流.S是一个可选参数,默认值0. */
// 当线程块的数量为GPU中处理器数量的2倍时,将达到最优性能
// Note: 核函数不支持传入参数为vector的data()指针,需要cudaMalloc和cudaMemcpy,因为vector是在主机内存中
histogram << <blocks * 2, 256 >> >(dev_src, length, dev_hist);
TIME_END_GPU
cudaMemcpy(dst, dev_dst, byte_sz, cudaMemcpyDeviceToHost);
// cudaFree: 释放设备上由cudaMalloc函数分配的内存
cudaFree(dev_src);
cudaFree(dev_hist);
cudaFree(dev_dst);
return 0;
}
|
bdc51488cfe3939765bfa83b70e17fe28e1c7a38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| bdc51488cfe3939765bfa83b70e17fe28e1c7a38.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
cabddb6cc48952dd62ca8c1bb9b142d6cbdf018e.hip | // !!! This is a file automatically generated by hipify!!!
#include "../../../cub-1.8.0/hipcub/hipcub.hpp" // or equivalently <cub/device/device_histogram.cuh>
#include "histo-helper.cu.h"
#define GPU_RUNS 200
int main (int argc, char * argv[]) {
if(argc != 3) {
printf("Expects two arguments: the image size and the histogram size! argc:%d\n", argc);
}
const uint32_t N = atoi(argv[1]);
const uint32_t H = atoi(argv[2]);
printf("Computing for image size: %d and histogram size: %d\n", N, H);
//Allocate and Initialize Host data with random values
uint32_t* h_data = (uint32_t*)malloc(N*sizeof(uint32_t));
uint32_t* h_histo = (uint32_t*)malloc(H*sizeof(uint32_t));
uint32_t* g_histo = (uint32_t*)malloc(H*sizeof(uint32_t));
randomInit(h_data, N, H);
{ // golden sequential histogram
double elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
histoGold(h_data, N, H, g_histo);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Golden (Sequential) Histogram runs in: %.2f microsecs\n", elapsed);
}
//Allocate and Initialize Device data
uint32_t* d_data;
uint32_t* d_histo;
hipMalloc ((void**) &d_data, N * sizeof(uint32_t));
hipMalloc ((void**) &d_histo, H * sizeof(uint32_t));
hipMemcpy(d_data, h_data, N * sizeof(uint32_t), hipMemcpyHostToDevice);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
{ // CUB histogram version
//hipMemset(d_histo, 0, H * sizeof(uint32_t));
// Determine temporary device storage requirements
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N);
// Allocate temporary storage
hipMalloc(&d_temp_storage, temp_storage_bytes);
{ // one dry run
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N );
hipDeviceSynchronize();
}
double elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// Compute histogram
for(uint32_t k=0; k<GPU_RUNS; k++) {
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N );
}
hipDeviceSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec) / ((double)GPU_RUNS);
hipMemcpy (h_histo, d_histo, H*sizeof(uint32_t), hipMemcpyDeviceToHost);
printf("CUB Histogram ... ");
validate(g_histo, h_histo, H);
printf("CUB Histogram runs in: %.2f microsecs\n", elapsed);
double gigaBytesPerSec = 3 * N * sizeof(uint32_t) * 1.0e-3f / elapsed;
printf( "CUB Histogram GBytes/sec = %.2f!\n", gigaBytesPerSec);
}
// Cleanup and closing
hipFree(d_data); hipFree(d_histo); hipFree(d_temp_storage);
free(h_data); free(g_histo); free(h_histo);
return 0;
}
| cabddb6cc48952dd62ca8c1bb9b142d6cbdf018e.cu | #include "../../../cub-1.8.0/cub/cub.cuh" // or equivalently <cub/device/device_histogram.cuh>
#include "histo-helper.cu.h"
#define GPU_RUNS 200
int main (int argc, char * argv[]) {
if(argc != 3) {
printf("Expects two arguments: the image size and the histogram size! argc:%d\n", argc);
}
const uint32_t N = atoi(argv[1]);
const uint32_t H = atoi(argv[2]);
printf("Computing for image size: %d and histogram size: %d\n", N, H);
//Allocate and Initialize Host data with random values
uint32_t* h_data = (uint32_t*)malloc(N*sizeof(uint32_t));
uint32_t* h_histo = (uint32_t*)malloc(H*sizeof(uint32_t));
uint32_t* g_histo = (uint32_t*)malloc(H*sizeof(uint32_t));
randomInit(h_data, N, H);
{ // golden sequential histogram
double elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
histoGold(h_data, N, H, g_histo);
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("Golden (Sequential) Histogram runs in: %.2f microsecs\n", elapsed);
}
//Allocate and Initialize Device data
uint32_t* d_data;
uint32_t* d_histo;
cudaMalloc ((void**) &d_data, N * sizeof(uint32_t));
cudaMalloc ((void**) &d_histo, H * sizeof(uint32_t));
cudaMemcpy(d_data, h_data, N * sizeof(uint32_t), cudaMemcpyHostToDevice);
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
{ // CUB histogram version
//cudaMemset(d_histo, 0, H * sizeof(uint32_t));
// Determine temporary device storage requirements
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N);
// Allocate temporary storage
cudaMalloc(&d_temp_storage, temp_storage_bytes);
{ // one dry run
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N );
cudaThreadSynchronize();
}
double elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
// Compute histogram
for(uint32_t k=0; k<GPU_RUNS; k++) {
cub::DeviceHistogram::HistogramEven( d_temp_storage, temp_storage_bytes
, d_data, d_histo, H+1, (uint32_t)0
, H, (int32_t)N );
}
cudaThreadSynchronize();
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec) / ((double)GPU_RUNS);
cudaMemcpy (h_histo, d_histo, H*sizeof(uint32_t), cudaMemcpyDeviceToHost);
printf("CUB Histogram ... ");
validate(g_histo, h_histo, H);
printf("CUB Histogram runs in: %.2f microsecs\n", elapsed);
double gigaBytesPerSec = 3 * N * sizeof(uint32_t) * 1.0e-3f / elapsed;
printf( "CUB Histogram GBytes/sec = %.2f!\n", gigaBytesPerSec);
}
// Cleanup and closing
cudaFree(d_data); cudaFree(d_histo); cudaFree(d_temp_storage);
free(h_data); free(g_histo); free(h_histo);
return 0;
}
|
832ad18f050608966c47688f4376b84b61564fcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of hipcub::BlockScan
*
* To compile using the command line:
* nvcc -arch=sm_XX example_block_scan.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <cub/block/block_scan.cuh>
#include "../../test/test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
* Simple kernel for performing a block-wide exclusive prefix sum over integers
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
__global__ void BlockPrefixSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile of output
clock_t *d_elapsed) // Elapsed cycle count of block scan
{
// Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockLoad<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
// Specialize BlockStore type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockStore<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, BLOCK_THREADS, ALGORITHM> BlockScanT;
// Shared memory
__shared__ union TempStorage
{
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
// Per-thread tile data
int data[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
BlockLoadT(temp_storage.load).Load(d_in, data);
// Barrier for smem reuse
__syncthreads();
// Start cycle timer
clock_t start = clock();
// Compute exclusive prefix sum
int aggregate;
BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate);
// Stop cycle timer
clock_t stop = clock();
// Barrier for smem reuse
__syncthreads();
// Store items from a blocked arrangement
BlockStoreT(temp_storage.store).Store(d_out, data);
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize exclusive prefix sum problem (and solution).
* Returns the aggregate
*/
int Initialize(
int *h_in,
int *h_reference,
int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
h_reference[i] = inclusive;
inclusive += h_in[i];
}
return inclusive;
}
/**
* Test thread block scan
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_reference = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
hipMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1));
hipMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
hipLaunchKernelGGL(( BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
d_elapsed);
// Check results
printf("\tOutput items: ");
int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check total aggregate
printf("\tAggregate: ");
compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
hipLaunchKernelGGL(( BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(hipMemcpy(&clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_gpu) delete[] h_gpu;
if (d_in) hipFree(d_in);
if (d_out) hipFree(d_out);
if (d_elapsed) hipFree(d_elapsed);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations (default:%d)>]"
"[--grid-size=<grid size (default:%d)>]"
"[--v] "
"\n", argv[0], g_timing_iterations, g_grid_size);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_SCAN_RAKING>();
Test<512, 2, BLOCK_SCAN_RAKING>();
Test<256, 4, BLOCK_SCAN_RAKING>();
Test<128, 8, BLOCK_SCAN_RAKING>();
Test<64, 16, BLOCK_SCAN_RAKING>();
Test<32, 32, BLOCK_SCAN_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_WARP_SCANS>();
Test<512, 2, BLOCK_SCAN_WARP_SCANS>();
Test<256, 4, BLOCK_SCAN_WARP_SCANS>();
Test<128, 8, BLOCK_SCAN_WARP_SCANS>();
Test<64, 16, BLOCK_SCAN_WARP_SCANS>();
Test<32, 32, BLOCK_SCAN_WARP_SCANS>();
return 0;
}
| 832ad18f050608966c47688f4376b84b61564fcf.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of cub::BlockScan
*
* To compile using the command line:
* nvcc -arch=sm_XX example_block_scan.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_scan.cuh>
#include "../../test/test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
* Simple kernel for performing a block-wide exclusive prefix sum over integers
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
__global__ void BlockPrefixSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile of output
clock_t *d_elapsed) // Elapsed cycle count of block scan
{
// Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockLoad<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
// Specialize BlockStore type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
typedef BlockStore<int, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_WARP_TRANSPOSE> BlockStoreT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, BLOCK_THREADS, ALGORITHM> BlockScanT;
// Shared memory
__shared__ union TempStorage
{
typename BlockLoadT::TempStorage load;
typename BlockStoreT::TempStorage store;
typename BlockScanT::TempStorage scan;
} temp_storage;
// Per-thread tile data
int data[ITEMS_PER_THREAD];
// Load items into a blocked arrangement
BlockLoadT(temp_storage.load).Load(d_in, data);
// Barrier for smem reuse
__syncthreads();
// Start cycle timer
clock_t start = clock();
// Compute exclusive prefix sum
int aggregate;
BlockScanT(temp_storage.scan).ExclusiveSum(data, data, aggregate);
// Stop cycle timer
clock_t stop = clock();
// Barrier for smem reuse
__syncthreads();
// Store items from a blocked arrangement
BlockStoreT(temp_storage.store).Store(d_out, data);
// Store aggregate and elapsed clocks
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize exclusive prefix sum problem (and solution).
* Returns the aggregate
*/
int Initialize(
int *h_in,
int *h_reference,
int num_items)
{
int inclusive = 0;
for (int i = 0; i < num_items; ++i)
{
h_in[i] = i % 17;
h_reference[i] = inclusive;
inclusive += h_in[i];
}
return inclusive;
}
/**
* Test thread block scan
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockScanAlgorithm ALGORITHM>
void Test()
{
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Allocate host arrays
int *h_in = new int[TILE_SIZE];
int *h_reference = new int[TILE_SIZE];
int *h_gpu = new int[TILE_SIZE + 1];
// Initialize problem and reference output on host
int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE);
// Initialize device arrays
int *d_in = NULL;
int *d_out = NULL;
clock_t *d_elapsed = NULL;
cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
cudaMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1));
cudaMalloc((void**)&d_elapsed, sizeof(clock_t));
// Display input problem data
if (g_verbose)
{
printf("Input data: ");
for (int i = 0; i < TILE_SIZE; i++)
printf("%d, ", h_in[i]);
printf("\n\n");
}
// Kernel props
int max_sm_occupancy;
CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
printf("BlockScan algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
(ALGORITHM == BLOCK_SCAN_RAKING) ? "BLOCK_SCAN_RAKING" : (ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE) ? "BLOCK_SCAN_RAKING_MEMOIZE" : "BLOCK_SCAN_WARP_SCANS",
TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
// Check results
printf("\tOutput items: ");
int compare = CompareDeviceResults(h_reference, d_out, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check total aggregate
printf("\tAggregate: ");
compare = CompareDeviceResults(&h_aggregate, d_out + TILE_SIZE, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Run this several times and average the performance results
GpuTimer timer;
float elapsed_millis = 0.0;
clock_t elapsed_clocks = 0;
for (int i = 0; i < g_timing_iterations; ++i)
{
// Copy problem to device
cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice);
timer.Start();
// Run aggregate/prefix kernel
BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
d_in,
d_out,
d_elapsed);
timer.Stop();
elapsed_millis += timer.ElapsedMillis();
// Copy clocks from device
clock_t clocks;
CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost));
elapsed_clocks += clocks;
}
// Check for kernel errors and STDIO from the kernel, if any
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Display timing results
float avg_millis = elapsed_millis / g_timing_iterations;
float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f;
float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
float avg_clocks_per_item = avg_clocks / TILE_SIZE;
printf("\tAverage BlockScan::Sum clocks: %.3f\n", avg_clocks);
printf("\tAverage BlockScan::Sum clocks per item: %.3f\n", avg_clocks_per_item);
printf("\tAverage kernel millis: %.4f\n", avg_millis);
printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_gpu) delete[] h_gpu;
if (d_in) cudaFree(d_in);
if (d_out) cudaFree(d_out);
if (d_elapsed) cudaFree(d_elapsed);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations (default:%d)>]"
"[--grid-size=<grid size (default:%d)>]"
"[--v] "
"\n", argv[0], g_timing_iterations, g_grid_size);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Run tests
Test<1024, 1, BLOCK_SCAN_RAKING>();
Test<512, 2, BLOCK_SCAN_RAKING>();
Test<256, 4, BLOCK_SCAN_RAKING>();
Test<128, 8, BLOCK_SCAN_RAKING>();
Test<64, 16, BLOCK_SCAN_RAKING>();
Test<32, 32, BLOCK_SCAN_RAKING>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<512, 2, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<256, 4, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<128, 8, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<64, 16, BLOCK_SCAN_RAKING_MEMOIZE>();
Test<32, 32, BLOCK_SCAN_RAKING_MEMOIZE>();
printf("-------------\n");
Test<1024, 1, BLOCK_SCAN_WARP_SCANS>();
Test<512, 2, BLOCK_SCAN_WARP_SCANS>();
Test<256, 4, BLOCK_SCAN_WARP_SCANS>();
Test<128, 8, BLOCK_SCAN_WARP_SCANS>();
Test<64, 16, BLOCK_SCAN_WARP_SCANS>();
Test<32, 32, BLOCK_SCAN_WARP_SCANS>();
return 0;
}
|
b0f4cdc4bef9d334fc667f8304b5b6c44e44b5a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#include "max_pooling_common.h"
#include "max_pooling.cuh"
using namespace std;
extern "C" float max_pooling(
float *input,
float *output,
int width_input,
int height_input,
int deep_input,
int width_kernel,
int height_kernel,
int width_output,
int height_output,
int deep_output,
int stride_x,
int stride_y,
int padding_x,
int padding_y
){
dim3 grid(width_output, height_output, deep_output);
dim3 thread(width_kernel,height_kernel);
hipError_t error;
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(( max_pooling_kernel), dim3(grid), dim3(thread) , 0, 0,
input,
output,
width_input,
height_input,
deep_input,
width_output,
height_output,
deep_output,
stride_x,
stride_y,
padding_x,
padding_y
);
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
return msecTotal;
} | b0f4cdc4bef9d334fc667f8304b5b6c44e44b5a5.cu | #include <iostream>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#include "max_pooling_common.h"
#include "max_pooling.cuh"
using namespace std;
extern "C" float max_pooling(
float *input,
float *output,
int width_input,
int height_input,
int deep_input,
int width_kernel,
int height_kernel,
int width_output,
int height_output,
int deep_output,
int stride_x,
int stride_y,
int padding_x,
int padding_y
){
dim3 grid(width_output, height_output, deep_output);
dim3 thread(width_kernel,height_kernel);
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
max_pooling_kernel<<< grid, thread >>>(
input,
output,
width_input,
height_input,
deep_input,
width_output,
height_output,
deep_output,
stride_x,
stride_y,
padding_x,
padding_y
);
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
return msecTotal;
} |
f39bb576f2aba1a6b3e1b825b4bef4eff0b949cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "conv_kernels.cuh"
#include "im2col.cuh"
#include "utils.cuh"
#include <iostream>
#include <stdint.h>
void conv_1x1_im2col_test() {
// input size
const int64_t BATCH_SIZE = 64;
const int64_t Ci = 128;
const int64_t Hi = 128;
const int64_t Wi = 128;
// kernel size
const int64_t Co = 128;
const int64_t Hk = 1;
const int64_t Wk = 1;
// padding, stride and dilation
const int64_t pad_h = 0;
const int64_t pad_w = 0;
const int64_t stride_h = 1;
const int64_t stride_w = 1;
const int64_t dilation_h = 1;
const int64_t dilation_w = 1;
// output size
const int64_t Ho = Hi;
const int64_t Wo = Wi;
// host data
float* input = nullptr;
float* kernel = nullptr;
float* output = nullptr;
const int64_t size_input = BATCH_SIZE * Ci * Hi * Wi;
const int64_t size_kernel = Co * Ci * Hk * Wk;
const int64_t size_output = BATCH_SIZE * Co * Ho * Wo;
malloc_and_init(&input, size_input);
malloc_and_init(&kernel, size_kernel);
malloc_and_init(&output, size_output);
// device data
float* d_input = nullptr;
float* d_kernel = nullptr;
float* d_output = nullptr;
CUDA_CHECK(hipMalloc(&d_input, size_input * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_kernel, size_kernel * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_output, size_output * sizeof(float)));
CUDA_CHECK(hipMemcpy(d_input, input, size_input * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_kernel, kernel, size_kernel * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_output, output, size_output * sizeof(float), hipMemcpyHostToDevice));
// prepare matrix size
const int64_t M = Co;
const int64_t K = Ci;
const int64_t N = Ho * Wo;
// conv ref
float* output_ref = nullptr;
float* d_output_ref = nullptr;
malloc_and_init(&output_ref, size_output);
CUDA_CHECK(hipMalloc(&d_output_ref, size_output * sizeof(float)));
CUDA_CHECK(hipMemcpy(d_output_ref, output_ref, size_output * sizeof(float), hipMemcpyHostToDevice));
float time_cudnn;
convCuDNN(BATCH_SIZE, Ci, Hi, Wi, d_input,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
Co, Hk, Wk, d_kernel, Ho, Wo, d_output_ref, &time_cudnn);
time_cudnn = time_cudnn / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_cudnn",
time_cudnn,
1.0);
// conv1x1
float elapsedTime_conv_1x1;
dim3 dims_block_conv_1x1(NTX, NTY);
dim3 dims_grid_conv_1x1(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// warm up
hipLaunchKernelGGL(( kernel_conv_im2col_align), dim3(dims_grid_conv_1x1), dim3(dims_block_conv_1x1), 0, 0,
M, N, K, (float4*)d_kernel, (float4*)d_input, d_output);
hipEvent_t start_conv_1x1, stop_conv_1x1;
CUDA_CHECK(hipEventCreate(&start_conv_1x1));
CUDA_CHECK(hipEventCreate(&stop_conv_1x1));
CUDA_CHECK(hipEventRecord(start_conv_1x1, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
hipLaunchKernelGGL(( kernel_conv_im2col_align), dim3(dims_grid_conv_1x1), dim3(dims_block_conv_1x1), 0, 0,
M, N, K, (float4*)d_kernel, (float4*)d_input, d_output);
}
CUDA_CHECK(hipEventRecord(stop_conv_1x1, 0));
CUDA_CHECK(hipEventSynchronize(stop_conv_1x1));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_conv_1x1, start_conv_1x1, stop_conv_1x1));
CUDA_CHECK(hipEventDestroy(start_conv_1x1));
CUDA_CHECK(hipEventDestroy(stop_conv_1x1));
elapsedTime_conv_1x1 = elapsedTime_conv_1x1 / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_1x1",
elapsedTime_conv_1x1,
time_cudnn/ elapsedTime_conv_1x1);
// copy result to host
CUDA_CHECK(hipMemcpy(output_ref, d_output_ref, size_output * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK(hipMemcpy(output, d_output, size_output * sizeof(float), hipMemcpyDeviceToHost));
// check
std::cout << "check correctness..." << std::endl;
bool error = false;
for (int64_t i = 0; i < BATCH_SIZE * Co * Ho * Wo; ++i) {
error = error || (std::abs(output[i] - output_ref[i]) > 1e-3);
}
std::cout << "error: " << error << std::endl;
// free memory
delete[] input;
delete[] kernel;
delete[] output;
delete[] output_ref;
hipFree(d_input);
hipFree(d_kernel);
hipFree(d_output);
hipFree(d_output_ref);
}
void conv_NxN_im2col_with_batch_test() {
// input size
const int64_t BATCH_SIZE = 32;
const int64_t Ci = 128;
const int64_t Hi = 128;
const int64_t Wi = 128;
// kernel size
const int64_t Co = 128;
const int64_t Hk = 4;
const int64_t Wk = 4;
// padding, stride and dilation
const int64_t pad_h = 2;
const int64_t pad_w = 2;
const int64_t stride_h = 2;
const int64_t stride_w = 2;
const int64_t dilation_h = 1;
const int64_t dilation_w = 1;
// output size
const int64_t Ho = (Hi - ((Hk-1)*dilation_h+1) + 2*pad_h) / stride_h + 1;
const int64_t Wo = (Wi - ((Wk-1)*dilation_w+1) + 2*pad_w) / stride_w + 1;
// prepare matrix size
const int64_t alignment = BLOCK_SIZE_L;
// const int64_t alignment = 8;
const int64_t M = Co;
const int64_t K = Ci * Hk * Wk;
const int64_t N = Ho * Wo;
const int64_t M_align = align(M, alignment);
const int64_t K_align = align(K, alignment);
const int64_t N_align = align(N, alignment);
// column size
const int64_t Hc_align = K_align;
const int64_t Wc_align = N_align;
// host data
float* input = nullptr;
float* kernel = nullptr;
float* column = nullptr;
float* column_align = nullptr;
float* output = nullptr;
float* output_align = nullptr;
const int64_t size_input = BATCH_SIZE * Ci * Hi * Wi;
const int64_t size_kernel = M * K; // (Co * Ci * Hk * Wk)
const int64_t size_kernel_align = M_align * K_align;
const int64_t size_column = BATCH_SIZE * Hk * Wk * Ho * Wo * Ci;
// std::cout << "size_column="<<size_column << std::endl;
// std::cout << "Hk="<<Hk << std::endl;
// std::cout << "Wk="<<Wk << std::endl;
// std::cout << "Ho="<<Ho << std::endl;
// std::cout << "Wo="<<Wo << std::endl;
// std::cout << "Ci="<<Ci << std::endl;
const int64_t size_column_align = BATCH_SIZE * Hc_align * Wc_align;
const int64_t size_output = BATCH_SIZE * M * N; // = BATCH_SIZE * Co * Ho * Wo
const int64_t size_output_align = BATCH_SIZE * M_align * N_align;
malloc_and_init(&input, size_input);
malloc_and_init(&kernel, size_kernel);
malloc_and_init(&column, size_column);
malloc_and_init(&column_align, size_column_align);
malloc_and_init(&output, size_output);
malloc_and_init(&output_align, size_output_align);
// device data
float* d_input = nullptr;
float* d_kernel = nullptr;
float* d_kernel_align = nullptr;
float* d_column = nullptr;
float* d_column_align = nullptr;
float* d_output = nullptr;
float* d_output_align = nullptr;
CUDA_CHECK(hipMalloc(&d_input, size_input * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_kernel, size_kernel * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_kernel_align, size_kernel_align * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_column, size_column * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_column_align, size_column_align * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_output, size_output * sizeof(float)));
CUDA_CHECK(hipMalloc(&d_output_align, size_output_align * sizeof(float)));
CUDA_CHECK(hipMemcpy(d_input, input, size_input * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_kernel, kernel, size_kernel * sizeof(float), hipMemcpyHostToDevice));
// conv ref
float* output_ref = nullptr;
float* d_output_ref = nullptr;
malloc_and_init(&output_ref, size_output);
CUDA_CHECK(hipMalloc(&d_output_ref, size_output * sizeof(float)));
CUDA_CHECK(hipMemcpy(d_output_ref, output_ref, size_output * sizeof(float), hipMemcpyHostToDevice));
float time_cudnn;
convCuDNN(BATCH_SIZE, Ci, Hi, Wi, d_input,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
Co, Hk, Wk, d_kernel, Ho, Wo, d_output_ref, &time_cudnn);
time_cudnn = time_cudnn / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_cudnn",
time_cudnn,
1.0);
// conv im2col
float elapsedTime_conv;
dim3 dims_block_conv(NTX, NTY);
dim3 dims_grid_conv(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// dim3 dims_block_conv(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dims_grid_conv(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// im2col
// kernel_im2col_align_with_batch<<<1024, 1024>>>(
// alignment,
// BATCH_SIZE,
// Ci,
// Hi, Wi,
// Ho, Wo,
// Hk, Wk,
// stride_w, stride_h,
// pad_w, pad_h,
// dilation_w, dilation_h,
// d_input,
// d_column_align);
hipLaunchKernelGGL(( kernel_im2col_align), dim3(1024), dim3(1024), 0, 0,
alignment,
Ci,
Hi, Wi,
Ho, Wo,
Hk, Wk,
stride_w, stride_h,
pad_w, pad_h,
dilation_w, dilation_h,
d_input,
d_column_align);
// kernel_im2col<<<1024, 1024>>>(
// Ci,
// Hi, Wi,
// Ho, Wo,
// Hk, Wk,
// stride_w, stride_h,
// pad_w, pad_h,
// dilation_w, dilation_h,
// d_input,
// d_column);
// CUDA_CHECK(hipDeviceSynchronize());
// CUDA_CHECK(hipMemcpy(column, d_column, size_column * sizeof(float), hipMemcpyDeviceToHost));
// CUDA_CHECK(hipMemcpy(column_align, d_column_align, size_column_align * sizeof(float), hipMemcpyDeviceToHost));
// CUDA_CHECK(hipDeviceSynchronize());
// std::cout << "input1:" << std::endl;
// print_matrix(input, Hi, Wi);
// std::cout << "input2:" << std::endl;
// print_matrix(input+Wi*Hi, Hi, Wi);
// std::cout << "col:" << std::endl;
// print_matrix(column, Ci*Hk*Wk, Wo*Ho);
// std::cout << "col_align:" << std::endl;
// print_matrix(column_align, Hc_align, Wc_align);
// warm up
//hipLaunchKernelGGL(( kernel_conv_im2col_align), dim3(dims_grid_conv), dim3(dims_block_conv), 0, 0,
// M_align, N_align, K_align, (float4*)d_kernel_align, (float4*)d_column_align, d_output_align);
// align
float time_padding = padding(d_kernel, d_kernel_align, M, K, M_align, K_align);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST);
hipEvent_t start_conv, stop_conv;
CUDA_CHECK(hipEventCreate(&start_conv));
CUDA_CHECK(hipEventCreate(&stop_conv));
CUDA_CHECK(hipEventRecord(start_conv, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
hipLaunchKernelGGL(( kernel_conv_im2col_align), dim3(dims_grid_conv), dim3(dims_block_conv), 0, 0,
M_align, N_align, K_align, (float4*)d_kernel_align, (float4*)d_column_align, d_output_align);
// gemmCublas(M_align, N_align, K_align, d_kernel_align, d_column_align, d_output_align, BATCH_SIZE, handle);
// kernel_shared_4w<<<dims_grid_conv, dims_block_conv>>>(
// M, N, K, d_kernel, d_column, d_output);
}
CUDA_CHECK(hipEventRecord(stop_conv, 0));
CUDA_CHECK(hipEventSynchronize(stop_conv));
CUDA_CHECK(hipEventElapsedTime(&elapsedTime_conv, start_conv, stop_conv));
CUDA_CHECK(hipEventDestroy(start_conv));
CUDA_CHECK(hipEventDestroy(stop_conv));
float time_unpadding = unpadding_with_batch(d_output, d_output_align, M, N, M_align, N_align, BATCH_SIZE);
// float time_unpadding = unpadding(d_output, d_output_align, M, N, M_align, N_align);
elapsedTime_conv = elapsedTime_conv / n_rounds + time_unpadding + time_padding;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_NxN_im2col",
elapsedTime_conv,
time_cudnn / elapsedTime_conv);
// copy result to host
// CUDA_CHECK(hipMemcpy(output_ref, d_output_ref, size_output * sizeof(float), hipMemcpyDeviceToHost));
// CUDA_CHECK(hipMemcpy(output, d_output, size_output * sizeof(float), hipMemcpyDeviceToHost));
// CUDA_CHECK(hipMemcpy(output_align, d_output_align, size_output_align * sizeof(float), hipMemcpyDeviceToHost));
// std::cout << "output_ref:" << std::endl;
// print_matrix(output_ref, M, N);
// std::cout << "output:" << std::endl;
// print_matrix(output, M, N_align);
// check
std::cout << "check correctness..." << std::endl;
bool error = false;
for (int64_t i = 0; i < BATCH_SIZE * Co * Ho * Wo; ++i) {
// std::cout<<output[i] - output_ref[i]<<", ";
error = error || (std::abs(output[i] - output_ref[i]) > 1e-3);
}
std::cout << "error: " << error << std::endl;
// free memory
free(input);
free(kernel);
free(output);
free(output_ref);
hipFree(d_input);
hipFree(d_kernel);
hipFree(d_column_align);
hipFree(d_output);
hipFree(d_output_align);
hipFree(d_output_ref);
free(column);
hipFree(d_column);
free(column_align);
}
void convCuDNN(
const int64_t BATCH_SIZE, const int64_t Ci, const int64_t Hi, const int64_t Wi, const float* input,
const int64_t pad_h, const int64_t pad_w,
const int64_t stride_h, const int64_t stride_w,
const int64_t dilation_h, const int64_t dilation_w,
const int64_t Co, const int64_t Hk, const int64_t Wk, const float* kernel,
const int64_t Ho, const int64_t Wo, float* output,
float * time_ptr) {
//handle
cudnnHandle_t handle;
cudnnCreate(&handle);
// tensor descriptor
cudnnTensorDescriptor_t input_desc;
cudnnTensorDescriptor_t output_desc;
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(
input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
BATCH_SIZE, Ci, Hi, Wi));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(
output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
BATCH_SIZE, Co, Ho, Wo));
// kernel
cudnnFilterDescriptor_t kernel_desc;
CUDNN_CHECK(cudnnCreateFilterDescriptor(&kernel_desc));
CUDNN_CHECK(cudnnSetFilter4dDescriptor(
kernel_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
Co, Ci, Hk, Wk));
// convolution descriptor
cudnnConvolutionDescriptor_t conv_desc;
cudnnCreateConvolutionDescriptor(&conv_desc);
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc,
pad_h, pad_w, // padding
stride_h, stride_w, // stride
dilation_h, dilation_w, // dilation
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// algorithm
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
handle, input_desc, kernel_desc, conv_desc, output_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0, &algo));
// workspace size && allocate memory
size_t workspace_size = 0;
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle,
input_desc, kernel_desc, conv_desc, output_desc,
algo, &workspace_size));
void * workspace = nullptr;
CUDA_CHECK(hipMalloc(&workspace, workspace_size));
// convolution
auto alpha = 1.0f, beta = 0.0f;
// warm up
CUDNN_CHECK(cudnnConvolutionForward(handle,
&alpha, input_desc, input,
kernel_desc, kernel,
conv_desc, algo,
workspace, workspace_size,
&beta, output_desc, output));
hipEvent_t start_conv_ref, stop_conv_ref;
CUDA_CHECK(hipEventCreate(&start_conv_ref));
CUDA_CHECK(hipEventCreate(&stop_conv_ref));
CUDA_CHECK(hipEventRecord(start_conv_ref, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
CUDNN_CHECK(cudnnConvolutionForward(handle,
&alpha, input_desc, input,
kernel_desc, kernel,
conv_desc, algo,
workspace, workspace_size,
&beta, output_desc, output));
}
CUDA_CHECK(hipEventRecord(stop_conv_ref, 0));
CUDA_CHECK(hipEventSynchronize(stop_conv_ref));
CUDA_CHECK(hipEventElapsedTime(time_ptr, start_conv_ref, stop_conv_ref));
CUDA_CHECK(hipEventDestroy(start_conv_ref));
CUDA_CHECK(hipEventDestroy(stop_conv_ref));
// destroy
hipFree(workspace);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroyFilterDescriptor(kernel_desc);
cudnnDestroy(handle);
}
void gemmCublas(
const int64_t M, const int64_t N, const int64_t K,
const float* A, const float* B, float* C,
const int64_t batch_size, hipblasHandle_t& handle
) {
hipblasOperation_t transa = HIPBLAS_OP_N;
hipblasOperation_t transb = HIPBLAS_OP_N;
const int m = N;
const int k = K;
const int n = M;
const int lda = K;
const int ldb = N;
const int ldc = N;
using scalar_t = float;
const scalar_t *a = A;
const scalar_t *b = B;
scalar_t *c = C;
scalar_t alpha = 1, beta = 0;
for (int i = 0; i < batch_size; ++i)
hipblasSgemm(handle, transb, transa, m, n, k,
&alpha, b+K*N*i, ldb, a+M*K*i, lda, &beta, c+M*N*i, ldc);
} | f39bb576f2aba1a6b3e1b825b4bef4eff0b949cc.cu | #include "conv_kernels.cuh"
#include "im2col.cuh"
#include "utils.cuh"
#include <iostream>
#include <stdint.h>
void conv_1x1_im2col_test() {
// input size
const int64_t BATCH_SIZE = 64;
const int64_t Ci = 128;
const int64_t Hi = 128;
const int64_t Wi = 128;
// kernel size
const int64_t Co = 128;
const int64_t Hk = 1;
const int64_t Wk = 1;
// padding, stride and dilation
const int64_t pad_h = 0;
const int64_t pad_w = 0;
const int64_t stride_h = 1;
const int64_t stride_w = 1;
const int64_t dilation_h = 1;
const int64_t dilation_w = 1;
// output size
const int64_t Ho = Hi;
const int64_t Wo = Wi;
// host data
float* input = nullptr;
float* kernel = nullptr;
float* output = nullptr;
const int64_t size_input = BATCH_SIZE * Ci * Hi * Wi;
const int64_t size_kernel = Co * Ci * Hk * Wk;
const int64_t size_output = BATCH_SIZE * Co * Ho * Wo;
malloc_and_init(&input, size_input);
malloc_and_init(&kernel, size_kernel);
malloc_and_init(&output, size_output);
// device data
float* d_input = nullptr;
float* d_kernel = nullptr;
float* d_output = nullptr;
CUDA_CHECK(cudaMalloc(&d_input, size_input * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_kernel, size_kernel * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_output, size_output * sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_input, input, size_input * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_kernel, kernel, size_kernel * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_output, output, size_output * sizeof(float), cudaMemcpyHostToDevice));
// prepare matrix size
const int64_t M = Co;
const int64_t K = Ci;
const int64_t N = Ho * Wo;
// conv ref
float* output_ref = nullptr;
float* d_output_ref = nullptr;
malloc_and_init(&output_ref, size_output);
CUDA_CHECK(cudaMalloc(&d_output_ref, size_output * sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_output_ref, output_ref, size_output * sizeof(float), cudaMemcpyHostToDevice));
float time_cudnn;
convCuDNN(BATCH_SIZE, Ci, Hi, Wi, d_input,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
Co, Hk, Wk, d_kernel, Ho, Wo, d_output_ref, &time_cudnn);
time_cudnn = time_cudnn / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_cudnn",
time_cudnn,
1.0);
// conv1x1
float elapsedTime_conv_1x1;
dim3 dims_block_conv_1x1(NTX, NTY);
dim3 dims_grid_conv_1x1(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// warm up
kernel_conv_im2col_align<<<dims_grid_conv_1x1, dims_block_conv_1x1>>>(
M, N, K, (float4*)d_kernel, (float4*)d_input, d_output);
cudaEvent_t start_conv_1x1, stop_conv_1x1;
CUDA_CHECK(cudaEventCreate(&start_conv_1x1));
CUDA_CHECK(cudaEventCreate(&stop_conv_1x1));
CUDA_CHECK(cudaEventRecord(start_conv_1x1, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
kernel_conv_im2col_align<<<dims_grid_conv_1x1, dims_block_conv_1x1>>>(
M, N, K, (float4*)d_kernel, (float4*)d_input, d_output);
}
CUDA_CHECK(cudaEventRecord(stop_conv_1x1, 0));
CUDA_CHECK(cudaEventSynchronize(stop_conv_1x1));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_conv_1x1, start_conv_1x1, stop_conv_1x1));
CUDA_CHECK(cudaEventDestroy(start_conv_1x1));
CUDA_CHECK(cudaEventDestroy(stop_conv_1x1));
elapsedTime_conv_1x1 = elapsedTime_conv_1x1 / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_1x1",
elapsedTime_conv_1x1,
time_cudnn/ elapsedTime_conv_1x1);
// copy result to host
CUDA_CHECK(cudaMemcpy(output_ref, d_output_ref, size_output * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(output, d_output, size_output * sizeof(float), cudaMemcpyDeviceToHost));
// check
std::cout << "check correctness..." << std::endl;
bool error = false;
for (int64_t i = 0; i < BATCH_SIZE * Co * Ho * Wo; ++i) {
error = error || (std::abs(output[i] - output_ref[i]) > 1e-3);
}
std::cout << "error: " << error << std::endl;
// free memory
delete[] input;
delete[] kernel;
delete[] output;
delete[] output_ref;
cudaFree(d_input);
cudaFree(d_kernel);
cudaFree(d_output);
cudaFree(d_output_ref);
}
void conv_NxN_im2col_with_batch_test() {
// input size
const int64_t BATCH_SIZE = 32;
const int64_t Ci = 128;
const int64_t Hi = 128;
const int64_t Wi = 128;
// kernel size
const int64_t Co = 128;
const int64_t Hk = 4;
const int64_t Wk = 4;
// padding, stride and dilation
const int64_t pad_h = 2;
const int64_t pad_w = 2;
const int64_t stride_h = 2;
const int64_t stride_w = 2;
const int64_t dilation_h = 1;
const int64_t dilation_w = 1;
// output size
const int64_t Ho = (Hi - ((Hk-1)*dilation_h+1) + 2*pad_h) / stride_h + 1;
const int64_t Wo = (Wi - ((Wk-1)*dilation_w+1) + 2*pad_w) / stride_w + 1;
// prepare matrix size
const int64_t alignment = BLOCK_SIZE_L;
// const int64_t alignment = 8;
const int64_t M = Co;
const int64_t K = Ci * Hk * Wk;
const int64_t N = Ho * Wo;
const int64_t M_align = align(M, alignment);
const int64_t K_align = align(K, alignment);
const int64_t N_align = align(N, alignment);
// column size
const int64_t Hc_align = K_align;
const int64_t Wc_align = N_align;
// host data
float* input = nullptr;
float* kernel = nullptr;
float* column = nullptr;
float* column_align = nullptr;
float* output = nullptr;
float* output_align = nullptr;
const int64_t size_input = BATCH_SIZE * Ci * Hi * Wi;
const int64_t size_kernel = M * K; // (Co * Ci * Hk * Wk)
const int64_t size_kernel_align = M_align * K_align;
const int64_t size_column = BATCH_SIZE * Hk * Wk * Ho * Wo * Ci;
// std::cout << "size_column="<<size_column << std::endl;
// std::cout << "Hk="<<Hk << std::endl;
// std::cout << "Wk="<<Wk << std::endl;
// std::cout << "Ho="<<Ho << std::endl;
// std::cout << "Wo="<<Wo << std::endl;
// std::cout << "Ci="<<Ci << std::endl;
const int64_t size_column_align = BATCH_SIZE * Hc_align * Wc_align;
const int64_t size_output = BATCH_SIZE * M * N; // = BATCH_SIZE * Co * Ho * Wo
const int64_t size_output_align = BATCH_SIZE * M_align * N_align;
malloc_and_init(&input, size_input);
malloc_and_init(&kernel, size_kernel);
malloc_and_init(&column, size_column);
malloc_and_init(&column_align, size_column_align);
malloc_and_init(&output, size_output);
malloc_and_init(&output_align, size_output_align);
// device data
float* d_input = nullptr;
float* d_kernel = nullptr;
float* d_kernel_align = nullptr;
float* d_column = nullptr;
float* d_column_align = nullptr;
float* d_output = nullptr;
float* d_output_align = nullptr;
CUDA_CHECK(cudaMalloc(&d_input, size_input * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_kernel, size_kernel * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_kernel_align, size_kernel_align * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_column, size_column * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_column_align, size_column_align * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_output, size_output * sizeof(float)));
CUDA_CHECK(cudaMalloc(&d_output_align, size_output_align * sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_input, input, size_input * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_kernel, kernel, size_kernel * sizeof(float), cudaMemcpyHostToDevice));
// conv ref
float* output_ref = nullptr;
float* d_output_ref = nullptr;
malloc_and_init(&output_ref, size_output);
CUDA_CHECK(cudaMalloc(&d_output_ref, size_output * sizeof(float)));
CUDA_CHECK(cudaMemcpy(d_output_ref, output_ref, size_output * sizeof(float), cudaMemcpyHostToDevice));
float time_cudnn;
convCuDNN(BATCH_SIZE, Ci, Hi, Wi, d_input,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
Co, Hk, Wk, d_kernel, Ho, Wo, d_output_ref, &time_cudnn);
time_cudnn = time_cudnn / n_rounds;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_cudnn",
time_cudnn,
1.0);
// conv im2col
float elapsedTime_conv;
dim3 dims_block_conv(NTX, NTY);
dim3 dims_grid_conv(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// dim3 dims_block_conv(BLOCK_SIZE, BLOCK_SIZE);
// dim3 dims_grid_conv(CEIL_DIV(N, BLOCK_SIZE_L), BATCH_SIZE * CEIL_DIV(M, BLOCK_SIZE_L));
// im2col
// kernel_im2col_align_with_batch<<<1024, 1024>>>(
// alignment,
// BATCH_SIZE,
// Ci,
// Hi, Wi,
// Ho, Wo,
// Hk, Wk,
// stride_w, stride_h,
// pad_w, pad_h,
// dilation_w, dilation_h,
// d_input,
// d_column_align);
kernel_im2col_align<<<1024, 1024>>>(
alignment,
Ci,
Hi, Wi,
Ho, Wo,
Hk, Wk,
stride_w, stride_h,
pad_w, pad_h,
dilation_w, dilation_h,
d_input,
d_column_align);
// kernel_im2col<<<1024, 1024>>>(
// Ci,
// Hi, Wi,
// Ho, Wo,
// Hk, Wk,
// stride_w, stride_h,
// pad_w, pad_h,
// dilation_w, dilation_h,
// d_input,
// d_column);
// CUDA_CHECK(cudaDeviceSynchronize());
// CUDA_CHECK(cudaMemcpy(column, d_column, size_column * sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CHECK(cudaMemcpy(column_align, d_column_align, size_column_align * sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CHECK(cudaDeviceSynchronize());
// std::cout << "input1:" << std::endl;
// print_matrix(input, Hi, Wi);
// std::cout << "input2:" << std::endl;
// print_matrix(input+Wi*Hi, Hi, Wi);
// std::cout << "col:" << std::endl;
// print_matrix(column, Ci*Hk*Wk, Wo*Ho);
// std::cout << "col_align:" << std::endl;
// print_matrix(column_align, Hc_align, Wc_align);
// warm up
// kernel_conv_im2col_align<<<dims_grid_conv, dims_block_conv>>>(
// M_align, N_align, K_align, (float4*)d_kernel_align, (float4*)d_column_align, d_output_align);
// align
float time_padding = padding(d_kernel, d_kernel_align, M, K, M_align, K_align);
cublasHandle_t handle;
cublasCreate(&handle);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST);
cudaEvent_t start_conv, stop_conv;
CUDA_CHECK(cudaEventCreate(&start_conv));
CUDA_CHECK(cudaEventCreate(&stop_conv));
CUDA_CHECK(cudaEventRecord(start_conv, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
kernel_conv_im2col_align<<<dims_grid_conv, dims_block_conv>>>(
M_align, N_align, K_align, (float4*)d_kernel_align, (float4*)d_column_align, d_output_align);
// gemmCublas(M_align, N_align, K_align, d_kernel_align, d_column_align, d_output_align, BATCH_SIZE, handle);
// kernel_shared_4w<<<dims_grid_conv, dims_block_conv>>>(
// M, N, K, d_kernel, d_column, d_output);
}
CUDA_CHECK(cudaEventRecord(stop_conv, 0));
CUDA_CHECK(cudaEventSynchronize(stop_conv));
CUDA_CHECK(cudaEventElapsedTime(&elapsedTime_conv, start_conv, stop_conv));
CUDA_CHECK(cudaEventDestroy(start_conv));
CUDA_CHECK(cudaEventDestroy(stop_conv));
float time_unpadding = unpadding_with_batch(d_output, d_output_align, M, N, M_align, N_align, BATCH_SIZE);
// float time_unpadding = unpadding(d_output, d_output_align, M, N, M_align, N_align);
elapsedTime_conv = elapsedTime_conv / n_rounds + time_unpadding + time_padding;
printf("kernel %-20s: %8.2f ms, speedup=%.2f.\n",
"conv_NxN_im2col",
elapsedTime_conv,
time_cudnn / elapsedTime_conv);
// copy result to host
// CUDA_CHECK(cudaMemcpy(output_ref, d_output_ref, size_output * sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CHECK(cudaMemcpy(output, d_output, size_output * sizeof(float), cudaMemcpyDeviceToHost));
// CUDA_CHECK(cudaMemcpy(output_align, d_output_align, size_output_align * sizeof(float), cudaMemcpyDeviceToHost));
// std::cout << "output_ref:" << std::endl;
// print_matrix(output_ref, M, N);
// std::cout << "output:" << std::endl;
// print_matrix(output, M, N_align);
// check
std::cout << "check correctness..." << std::endl;
bool error = false;
for (int64_t i = 0; i < BATCH_SIZE * Co * Ho * Wo; ++i) {
// std::cout<<output[i] - output_ref[i]<<", ";
error = error || (std::abs(output[i] - output_ref[i]) > 1e-3);
}
std::cout << "error: " << error << std::endl;
// free memory
free(input);
free(kernel);
free(output);
free(output_ref);
cudaFree(d_input);
cudaFree(d_kernel);
cudaFree(d_column_align);
cudaFree(d_output);
cudaFree(d_output_align);
cudaFree(d_output_ref);
free(column);
cudaFree(d_column);
free(column_align);
}
void convCuDNN(
const int64_t BATCH_SIZE, const int64_t Ci, const int64_t Hi, const int64_t Wi, const float* input,
const int64_t pad_h, const int64_t pad_w,
const int64_t stride_h, const int64_t stride_w,
const int64_t dilation_h, const int64_t dilation_w,
const int64_t Co, const int64_t Hk, const int64_t Wk, const float* kernel,
const int64_t Ho, const int64_t Wo, float* output,
float * time_ptr) {
//handle
cudnnHandle_t handle;
cudnnCreate(&handle);
// tensor descriptor
cudnnTensorDescriptor_t input_desc;
cudnnTensorDescriptor_t output_desc;
CUDNN_CHECK(cudnnCreateTensorDescriptor(&input_desc));
CUDNN_CHECK(cudnnCreateTensorDescriptor(&output_desc));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(
input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
BATCH_SIZE, Ci, Hi, Wi));
CUDNN_CHECK(cudnnSetTensor4dDescriptor(
output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
BATCH_SIZE, Co, Ho, Wo));
// kernel
cudnnFilterDescriptor_t kernel_desc;
CUDNN_CHECK(cudnnCreateFilterDescriptor(&kernel_desc));
CUDNN_CHECK(cudnnSetFilter4dDescriptor(
kernel_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
Co, Ci, Hk, Wk));
// convolution descriptor
cudnnConvolutionDescriptor_t conv_desc;
cudnnCreateConvolutionDescriptor(&conv_desc);
CUDNN_CHECK(cudnnSetConvolution2dDescriptor(conv_desc,
pad_h, pad_w, // padding
stride_h, stride_w, // stride
dilation_h, dilation_w, // dilation
CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
// algorithm
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(
handle, input_desc, kernel_desc, conv_desc, output_desc,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0, &algo));
// workspace size && allocate memory
size_t workspace_size = 0;
CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle,
input_desc, kernel_desc, conv_desc, output_desc,
algo, &workspace_size));
void * workspace = nullptr;
CUDA_CHECK(cudaMalloc(&workspace, workspace_size));
// convolution
auto alpha = 1.0f, beta = 0.0f;
// warm up
CUDNN_CHECK(cudnnConvolutionForward(handle,
&alpha, input_desc, input,
kernel_desc, kernel,
conv_desc, algo,
workspace, workspace_size,
&beta, output_desc, output));
cudaEvent_t start_conv_ref, stop_conv_ref;
CUDA_CHECK(cudaEventCreate(&start_conv_ref));
CUDA_CHECK(cudaEventCreate(&stop_conv_ref));
CUDA_CHECK(cudaEventRecord(start_conv_ref, 0));
for (int64_t i = 0; i < n_rounds; ++i) {
CUDNN_CHECK(cudnnConvolutionForward(handle,
&alpha, input_desc, input,
kernel_desc, kernel,
conv_desc, algo,
workspace, workspace_size,
&beta, output_desc, output));
}
CUDA_CHECK(cudaEventRecord(stop_conv_ref, 0));
CUDA_CHECK(cudaEventSynchronize(stop_conv_ref));
CUDA_CHECK(cudaEventElapsedTime(time_ptr, start_conv_ref, stop_conv_ref));
CUDA_CHECK(cudaEventDestroy(start_conv_ref));
CUDA_CHECK(cudaEventDestroy(stop_conv_ref));
// destroy
cudaFree(workspace);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroyFilterDescriptor(kernel_desc);
cudnnDestroy(handle);
}
void gemmCublas(
const int64_t M, const int64_t N, const int64_t K,
const float* A, const float* B, float* C,
const int64_t batch_size, cublasHandle_t& handle
) {
cublasOperation_t transa = CUBLAS_OP_N;
cublasOperation_t transb = CUBLAS_OP_N;
const int m = N;
const int k = K;
const int n = M;
const int lda = K;
const int ldb = N;
const int ldc = N;
using scalar_t = float;
const scalar_t *a = A;
const scalar_t *b = B;
scalar_t *c = C;
scalar_t alpha = 1, beta = 0;
for (int i = 0; i < batch_size; ++i)
cublasSgemm(handle, transb, transa, m, n, k,
&alpha, b+K*N*i, ldb, a+M*K*i, lda, &beta, c+M*N*i, ldc);
} |
9e9d02d4fa40ab89c462f4eab5d01b2e480d14ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
namespace votenet {
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( query_ball_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
} // namespace votenet
| 9e9d02d4fa40ab89c462f4eab5d01b2e480d14ca.cu | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
namespace votenet {
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
} // namespace votenet
|
dd158f96d92d3c0bcc44d1b1d1c4e260ba1d8e5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <random>
#include <cmath>
#include <cassert>
#include <iomanip>
#include <sstream>
#include <hip/hip_runtime.h>
#include "hipsparse.h"
#define Gamma 100.0
using namespace std;
#define D2H hipMemcpyDeviceToHost
#define H2D hipMemcpyHostToDevice
#define cudaAlloc(ptr,sz,type) do{ \
hipMalloc((void **)&ptr, sz*sizeof(type)); \
}while(0)
char* testcase_name[] = {
"./exp/mgc_superblue16_a", // 0
"./exp/mgc_superblue11_a", // 1
"./exp/mgc_superblue12", // 2
"./exp/mgc_des_perf_1", // 3
"./exp/mgc_des_perf_a", // 4
"./exp/mgc_edit_dist_a", // 5
"./exp/mgc_edit_dist_2", // 6
"./exp/mgc_matrix_mult_1", // 7
"./exp/mgc_pci_bridge32_a",// 8
"./exp/mgc_pci_bridge32_b",// 9
"./exp/mgc_fft_1", // 10
"./exp/mgc_fft_2", // 11
"./exp/mgc_matrix_mult_a", // 12
"./exp/mgc_matrix_mult_b", // 13
"./exp/mgc_des_perf_b", // 14
"./exp/mgc_fft_a", // 15
"./exp/mgc_fft_b", // 16
};
int ID = 0;
void checkCUDAerror(){
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
}
__global__
void pin_exp_sum(double *pin , size_t num , double *out, bool neg )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num )
if( !neg )
out[i] = exp( pin[i]/Gamma );
else
out[i] = exp( -pin[i]/Gamma );
}
__global__
void exp_sum(double *pin_x , double *pin_y , size_t num , double *out )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
// out[4*i] = exp( pin_x[i]/Gamma );
// out[4*i+1] = exp( -pin_x[i]/Gamma );
// out[4*i+2] = exp( pin_y[i]/Gamma );
// out[4*i+3] = exp( -pin_y[i]/Gamma );
out[i] = exp( pin_x[i]/Gamma );
out[i+num] = exp( -pin_x[i]/Gamma );
out[i+2*num] = exp( pin_y[i]/Gamma );
out[i+3*num] = exp( -pin_y[i]/Gamma );
}
}
void compute_exp( size_t pin_size, double *pin_x, double *pin_y, double* pin_exp_d ){
dim3 DimGrid(pin_size/256+1,1,1);
dim3 DimBlock(256,1,1);
hipLaunchKernelGGL(( exp_sum), dim3(DimGrid),dim3(DimBlock), 0, 0, pin_x, pin_y, pin_size, pin_exp_d);
checkCUDAerror();
hipDeviceSynchronize(); // if we want to use printf in kernel, must have hipDeviceSynchronize()
}
__global__
void reciprocal_kernel(double *mat , size_t sz )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < sz ){
if( mat[i] != 0.0 )
mat[i] = 1.0/mat[i];
}
}
void gpu_reciprocal( double *mat , size_t sz ){
dim3 DimGrid(sz/256+1,1,1);
dim3 DimBlock(256,1,1);
hipLaunchKernelGGL(( reciprocal_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, mat , sz);
checkCUDAerror();
}
__global__
void neg_odd_col_kernel(double *mat , size_t pin_num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < pin_num ){
mat[i+pin_num] = -mat[i+pin_num];
mat[i+3*pin_num] = -mat[i+3*pin_num];
}
//auto i = blockIdx.y * col + blockIdx.x*blockDim.x + threadIdx.x;
//if( blockIdx.x*blockDim.x + threadIdx.x < col )
// mat[i] = -mat[i];
}
void gpu_neg_odd_row( double *mat, size_t pin_num ){
dim3 DimGrid( (pin_num/256+1),1,1);
dim3 DimBlock(256,1,1);
hipLaunchKernelGGL(( neg_odd_col_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, mat , pin_num);
checkCUDAerror();
hipDeviceSynchronize(); // if we want to use printf in kernel, must have hipDeviceSynchronize()
}
//__global__ void Dev_dot(double x[], double y[], double z[], int n) {
// /* Use tmp to store products of vector components in each block */
// /* Can't use variable dimension here */
// __shared__ double tmp[MAX_BLOCK_SZ];
// int t = blockDim.x * blockIdx.x + threadIdx.x;
// int loc_t = threadIdx.x;
//
// if (t < n) tmp[loc_t] = x[t]*y[t];
// __syncthreads();
//
// /* This uses a tree structure to do the additions */
// for (int stride = blockDim.x/2; stride > 0; stride /= 2) {
// if (loc_t < stride)
// tmp[loc_t] += tmp[loc_t + stride];
// __syncthreads();
// }
//
// /* Store the result from this cache block in z[blockIdx.x] */
// if (threadIdx.x == 0) {
// z[blockIdx.x] = tmp[0];
// }
//} /* Dev_dot */
__global__
void matrix_dot_product_kernel(double* A, double* B, double* C, size_t sz )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < sz ){
//C[i] = A[i*4] *B[i*4];
//C[i] += A[i*4+1]*B[i*4+1];
//C[i] += A[i*4+2]*B[i*4+2];
//C[i] += A[i*4+3]*B[i*4+3];
C[i] = A[i] *B[i];
C[i] += A[i+1*sz]*B[i+1*sz];
C[i] += A[i+2*sz]*B[i+2*sz];
C[i] += A[i+3*sz]*B[i+3*sz];
}
}
// B is transpose
void gpu_matrix_dot_product( double *A , double *B, double *C, size_t pin_size ){
dim3 DimGrid( (pin_size/256+1),1,1);
dim3 DimBlock(256,1,1);
hipLaunchKernelGGL(( matrix_dot_product_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, A,B,C,pin_size);
checkCUDAerror();
}
void read_pin_location( vector<double> &pin_x , vector<double> &pin_y ){
int pin_num;
double x,y,w,h;
ifstream fptr;
vector<double> x_v;
vector<double> y_v;
string tname(testcase_name[ID]);
tname += "_gpu_density_info";
cout << "GPU density Info = " << tname << '\n';
double Gx,Gy;
//fptr.open("./tune_placer/gpu_density_info");
fptr.open(tname.c_str());
fptr >> pin_num >> Gx >> Gy;
for( size_t i = 0 ; i < pin_num ; ++ i ){
fptr >> x >> y >> w >> h;
x_v.emplace_back(x);
y_v.emplace_back(y);
}
std::move(x_v.begin(), x_v.end(), std::back_inserter(pin_x));
std::move(y_v.begin(), y_v.end(), std::back_inserter(pin_y));
fptr.close();
}
__global__
void compute_wire_exp_kernel(int* start, int* end, int* pinInWire,
double* wire_x, double* wire_y,
double* wire_x_neg, double* wire_y_neg,
double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg, size_t num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
for( auto id = start[i] ; id < end[i] ; ++id ){
if( id == start[i] ){
wire_x[i] = pin_x[pinInWire[id]];
wire_y[i] = pin_y[pinInWire[id]];
wire_x_neg[i] = pin_x_neg[pinInWire[id]];
wire_y_neg[i] = pin_y_neg[pinInWire[id]];
}
else{
wire_x[i] += pin_x[pinInWire[id]];
wire_y[i] += pin_y[pinInWire[id]];
wire_x_neg[i] += pin_x_neg[pinInWire[id]];
wire_y_neg[i] += pin_y_neg[pinInWire[id]];
}
}
}
}
void compute_wire_exp( size_t wire_size, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg,
int *start, int *end,
double *wire_x, double *wire_y, double *wire_x_neg, double *wire_y_neg, int *pinInWire ){
dim3 DimGrid(wire_size/1024+1,1,1);
dim3 DimBlock(1024,1,1);
hipEvent_t start_t, stop_t;
hipEventCreate(&start_t);
hipEventCreate(&stop_t);
hipEventRecord(start_t);
hipLaunchKernelGGL(( compute_wire_exp_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, start, end, pinInWire,
wire_x, wire_y, wire_x_neg, wire_y_neg, pin_x, pin_y, pin_x_neg, pin_y_neg, wire_size );
hipEventRecord(stop_t);
hipDeviceSynchronize(); // if we want to use printf in kernel, must have hipDeviceSynchronize()
hipEventSynchronize(stop_t);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute wire exp :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
}
__global__
void compute_pin_exp_kernel(double* x, double* y, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg, size_t num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
pin_x[i] = exp( x[i]/Gamma );
pin_y[i] = exp( y[i]/Gamma );
pin_x_neg[i] = exp( -x[i]/Gamma );
pin_y_neg[i] = exp( -y[i]/Gamma );
}
}
void compute_pin_exp( size_t pin_size, double* x, double* y, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg ){
dim3 DimGrid(pin_size/1024+1,1,1);
dim3 DimBlock(1024,1,1);
hipEvent_t start_t, stop_t;
hipEventCreate(&start_t);
hipEventCreate(&stop_t);
hipEventRecord(start_t);
hipLaunchKernelGGL(( compute_pin_exp_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, x, y, pin_x, pin_y, pin_x_neg, pin_y_neg, pin_size );
hipEventRecord(stop_t);
hipDeviceSynchronize(); // if we want to use printf in kernel, must have hipDeviceSynchronize()
hipEventSynchronize(stop_t);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute pin exp :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
}
__global__
void compute_grad_kernel(int* start, int *end, int *pinInWire,
double* pin_x, double* pin_y, double* pin_x_neg, double *pin_y_neg,
double *wire_x, double *wire_y, double* wire_x_neg, double* wire_y_neg, int num, double *x, double *y )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
double grad_x = 0.0;
double grad_y = 0.0;
double grad_x_neg = 0.0;
double grad_y_neg = 0.0;
for( auto id = start[i] ; id < end[i] ; ++id ){
grad_x += 1.0/wire_x[pinInWire[id]];
grad_y += 1.0/wire_y[pinInWire[id]];
grad_x_neg -= 1.0/wire_x_neg[pinInWire[id]];
grad_y_neg -= 1.0/wire_y_neg[pinInWire[id]];
}
x[i] = grad_x*pin_x[i] + grad_x_neg*pin_x_neg[i];
y[i] = grad_y*pin_y[i] + grad_y_neg*pin_y_neg[i];
//if( i < 10 )
// printf("%lf , %lf , %lf , %lf, %.12e\n", pin_y[i], pin_y_neg[i], grad_y, grad_y_neg, y[i] );
//x[i] = grad_x*pin_x[i] - grad_x_neg*pin_x_neg[i] +
// grad_y*pin_y[i] - grad_y_neg*pin_y_neg[i] ;
//y[i] = 1.0/grad_y*pin_y[i] - 1.0/grad_y_neg*pin_y_neg[i];
}
}
int main(int argc, char *argv[] ){
if(argc < 2 ){
printf("Error : No testcase ID\n");
exit(1);
}
istringstream ss(argv[1]);
ss >> ID;
printf("Argv[1] (ID) = %d\n" , ID);
string tname(testcase_name[ID]);
tname += "_gpu_wire_info";
ofstream of;
ifstream fptr;
fptr.open(tname.c_str());
vector<vector<int>> wire;
vector<double> pin_x;
vector<double> pin_y;
size_t wire_num;
size_t pin_num;
fptr >> wire_num >> pin_num;
wire.resize(wire_num);
size_t pin_sum = 0;
for( size_t i = 0 ; i < wire_num ; ++ i ){
size_t id;
double x;
fptr >> id;
wire[i].resize(id);
pin_sum += id;
for( size_t j = 0 ; j < wire[i].size() ; ++ j ){
fptr >> wire[i][j] >> x;
}
}
fptr.close();
read_pin_location( pin_x, pin_y );
int *start = (int*)malloc(sizeof(int)*wire.size() );
int *end = (int*)malloc(sizeof(int)*wire.size() );
int *pinInWire = (int*)malloc(sizeof(int)*pin_sum);
int acc = 0;
pin_sum = 0;
for( size_t i = 0 ; i < wire.size() ; ++ i ){
start[i] = acc;
end[i] = acc + wire[i].size();
acc += wire[i].size();
for( size_t j = 0 ; j < wire[i].size() ; ++ j ){
pinInWire[pin_sum] = wire[i][j];
pin_sum += 1;
}
}
auto begin_t = std::chrono::high_resolution_clock::now();
auto end_t = std::chrono::high_resolution_clock::now();
double *pin_x_d;
double *pin_y_d;
double *pin_exp_x_d;
double *pin_exp_y_d;
double *pin_exp_x_neg_d;
double *pin_exp_y_neg_d;
hipMalloc((void **)&pin_x_d, pin_x.size() * sizeof(double) );
hipMalloc((void **)&pin_y_d, pin_x.size() * sizeof(double) );
hipMalloc((void **)&pin_exp_x_d, pin_x.size() * sizeof(double) );
hipMalloc((void **)&pin_exp_y_d, pin_x.size() * sizeof(double) );
hipMalloc((void **)&pin_exp_x_neg_d, pin_x.size() * sizeof(double) );
hipMalloc((void **)&pin_exp_y_neg_d, pin_x.size() * sizeof(double) );
// Copy pin coordinates to GPU and compute exponential values
begin_t = std::chrono::high_resolution_clock::now();
hipMemcpy(pin_x_d, pin_x.data(), pin_x.size() * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(pin_y_d, pin_y.data(), pin_x.size() * sizeof(double), hipMemcpyHostToDevice);
compute_pin_exp( pin_x.size(), pin_x_d, pin_y_d, pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d );
end_t = std::chrono::high_resolution_clock::now();
auto total_run_time = std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
int *start_d;
int *end_d;
int *pinInWire_d;
hipMalloc((void **)&start_d, wire.size() * sizeof(int));
hipMalloc((void **)&end_d, wire.size() * sizeof(int));
hipMalloc((void **)&pinInWire_d, pin_sum* sizeof(int));
hipMemcpy(start_d, start, wire.size() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(end_d, end, wire.size() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(pinInWire_d, pinInWire, pin_sum * sizeof(int), hipMemcpyHostToDevice);
double *wire_exp_x_d;
double *wire_exp_y_d;
double *wire_exp_x_neg_d;
double *wire_exp_y_neg_d;
hipMalloc((void **)&wire_exp_x_d, wire.size() * sizeof(double));
hipMalloc((void **)&wire_exp_y_d, wire.size() * sizeof(double));
hipMalloc((void **)&wire_exp_x_neg_d, wire.size() * sizeof(double));
hipMalloc((void **)&wire_exp_y_neg_d, wire.size() * sizeof(double));
// Compute wire exp sum in GPU
begin_t = std::chrono::high_resolution_clock::now();
compute_wire_exp( wire.size(), pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d, start_d, end_d,
wire_exp_x_d, wire_exp_y_d, wire_exp_x_neg_d, wire_exp_y_neg_d, pinInWire_d );
end_t = std::chrono::high_resolution_clock::now();
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
start = static_cast<int*>( realloc( start, sizeof(int)*pin_x.size() ) );
end = static_cast<int*>( realloc( end, sizeof(int)*pin_x.size() ) );
vector<vector<int>> wireInPin;
wireInPin.resize( pin_x.size() );
for( int i = 0 ; i < wire.size() ; ++ i )
for( int j = 0 ; j < wire[i].size() ; ++ j )
wireInPin[wire[i][j]].push_back( i );
pin_sum = 0;
for( int i = 0 ; i < wireInPin.size(); ++ i ){
start[i] = pin_sum;
end[i] = start[i] + wireInPin[i].size();
memcpy(&pinInWire[start[i]] , wireInPin[i].data(), sizeof(int)*wireInPin[i].size() );
pin_sum += wireInPin[i].size();
}
hipMemcpy(pinInWire_d, pinInWire, pin_sum * sizeof(int), hipMemcpyHostToDevice);
int* s;
int* e;
hipMalloc((void **)&s, pin_x.size() * sizeof(double) );
hipMalloc((void **)&e, pin_x.size() * sizeof(double) );
hipMemcpy(s, start, pin_x.size() * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(e, end, pin_x.size() * sizeof(int), hipMemcpyHostToDevice);
double *grad_x;
double *grad_y;
hipMalloc((void **)&grad_x, pin_x.size() * sizeof(double) );
hipMalloc((void **)&grad_y, pin_x.size() * sizeof(double) );
dim3 DimGrid(pin_x.size()/1024+1,1,1);
dim3 DimBlock(1024,1,1);
hipEvent_t start_t, stop_t;
hipEventCreate(&start_t);
hipEventCreate(&stop_t);
hipEventRecord(start_t);
// Compute gradient in GPU
begin_t = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( compute_grad_kernel), dim3(DimGrid),dim3(DimBlock), 0, 0, s, e, pinInWire_d, pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d,
wire_exp_x_d, wire_exp_y_d, wire_exp_x_neg_d, wire_exp_y_neg_d,
pin_x.size(), grad_x, grad_y );
end_t = std::chrono::high_resolution_clock::now();
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
hipEventRecord(stop_t);
hipDeviceSynchronize(); // if we want to use printf in kernel, must have hipDeviceSynchronize()
hipEventSynchronize(stop_t);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute grad :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
double *x_host = static_cast<double*>(malloc(sizeof(double)*pin_x.size()));
double *y_host = static_cast<double*>(malloc(sizeof(double)*pin_x.size()));
// Copy gradient from GPU to CPU
begin_t = std::chrono::high_resolution_clock::now();
hipMemcpy(x_host, grad_x, pin_x.size() * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(y_host, grad_y, pin_x.size() * sizeof(double), hipMemcpyDeviceToHost);
end_t = std::chrono::high_resolution_clock::now();
std::cout << "Copy grad D2H: " << std::chrono::duration_cast<std::chrono::milliseconds>(end_t-begin_t).count() << " milli sec" << std::endl;
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
std::cout << "Total Direct Wire GPU Run time = " << total_run_time << " micro sec" << std::endl;
// This is for debugging
//of.open("./gpu_result");
//of.setf(ios::fixed,ios::floatfield);
//of.precision(12);
//for( int i = 0; i < pin_x.size() ; ++ i ){
// if( i < 10 )
// //printf("%.25e %.25e\n", x_host[i], y_host[i]);
// cout << x_host[i] << " " << y_host[i] << '\n';
// of << y_host[i] << '\n';
//}
//of.close();
//delete [] y_host;
}
| dd158f96d92d3c0bcc44d1b1d1c4e260ba1d8e5f.cu | #include <iostream>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <random>
#include <cmath>
#include <cassert>
#include <iomanip>
#include <sstream>
#include <cuda_runtime.h>
#include "cusparse.h"
#define Gamma 100.0
using namespace std;
#define D2H cudaMemcpyDeviceToHost
#define H2D cudaMemcpyHostToDevice
#define cudaAlloc(ptr,sz,type) do{ \
cudaMalloc((void **)&ptr, sz*sizeof(type)); \
}while(0)
char* testcase_name[] = {
"./exp/mgc_superblue16_a", // 0
"./exp/mgc_superblue11_a", // 1
"./exp/mgc_superblue12", // 2
"./exp/mgc_des_perf_1", // 3
"./exp/mgc_des_perf_a", // 4
"./exp/mgc_edit_dist_a", // 5
"./exp/mgc_edit_dist_2", // 6
"./exp/mgc_matrix_mult_1", // 7
"./exp/mgc_pci_bridge32_a",// 8
"./exp/mgc_pci_bridge32_b",// 9
"./exp/mgc_fft_1", // 10
"./exp/mgc_fft_2", // 11
"./exp/mgc_matrix_mult_a", // 12
"./exp/mgc_matrix_mult_b", // 13
"./exp/mgc_des_perf_b", // 14
"./exp/mgc_fft_a", // 15
"./exp/mgc_fft_b", // 16
};
int ID = 0;
void checkCUDAerror(){
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
}
__global__
void pin_exp_sum(double *pin , size_t num , double *out, bool neg )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num )
if( !neg )
out[i] = exp( pin[i]/Gamma );
else
out[i] = exp( -pin[i]/Gamma );
}
__global__
void exp_sum(double *pin_x , double *pin_y , size_t num , double *out )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
// out[4*i] = exp( pin_x[i]/Gamma );
// out[4*i+1] = exp( -pin_x[i]/Gamma );
// out[4*i+2] = exp( pin_y[i]/Gamma );
// out[4*i+3] = exp( -pin_y[i]/Gamma );
out[i] = exp( pin_x[i]/Gamma );
out[i+num] = exp( -pin_x[i]/Gamma );
out[i+2*num] = exp( pin_y[i]/Gamma );
out[i+3*num] = exp( -pin_y[i]/Gamma );
}
}
void compute_exp( size_t pin_size, double *pin_x, double *pin_y, double* pin_exp_d ){
dim3 DimGrid(pin_size/256+1,1,1);
dim3 DimBlock(256,1,1);
exp_sum<<<DimGrid,DimBlock>>>( pin_x, pin_y, pin_size, pin_exp_d);
checkCUDAerror();
cudaDeviceSynchronize(); // if we want to use printf in kernel, must have cudaDeviceSynchronize()
}
__global__
void reciprocal_kernel(double *mat , size_t sz )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < sz ){
if( mat[i] != 0.0 )
mat[i] = 1.0/mat[i];
}
}
void gpu_reciprocal( double *mat , size_t sz ){
dim3 DimGrid(sz/256+1,1,1);
dim3 DimBlock(256,1,1);
reciprocal_kernel<<<DimGrid,DimBlock>>>(mat , sz);
checkCUDAerror();
}
__global__
void neg_odd_col_kernel(double *mat , size_t pin_num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < pin_num ){
mat[i+pin_num] = -mat[i+pin_num];
mat[i+3*pin_num] = -mat[i+3*pin_num];
}
//auto i = blockIdx.y * col + blockIdx.x*blockDim.x + threadIdx.x;
//if( blockIdx.x*blockDim.x + threadIdx.x < col )
// mat[i] = -mat[i];
}
void gpu_neg_odd_row( double *mat, size_t pin_num ){
dim3 DimGrid( (pin_num/256+1),1,1);
dim3 DimBlock(256,1,1);
neg_odd_col_kernel<<<DimGrid,DimBlock>>>(mat , pin_num);
checkCUDAerror();
cudaDeviceSynchronize(); // if we want to use printf in kernel, must have cudaDeviceSynchronize()
}
//__global__ void Dev_dot(double x[], double y[], double z[], int n) {
// /* Use tmp to store products of vector components in each block */
// /* Can't use variable dimension here */
// __shared__ double tmp[MAX_BLOCK_SZ];
// int t = blockDim.x * blockIdx.x + threadIdx.x;
// int loc_t = threadIdx.x;
//
// if (t < n) tmp[loc_t] = x[t]*y[t];
// __syncthreads();
//
// /* This uses a tree structure to do the additions */
// for (int stride = blockDim.x/2; stride > 0; stride /= 2) {
// if (loc_t < stride)
// tmp[loc_t] += tmp[loc_t + stride];
// __syncthreads();
// }
//
// /* Store the result from this cache block in z[blockIdx.x] */
// if (threadIdx.x == 0) {
// z[blockIdx.x] = tmp[0];
// }
//} /* Dev_dot */
__global__
void matrix_dot_product_kernel(double* A, double* B, double* C, size_t sz )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < sz ){
//C[i] = A[i*4] *B[i*4];
//C[i] += A[i*4+1]*B[i*4+1];
//C[i] += A[i*4+2]*B[i*4+2];
//C[i] += A[i*4+3]*B[i*4+3];
C[i] = A[i] *B[i];
C[i] += A[i+1*sz]*B[i+1*sz];
C[i] += A[i+2*sz]*B[i+2*sz];
C[i] += A[i+3*sz]*B[i+3*sz];
}
}
// B is transpose
void gpu_matrix_dot_product( double *A , double *B, double *C, size_t pin_size ){
dim3 DimGrid( (pin_size/256+1),1,1);
dim3 DimBlock(256,1,1);
matrix_dot_product_kernel<<<DimGrid,DimBlock>>>(A,B,C,pin_size);
checkCUDAerror();
}
void read_pin_location( vector<double> &pin_x , vector<double> &pin_y ){
int pin_num;
double x,y,w,h;
ifstream fptr;
vector<double> x_v;
vector<double> y_v;
string tname(testcase_name[ID]);
tname += "_gpu_density_info";
cout << "GPU density Info = " << tname << '\n';
double Gx,Gy;
//fptr.open("./tune_placer/gpu_density_info");
fptr.open(tname.c_str());
fptr >> pin_num >> Gx >> Gy;
for( size_t i = 0 ; i < pin_num ; ++ i ){
fptr >> x >> y >> w >> h;
x_v.emplace_back(x);
y_v.emplace_back(y);
}
std::move(x_v.begin(), x_v.end(), std::back_inserter(pin_x));
std::move(y_v.begin(), y_v.end(), std::back_inserter(pin_y));
fptr.close();
}
__global__
void compute_wire_exp_kernel(int* start, int* end, int* pinInWire,
double* wire_x, double* wire_y,
double* wire_x_neg, double* wire_y_neg,
double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg, size_t num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
for( auto id = start[i] ; id < end[i] ; ++id ){
if( id == start[i] ){
wire_x[i] = pin_x[pinInWire[id]];
wire_y[i] = pin_y[pinInWire[id]];
wire_x_neg[i] = pin_x_neg[pinInWire[id]];
wire_y_neg[i] = pin_y_neg[pinInWire[id]];
}
else{
wire_x[i] += pin_x[pinInWire[id]];
wire_y[i] += pin_y[pinInWire[id]];
wire_x_neg[i] += pin_x_neg[pinInWire[id]];
wire_y_neg[i] += pin_y_neg[pinInWire[id]];
}
}
}
}
void compute_wire_exp( size_t wire_size, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg,
int *start, int *end,
double *wire_x, double *wire_y, double *wire_x_neg, double *wire_y_neg, int *pinInWire ){
dim3 DimGrid(wire_size/1024+1,1,1);
dim3 DimBlock(1024,1,1);
cudaEvent_t start_t, stop_t;
cudaEventCreate(&start_t);
cudaEventCreate(&stop_t);
cudaEventRecord(start_t);
compute_wire_exp_kernel<<<DimGrid,DimBlock>>>( start, end, pinInWire,
wire_x, wire_y, wire_x_neg, wire_y_neg, pin_x, pin_y, pin_x_neg, pin_y_neg, wire_size );
cudaEventRecord(stop_t);
cudaDeviceSynchronize(); // if we want to use printf in kernel, must have cudaDeviceSynchronize()
cudaEventSynchronize(stop_t);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute wire exp :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
}
__global__
void compute_pin_exp_kernel(double* x, double* y, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg, size_t num )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
pin_x[i] = exp( x[i]/Gamma );
pin_y[i] = exp( y[i]/Gamma );
pin_x_neg[i] = exp( -x[i]/Gamma );
pin_y_neg[i] = exp( -y[i]/Gamma );
}
}
void compute_pin_exp( size_t pin_size, double* x, double* y, double *pin_x, double *pin_y, double* pin_x_neg, double* pin_y_neg ){
dim3 DimGrid(pin_size/1024+1,1,1);
dim3 DimBlock(1024,1,1);
cudaEvent_t start_t, stop_t;
cudaEventCreate(&start_t);
cudaEventCreate(&stop_t);
cudaEventRecord(start_t);
compute_pin_exp_kernel<<<DimGrid,DimBlock>>>( x, y, pin_x, pin_y, pin_x_neg, pin_y_neg, pin_size );
cudaEventRecord(stop_t);
cudaDeviceSynchronize(); // if we want to use printf in kernel, must have cudaDeviceSynchronize()
cudaEventSynchronize(stop_t);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute pin exp :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
}
__global__
void compute_grad_kernel(int* start, int *end, int *pinInWire,
double* pin_x, double* pin_y, double* pin_x_neg, double *pin_y_neg,
double *wire_x, double *wire_y, double* wire_x_neg, double* wire_y_neg, int num, double *x, double *y )
{
auto i = blockIdx.x*blockDim.x + threadIdx.x;
if( i < num ){
double grad_x = 0.0;
double grad_y = 0.0;
double grad_x_neg = 0.0;
double grad_y_neg = 0.0;
for( auto id = start[i] ; id < end[i] ; ++id ){
grad_x += 1.0/wire_x[pinInWire[id]];
grad_y += 1.0/wire_y[pinInWire[id]];
grad_x_neg -= 1.0/wire_x_neg[pinInWire[id]];
grad_y_neg -= 1.0/wire_y_neg[pinInWire[id]];
}
x[i] = grad_x*pin_x[i] + grad_x_neg*pin_x_neg[i];
y[i] = grad_y*pin_y[i] + grad_y_neg*pin_y_neg[i];
//if( i < 10 )
// printf("%lf , %lf , %lf , %lf, %.12e\n", pin_y[i], pin_y_neg[i], grad_y, grad_y_neg, y[i] );
//x[i] = grad_x*pin_x[i] - grad_x_neg*pin_x_neg[i] +
// grad_y*pin_y[i] - grad_y_neg*pin_y_neg[i] ;
//y[i] = 1.0/grad_y*pin_y[i] - 1.0/grad_y_neg*pin_y_neg[i];
}
}
int main(int argc, char *argv[] ){
if(argc < 2 ){
printf("Error : No testcase ID\n");
exit(1);
}
istringstream ss(argv[1]);
ss >> ID;
printf("Argv[1] (ID) = %d\n" , ID);
string tname(testcase_name[ID]);
tname += "_gpu_wire_info";
ofstream of;
ifstream fptr;
fptr.open(tname.c_str());
vector<vector<int>> wire;
vector<double> pin_x;
vector<double> pin_y;
size_t wire_num;
size_t pin_num;
fptr >> wire_num >> pin_num;
wire.resize(wire_num);
size_t pin_sum = 0;
for( size_t i = 0 ; i < wire_num ; ++ i ){
size_t id;
double x;
fptr >> id;
wire[i].resize(id);
pin_sum += id;
for( size_t j = 0 ; j < wire[i].size() ; ++ j ){
fptr >> wire[i][j] >> x;
}
}
fptr.close();
read_pin_location( pin_x, pin_y );
int *start = (int*)malloc(sizeof(int)*wire.size() );
int *end = (int*)malloc(sizeof(int)*wire.size() );
int *pinInWire = (int*)malloc(sizeof(int)*pin_sum);
int acc = 0;
pin_sum = 0;
for( size_t i = 0 ; i < wire.size() ; ++ i ){
start[i] = acc;
end[i] = acc + wire[i].size();
acc += wire[i].size();
for( size_t j = 0 ; j < wire[i].size() ; ++ j ){
pinInWire[pin_sum] = wire[i][j];
pin_sum += 1;
}
}
auto begin_t = std::chrono::high_resolution_clock::now();
auto end_t = std::chrono::high_resolution_clock::now();
double *pin_x_d;
double *pin_y_d;
double *pin_exp_x_d;
double *pin_exp_y_d;
double *pin_exp_x_neg_d;
double *pin_exp_y_neg_d;
cudaMalloc((void **)&pin_x_d, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&pin_y_d, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&pin_exp_x_d, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&pin_exp_y_d, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&pin_exp_x_neg_d, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&pin_exp_y_neg_d, pin_x.size() * sizeof(double) );
// Copy pin coordinates to GPU and compute exponential values
begin_t = std::chrono::high_resolution_clock::now();
cudaMemcpy(pin_x_d, pin_x.data(), pin_x.size() * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(pin_y_d, pin_y.data(), pin_x.size() * sizeof(double), cudaMemcpyHostToDevice);
compute_pin_exp( pin_x.size(), pin_x_d, pin_y_d, pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d );
end_t = std::chrono::high_resolution_clock::now();
auto total_run_time = std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
int *start_d;
int *end_d;
int *pinInWire_d;
cudaMalloc((void **)&start_d, wire.size() * sizeof(int));
cudaMalloc((void **)&end_d, wire.size() * sizeof(int));
cudaMalloc((void **)&pinInWire_d, pin_sum* sizeof(int));
cudaMemcpy(start_d, start, wire.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(end_d, end, wire.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(pinInWire_d, pinInWire, pin_sum * sizeof(int), cudaMemcpyHostToDevice);
double *wire_exp_x_d;
double *wire_exp_y_d;
double *wire_exp_x_neg_d;
double *wire_exp_y_neg_d;
cudaMalloc((void **)&wire_exp_x_d, wire.size() * sizeof(double));
cudaMalloc((void **)&wire_exp_y_d, wire.size() * sizeof(double));
cudaMalloc((void **)&wire_exp_x_neg_d, wire.size() * sizeof(double));
cudaMalloc((void **)&wire_exp_y_neg_d, wire.size() * sizeof(double));
// Compute wire exp sum in GPU
begin_t = std::chrono::high_resolution_clock::now();
compute_wire_exp( wire.size(), pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d, start_d, end_d,
wire_exp_x_d, wire_exp_y_d, wire_exp_x_neg_d, wire_exp_y_neg_d, pinInWire_d );
end_t = std::chrono::high_resolution_clock::now();
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
start = static_cast<int*>( realloc( start, sizeof(int)*pin_x.size() ) );
end = static_cast<int*>( realloc( end, sizeof(int)*pin_x.size() ) );
vector<vector<int>> wireInPin;
wireInPin.resize( pin_x.size() );
for( int i = 0 ; i < wire.size() ; ++ i )
for( int j = 0 ; j < wire[i].size() ; ++ j )
wireInPin[wire[i][j]].push_back( i );
pin_sum = 0;
for( int i = 0 ; i < wireInPin.size(); ++ i ){
start[i] = pin_sum;
end[i] = start[i] + wireInPin[i].size();
memcpy(&pinInWire[start[i]] , wireInPin[i].data(), sizeof(int)*wireInPin[i].size() );
pin_sum += wireInPin[i].size();
}
cudaMemcpy(pinInWire_d, pinInWire, pin_sum * sizeof(int), cudaMemcpyHostToDevice);
int* s;
int* e;
cudaMalloc((void **)&s, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&e, pin_x.size() * sizeof(double) );
cudaMemcpy(s, start, pin_x.size() * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(e, end, pin_x.size() * sizeof(int), cudaMemcpyHostToDevice);
double *grad_x;
double *grad_y;
cudaMalloc((void **)&grad_x, pin_x.size() * sizeof(double) );
cudaMalloc((void **)&grad_y, pin_x.size() * sizeof(double) );
dim3 DimGrid(pin_x.size()/1024+1,1,1);
dim3 DimBlock(1024,1,1);
cudaEvent_t start_t, stop_t;
cudaEventCreate(&start_t);
cudaEventCreate(&stop_t);
cudaEventRecord(start_t);
// Compute gradient in GPU
begin_t = std::chrono::high_resolution_clock::now();
compute_grad_kernel<<<DimGrid,DimBlock>>>( s, e, pinInWire_d, pin_exp_x_d, pin_exp_y_d, pin_exp_x_neg_d, pin_exp_y_neg_d,
wire_exp_x_d, wire_exp_y_d, wire_exp_x_neg_d, wire_exp_y_neg_d,
pin_x.size(), grad_x, grad_y );
end_t = std::chrono::high_resolution_clock::now();
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
cudaEventRecord(stop_t);
cudaDeviceSynchronize(); // if we want to use printf in kernel, must have cudaDeviceSynchronize()
cudaEventSynchronize(stop_t);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start_t, stop_t);
std::cout << "Compute grad :" << milliseconds << " milli sec" << std::endl;
checkCUDAerror();
double *x_host = static_cast<double*>(malloc(sizeof(double)*pin_x.size()));
double *y_host = static_cast<double*>(malloc(sizeof(double)*pin_x.size()));
// Copy gradient from GPU to CPU
begin_t = std::chrono::high_resolution_clock::now();
cudaMemcpy(x_host, grad_x, pin_x.size() * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(y_host, grad_y, pin_x.size() * sizeof(double), cudaMemcpyDeviceToHost);
end_t = std::chrono::high_resolution_clock::now();
std::cout << "Copy grad D2H: " << std::chrono::duration_cast<std::chrono::milliseconds>(end_t-begin_t).count() << " milli sec" << std::endl;
total_run_time += std::chrono::duration_cast<std::chrono::microseconds>(end_t-begin_t).count();
std::cout << "Total Direct Wire GPU Run time = " << total_run_time << " micro sec" << std::endl;
// This is for debugging
//of.open("./gpu_result");
//of.setf(ios::fixed,ios::floatfield);
//of.precision(12);
//for( int i = 0; i < pin_x.size() ; ++ i ){
// if( i < 10 )
// //printf("%.25e %.25e\n", x_host[i], y_host[i]);
// cout << x_host[i] << " " << y_host[i] << '\n';
// of << y_host[i] << '\n';
//}
//of.close();
//delete [] y_host;
}
|
ddc6624d4a018e9e3039e7f43165acd936dd84cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @author: xiaomin wu
* @date: 1/16/2020
* */
#include <stdio.h>
#include "dense.h"
#include "densecu.h"
#include "funcs.h"
#define TIME
void denserun(DENSEHANDLER densehandler,unsigned int inN,unsigned int nodeNum,float* preOut,unsigned int oD, unsigned int fN){
#ifdef TIME
float elapsed=0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
//grid:{inN,oD*oD*fN,nodeN} here oD is new oD after max pooling
//block:{1}
hipLaunchKernelGGL(( forward), dim3({inN),dim3(oD*oD),fN},nodeNum, densehandler->outs, densehandler->Weights,preOut,oD,fN,nodeNum);
//grid: {inN,nodeN}
//block: {1}
hipLaunchKernelGGL(( addBias), dim3({inN),dim3(nodeNum}),1, 0, densehandler->outs,densehandler->bias,nodeNum);
//apply relu nonlinearlity to outputs
//reluDense<<<{inN*nodeNum},1>>>(densehandler->outs);
#ifdef TIME
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("denserun takes %.4f ms\n", elapsed);
#endif
}
void denserunD(DENSEHANDLER densehandler,unsigned int inN,unsigned int preNodeNum, unsigned int nodeNum,float* predenseouts){
#ifdef TIME
float elapsed=0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
//grid:{inN,preNodeNum,nodeN}
//block:{1}
hipLaunchKernelGGL(( forwardD), dim3({inN),dim3(preNodeNum),nodeNum},1, predenseouts,densehandler->Weights,densehandler->outs, preNodeNum, nodeNum);
//grid: {inN,nodeN}
//block: {1}
hipLaunchKernelGGL(( addBias), dim3({inN),dim3(nodeNum}),1, 0, densehandler->outs,densehandler->bias,nodeNum);
#ifdef TIME
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("denserunD takes %.4f ms\n", elapsed);
#endif
}
void headDenserunD(HEADDENSEHANDLER headdensehandler,unsigned int inN,unsigned int preNodeNum, unsigned int nodeNum,float* predenseouts){
#ifdef TIME
float elapsed=0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
//grid:{inN,preNodeNum,nodeN}
//block:{1}
hipLaunchKernelGGL(( forwardD), dim3({inN),dim3(preNodeNum),nodeNum},1, predenseouts,headdensehandler->Weights,headdensehandler->outs, preNodeNum, nodeNum);
//grid: {inN,nodeN}
//block: {1}
hipLaunchKernelGGL(( addBias), dim3({inN),dim3(nodeNum}),1, 0, headdensehandler->outs,headdensehandler->bias,nodeNum);
#ifdef TIME
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("headDenserunD takes %.4f ms\n", elapsed);
#endif
}
void denseFree(DENSEHANDLER densehandler){
hipFree(densehandler->Weights);
hipFree(densehandler->bias);
hipFree(densehandler->outs);
free(densehandler);
}
void headDenseFree(HEADDENSEHANDLER headdensehandler){
hipFree(headdensehandler->Weights);
hipFree(headdensehandler->bias);
hipFree(headdensehandler->outs);
hipFree(headdensehandler->inputs);
free(headdensehandler);
} | ddc6624d4a018e9e3039e7f43165acd936dd84cb.cu | /*
* @author: xiaomin wu
* @date: 1/16/2020
* */
#include <stdio.h>
#include "dense.h"
#include "densecu.h"
#include "funcs.h"
#define TIME
void denserun(DENSEHANDLER densehandler,unsigned int inN,unsigned int nodeNum,float* preOut,unsigned int oD, unsigned int fN){
#ifdef TIME
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
//grid:{inN,oD*oD*fN,nodeN} here oD is new oD after max pooling
//block:{1}
forward<<<{inN,oD*oD,fN},nodeNum>>>(densehandler->outs, densehandler->Weights,preOut,oD,fN,nodeNum);
//grid: {inN,nodeN}
//block: {1}
addBias<<<{inN,nodeNum},1>>>(densehandler->outs,densehandler->bias,nodeNum);
//apply relu nonlinearlity to outputs
//reluDense<<<{inN*nodeNum},1>>>(densehandler->outs);
#ifdef TIME
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("denserun takes %.4f ms\n", elapsed);
#endif
}
void denserunD(DENSEHANDLER densehandler,unsigned int inN,unsigned int preNodeNum, unsigned int nodeNum,float* predenseouts){
#ifdef TIME
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
//grid:{inN,preNodeNum,nodeN}
//block:{1}
forwardD<<<{inN,preNodeNum,nodeNum},1>>>(predenseouts,densehandler->Weights,densehandler->outs, preNodeNum, nodeNum);
//grid: {inN,nodeN}
//block: {1}
addBias<<<{inN,nodeNum},1>>>(densehandler->outs,densehandler->bias,nodeNum);
#ifdef TIME
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("denserunD takes %.4f ms\n", elapsed);
#endif
}
void headDenserunD(HEADDENSEHANDLER headdensehandler,unsigned int inN,unsigned int preNodeNum, unsigned int nodeNum,float* predenseouts){
#ifdef TIME
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
//grid:{inN,preNodeNum,nodeN}
//block:{1}
forwardD<<<{inN,preNodeNum,nodeNum},1>>>(predenseouts,headdensehandler->Weights,headdensehandler->outs, preNodeNum, nodeNum);
//grid: {inN,nodeN}
//block: {1}
addBias<<<{inN,nodeNum},1>>>(headdensehandler->outs,headdensehandler->bias,nodeNum);
#ifdef TIME
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("headDenserunD takes %.4f ms\n", elapsed);
#endif
}
void denseFree(DENSEHANDLER densehandler){
cudaFree(densehandler->Weights);
cudaFree(densehandler->bias);
cudaFree(densehandler->outs);
free(densehandler);
}
void headDenseFree(HEADDENSEHANDLER headdensehandler){
cudaFree(headdensehandler->Weights);
cudaFree(headdensehandler->bias);
cudaFree(headdensehandler->outs);
cudaFree(headdensehandler->inputs);
free(headdensehandler);
} |
5f9dd6f53128e7961c995738d2432cfef250d8c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void extract_hll(int n, char *in, char *out) {
int offset = (blockIdx.x * blockDim.x + threadIdx.x);
if (offset < n) {
uint64_t *hash = (uint64_t*)(in + (HASH_WIDTH * offset));
// Get the first HLL_PREFIX_BITS to determine the bucket
int bucket = hash[0] >> (64 - HLL_PREFIX_BITS);
// Finds the position of the least significant 1 (0 to 64)
int position = __ffsll(hash[1]);
// Adjust for the limit of the bucket
if (position == 0) {
position = HLL_MAX_SCAN - 1;
} else
position = min(position, HLL_MAX_SCAN) - 1;
// Update the output
unsigned int *outp = ((unsigned int*)out) + offset;
*outp = ((bucket << HLL_BUCKET_WIDTH) | position);
}
} | 5f9dd6f53128e7961c995738d2432cfef250d8c9.cu | #include "includes.h"
__global__ void extract_hll(int n, char *in, char *out) {
int offset = (blockIdx.x * blockDim.x + threadIdx.x);
if (offset < n) {
uint64_t *hash = (uint64_t*)(in + (HASH_WIDTH * offset));
// Get the first HLL_PREFIX_BITS to determine the bucket
int bucket = hash[0] >> (64 - HLL_PREFIX_BITS);
// Finds the position of the least significant 1 (0 to 64)
int position = __ffsll(hash[1]);
// Adjust for the limit of the bucket
if (position == 0) {
position = HLL_MAX_SCAN - 1;
} else
position = min(position, HLL_MAX_SCAN) - 1;
// Update the output
unsigned int *outp = ((unsigned int*)out) + offset;
*outp = ((bucket << HLL_BUCKET_WIDTH) | position);
}
} |
ab186637371ee4082d6ac3b5ab7a0d0cf6f70869.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_sigmoid_layer.hpp"
namespace caffe {
template <typename Ftype, typename Btype>
void CuDNNSigmoidLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(0),
activ_desc_,
cudnn::dataType<Ftype>::one,
this->fwd_bottom_desc_, bottom_data,
cudnn::dataType<Ftype>::zero,
this->fwd_top_desc_, top_data));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(0)));
}
template <typename Ftype, typename Btype>
void CuDNNSigmoidLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Btype* top_data = top[0]->gpu_data<Btype>();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(0),
activ_desc_,
cudnn::dataType<Btype>::one,
bwd_top_desc_, top_data, bwd_top_desc_, top_diff,
bwd_bottom_desc_, bottom_data,
cudnn::dataType<Btype>::zero,
bwd_bottom_desc_, bottom_diff));
CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(0)));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNSigmoidLayer);
} // namespace caffe
#endif
| ab186637371ee4082d6ac3b5ab7a0d0cf6f70869.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_sigmoid_layer.hpp"
namespace caffe {
template <typename Ftype, typename Btype>
void CuDNNSigmoidLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
CUDNN_CHECK(cudnnActivationForward(Caffe::cudnn_handle(0),
activ_desc_,
cudnn::dataType<Ftype>::one,
this->fwd_bottom_desc_, bottom_data,
cudnn::dataType<Ftype>::zero,
this->fwd_top_desc_, top_data));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(0)));
}
template <typename Ftype, typename Btype>
void CuDNNSigmoidLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Btype* top_data = top[0]->gpu_data<Btype>();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
CUDNN_CHECK(cudnnActivationBackward(Caffe::cudnn_handle(0),
activ_desc_,
cudnn::dataType<Btype>::one,
bwd_top_desc_, top_data, bwd_top_desc_, top_diff,
bwd_bottom_desc_, bottom_data,
cudnn::dataType<Btype>::zero,
bwd_bottom_desc_, bottom_diff));
CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(0)));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNSigmoidLayer);
} // namespace caffe
#endif
|
4a9399ec08339c9a9141b3739c2cc2d90fbf465e.hip | // !!! This is a file automatically generated by hipify!!!
#include <sstream>
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
//Error handling micro, wrap it around function whenever possible
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line);
//system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#include "validate.h"
#include "serial.h"
#include "utility.h"
#include "coloring.cu"
#include "cuda_query.cu"
int main(int argc, char* argv[])
{
cuda_query(0); //Set the deivde number here
if(argc != 2){
std::cout<<" Usage ./graphGPU INPUTFILE"<<std::endl;
std::cout<<"input files can be found under input/ "<<std::endl;
exit(EXIT_FAILURE);
}
bool* graph;
int V;
uint32_t numNNZ=0;
uint32_t NumRow=0; //same as V
//1) Read graph
if (std::string(argv[1]).find(".col") != std::string::npos){
ReadColFile(argv[1], &graph, &V, &numNNZ,&NumRow);
} else if (std::string(argv[1]).find(".mm") != std::string::npos){
ReadMMFile(argv[1], &graph, &V, &numNNZ,&NumRow);
} else{
std::cout<<" Invalid file formate!!"<<std::endl;
exit(EXIT_FAILURE);
}
/***********************************************************************/
//2) Allocate memory (on both sides)
int *col_id(NULL),*offset(NULL);
HANDLE_ERROR(hipMallocManaged(&col_id, numNNZ*sizeof(int)));
//last entry will be = numNonZero (so that we have always a pointer
//to the first and last id for each row with no need for if statments)
HANDLE_ERROR(hipMallocManaged(&offset, (NumRow +1)*sizeof(int)));
/***********************************************************************/
//3) Get graph in CSR format
getCSR(numNNZ, NumRow, graph, col_id, offset);
//printCSR(numNNZ,NumRow,col_id, offset);
/***********************************************************************/
//5) Color Vertices in paralllel
int* color;
HANDLE_ERROR(hipMallocManaged(&color, NumRow*sizeof(int)));
memset(color, 0, NumRow );
bool*set;
HANDLE_ERROR(hipMallocManaged(&set, NumRow*sizeof(bool)));
memset(set, 1, NumRow);
coloring(NumRow, numNNZ, col_id, offset, color, set);
//6) Validate parallel solution
printf("Parallel solution has %d colors\n", CountColors(V, color));
printf("Valid coloring: %d\n\n", IsValidColoring(graph, V, color));
//PrintSolution(color,V);
/***********************************************************************/
//7) Color Vertices on CPU
// GraphColoring(graph, V, &color);
// printf("Brute-foce solution has %d colors\n", CountColors(V, color));
// printf("Valid coloring: %d\n", IsValidColoring(graph, V, color));
//
// GreedyColoring(graph, V, &color);
// printf("\n***************\n");
// printf("Greedy solution has %d colors\n", CountColors(V, color));
// printf("Valid coloring: %d\n\n", IsValidColoring(graph, V, color));
// //PrintSolution(color,V);
/***********************************************************************/
//8)Compare solution
/***********************************************************************/
return 0;
}
| 4a9399ec08339c9a9141b3739c2cc2d90fbf465e.cu | #include <sstream>
#include <fstream>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//Error handling micro, wrap it around function whenever possible
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line);
//system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#include "validate.h"
#include "serial.h"
#include "utility.h"
#include "coloring.cu"
#include "cuda_query.cu"
int main(int argc, char* argv[])
{
cuda_query(0); //Set the deivde number here
if(argc != 2){
std::cout<<" Usage ./graphGPU INPUTFILE"<<std::endl;
std::cout<<"input files can be found under input/ "<<std::endl;
exit(EXIT_FAILURE);
}
bool* graph;
int V;
uint32_t numNNZ=0;
uint32_t NumRow=0; //same as V
//1) Read graph
if (std::string(argv[1]).find(".col") != std::string::npos){
ReadColFile(argv[1], &graph, &V, &numNNZ,&NumRow);
} else if (std::string(argv[1]).find(".mm") != std::string::npos){
ReadMMFile(argv[1], &graph, &V, &numNNZ,&NumRow);
} else{
std::cout<<" Invalid file formate!!"<<std::endl;
exit(EXIT_FAILURE);
}
/***********************************************************************/
//2) Allocate memory (on both sides)
int *col_id(NULL),*offset(NULL);
HANDLE_ERROR(cudaMallocManaged(&col_id, numNNZ*sizeof(int)));
//last entry will be = numNonZero (so that we have always a pointer
//to the first and last id for each row with no need for if statments)
HANDLE_ERROR(cudaMallocManaged(&offset, (NumRow +1)*sizeof(int)));
/***********************************************************************/
//3) Get graph in CSR format
getCSR(numNNZ, NumRow, graph, col_id, offset);
//printCSR(numNNZ,NumRow,col_id, offset);
/***********************************************************************/
//5) Color Vertices in paralllel
int* color;
HANDLE_ERROR(cudaMallocManaged(&color, NumRow*sizeof(int)));
memset(color, 0, NumRow );
bool*set;
HANDLE_ERROR(cudaMallocManaged(&set, NumRow*sizeof(bool)));
memset(set, 1, NumRow);
coloring(NumRow, numNNZ, col_id, offset, color, set);
//6) Validate parallel solution
printf("Parallel solution has %d colors\n", CountColors(V, color));
printf("Valid coloring: %d\n\n", IsValidColoring(graph, V, color));
//PrintSolution(color,V);
/***********************************************************************/
//7) Color Vertices on CPU
// GraphColoring(graph, V, &color);
// printf("Brute-foce solution has %d colors\n", CountColors(V, color));
// printf("Valid coloring: %d\n", IsValidColoring(graph, V, color));
//
// GreedyColoring(graph, V, &color);
// printf("\n***************\n");
// printf("Greedy solution has %d colors\n", CountColors(V, color));
// printf("Valid coloring: %d\n\n", IsValidColoring(graph, V, color));
// //PrintSolution(color,V);
/***********************************************************************/
//8)Compare solution
/***********************************************************************/
return 0;
}
|
0ae5c96438642bb4c5eda7270e5ac043256327d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__ void initwith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i+= stride)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
hipGetDevice(&deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
// initWith(3, a, N);
// initWith(4, b, N);
// initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 100;
numberOfBlocks = 1;
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( initwith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 3, a, N);
hipLaunchKernelGGL(( initwith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 4, b, N);
hipLaunchKernelGGL(( initwith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 0, c, N);
//no prefetching memory
// 11.30 D -> H
// 77.30 PF
// with prefetching memory
// 11.30 D -> H
// 77.30 PF
/*
* Add asynchronous prefetching after the data is initialized,
* and before launching the kernel, to avoid host to GPU page
* faulting.
*/
// hipMemPrefetchAsync(a, size, deviceId);
// hipMemPrefetchAsync(b, size, deviceId);
// hipMemPrefetchAsync(c, size, deviceId);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 0ae5c96438642bb4c5eda7270e5ac043256327d1.cu | #include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__ void initwith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i+= stride)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
cudaGetDevice(&deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
// initWith(3, a, N);
// initWith(4, b, N);
// initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 100;
numberOfBlocks = 1;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
initwith<<<numberOfBlocks, threadsPerBlock>>>(3, a, N);
initwith<<<numberOfBlocks, threadsPerBlock>>>(4, b, N);
initwith<<<numberOfBlocks, threadsPerBlock>>>(0, c, N);
//no prefetching memory
// 11.30 D -> H
// 77.30 PF
// with prefetching memory
// 11.30 D -> H
// 77.30 PF
/*
* Add asynchronous prefetching after the data is initialized,
* and before launching the kernel, to avoid host to GPU page
* faulting.
*/
// cudaMemPrefetchAsync(a, size, deviceId);
// cudaMemPrefetchAsync(b, size, deviceId);
// cudaMemPrefetchAsync(c, size, deviceId);
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
ebd70ca9c9e917f3d2b0a8b24f66b2a7d437f741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/roll_kernel_utils.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, int Dim>
__global__ void RollCudaKernel(const T* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, T* out_ptr) {
int32_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
int32_t shifted_global_index =
getShiftedIndex<Dim>(global_index, shifts.val, shape.val, stride.val);
out_ptr[global_index] = in_ptr[shifted_global_index];
global_index += step;
}
}
template<typename T, int Dim>
struct GpuRollFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, T* out_ptr) {
hipLaunchKernelGGL(( RollCudaKernel<T, Dim>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
in_ptr, shifts, shape, stride, elements, out_ptr);
}
};
template<int Dim>
struct GpuRollFunctor<float16, Dim> final {
void operator()(ep::Stream* stream, const float16* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, float16* out_ptr) {
hipLaunchKernelGGL(( RollCudaKernel<half, Dim>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
reinterpret_cast<const half*>(in_ptr), shifts, shape, stride, elements,
reinterpret_cast<half*>(out_ptr));
}
};
template<typename T>
__global__ void RollFlattenCudaKernel(const T* in_ptr, const int64_t start,
const int64_t elem_count_minus_start, const int64_t elements,
T* out_ptr) {
int64_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
int64_t source_idx = 0;
if (global_index >= elem_count_minus_start) {
source_idx = global_index - elem_count_minus_start;
} else {
source_idx = global_index + start;
}
out_ptr[global_index] = in_ptr[source_idx];
global_index += step;
}
}
template<typename T>
struct GpuRollFlattenFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const int64_t start,
const int64_t elem_count_minus_start, const int64_t elements, T* out_ptr) {
hipLaunchKernelGGL(( RollFlattenCudaKernel<T>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
in_ptr, start, elem_count_minus_start, elements, out_ptr);
}
};
template<>
void GpuRollFlattenFunctor<float16>::operator()(ep::Stream* stream, const float16* in_ptr,
const int64_t start,
const int64_t elem_count_minus_start,
const int64_t elements, float16* out_ptr) {
hipLaunchKernelGGL(( RollFlattenCudaKernel<half>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
reinterpret_cast<const half*>(in_ptr), start, elem_count_minus_start, elements,
reinterpret_cast<half*>(out_ptr));
}
template<typename T>
__global__ void Roll1DimCudaKernel(const T* in_ptr, const int32_t stride_x_size,
const int32_t stride, const int32_t size_minus_start,
const int32_t size_minus_start_x_stride,
const int32_t start_x_stride, const int64_t elements,
T* out_ptr) {
int32_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
// roll dim idx is the index of linear_index along the rolling dimension.
int32_t roll_dim_idx = global_index % stride_x_size / stride;
// index into the source data to find appropriate value.
int32_t source_idx = 0;
if (roll_dim_idx >= size_minus_start) {
source_idx = global_index - size_minus_start_x_stride;
} else {
source_idx = global_index + start_x_stride;
}
out_ptr[global_index] = in_ptr[source_idx];
global_index += step;
}
}
template<typename T>
struct GpuRoll1DimFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const int32_t stride_x_size,
const int32_t stride, const int32_t size_minus_start,
const int32_t size_minus_start_x_stride, const int32_t start_x_stride,
const int64_t elements, T* out_ptr) {
hipLaunchKernelGGL(( Roll1DimCudaKernel<T>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
in_ptr, stride_x_size, stride, size_minus_start, size_minus_start_x_stride, start_x_stride,
elements, out_ptr);
}
};
template<>
void GpuRoll1DimFunctor<float16>::operator()(ep::Stream* stream, const float16* in_ptr,
const int32_t stride_x_size, const int32_t stride,
const int32_t size_minus_start,
const int32_t size_minus_start_x_stride,
const int32_t start_x_stride, const int64_t elements,
float16* out_ptr) {
hipLaunchKernelGGL(( Roll1DimCudaKernel<half>), dim3(BlocksNum4ThreadsNum(elements)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
reinterpret_cast<const half*>(in_ptr), stride_x_size, stride, size_minus_start,
size_minus_start_x_stride, start_x_stride, elements, reinterpret_cast<half*>(out_ptr));
}
} // namespace
template<typename T>
class GpuRollKernel final : public user_op::OpKernel {
public:
GpuRollKernel() = default;
~GpuRollKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::vector<int32_t>& shifts = ctx->Attr<std::vector<int32_t>>("shifts");
const std::vector<int32_t>& dims = ctx->Attr<std::vector<int32_t>>("dims");
const T* in_ptr = in->dptr<T>();
T* out_ptr = out->mut_dptr<T>();
const int64_t elem_count = out->shape_view().elem_cnt();
if (dims[0] == -1) {
// NOTE(Liang Depeng): Borrow the implementation of pytorch and simplify to 1d array case.
int64_t start = (elem_count - shifts[0]) % elem_count;
if (start < 0) start = start + elem_count;
const int64_t elem_count_minus_start = elem_count - start;
GpuRollFlattenFunctor<T>()(ctx->stream(), in_ptr, start, elem_count_minus_start, elem_count,
out_ptr);
} else {
SHAPE new_shape{};
SHIFTS new_shifts{};
int32_t num_axes = 0;
computeParams(in->shape_view(), shifts, dims, new_shifts.val, new_shape.val, &num_axes);
STRIDE stride{};
initStride(stride, new_shape, num_axes);
if (dims.size() == 1) {
// NOTE(Liang Depeng): Borrow the implementation of pytorch
const int32_t size = new_shape.val[dims[0]];
int32_t start = (size - new_shifts.val[dims[0]]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if (start < 0) start = start + size;
const int32_t stride_x_size = stride.val[dims[0]] * size;
const int32_t size_minus_start = size - start;
const int32_t size_minus_start_x_stride = size_minus_start * stride.val[dims[0]];
const int32_t start_x_stride = start * stride.val[dims[0]];
GpuRoll1DimFunctor<T>()(ctx->stream(), in_ptr, stride_x_size, stride.val[dims[0]],
size_minus_start, size_minus_start_x_stride, start_x_stride,
elem_count, out_ptr);
} else {
transformShifts(new_shifts.val, new_shape.val, num_axes);
switch (num_axes) {
case 1:
GpuRollFunctor<T, 1>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 2:
GpuRollFunctor<T, 2>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 3:
GpuRollFunctor<T, 3>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 4:
GpuRollFunctor<T, 4>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 5:
GpuRollFunctor<T, 5>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 6:
GpuRollFunctor<T, 6>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 7:
GpuRollFunctor<T, 7>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 8:
GpuRollFunctor<T, 8>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 9:
GpuRollFunctor<T, 9>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 10:
GpuRollFunctor<T, 10>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 11:
GpuRollFunctor<T, 11>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 12:
GpuRollFunctor<T, 12>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 13:
GpuRollFunctor<T, 13>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 14:
GpuRollFunctor<T, 14>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 15:
GpuRollFunctor<T, 15>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 16:
GpuRollFunctor<T, 16>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
default: break;
}
}
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_ROLL_KERNEL(dtype) \
REGISTER_USER_KERNEL("roll").SetCreateFn<GpuRollKernel<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value))
REGISTER_ROLL_KERNEL(float);
REGISTER_ROLL_KERNEL(double);
REGISTER_ROLL_KERNEL(float16);
REGISTER_ROLL_KERNEL(bool);
REGISTER_ROLL_KERNEL(uint8_t);
REGISTER_ROLL_KERNEL(int8_t);
REGISTER_ROLL_KERNEL(int32_t);
REGISTER_ROLL_KERNEL(int64_t);
} // namespace oneflow
| ebd70ca9c9e917f3d2b0a8b24f66b2a7d437f741.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include "oneflow/user/kernels/roll_kernel_utils.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, int Dim>
__global__ void RollCudaKernel(const T* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, T* out_ptr) {
int32_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
int32_t shifted_global_index =
getShiftedIndex<Dim>(global_index, shifts.val, shape.val, stride.val);
out_ptr[global_index] = in_ptr[shifted_global_index];
global_index += step;
}
}
template<typename T, int Dim>
struct GpuRollFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, T* out_ptr) {
RollCudaKernel<T, Dim><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
in_ptr, shifts, shape, stride, elements, out_ptr);
}
};
template<int Dim>
struct GpuRollFunctor<float16, Dim> final {
void operator()(ep::Stream* stream, const float16* in_ptr, const SHIFTS shifts, const SHAPE shape,
const STRIDE stride, const int64_t elements, float16* out_ptr) {
RollCudaKernel<half, Dim><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
reinterpret_cast<const half*>(in_ptr), shifts, shape, stride, elements,
reinterpret_cast<half*>(out_ptr));
}
};
template<typename T>
__global__ void RollFlattenCudaKernel(const T* in_ptr, const int64_t start,
const int64_t elem_count_minus_start, const int64_t elements,
T* out_ptr) {
int64_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
int64_t source_idx = 0;
if (global_index >= elem_count_minus_start) {
source_idx = global_index - elem_count_minus_start;
} else {
source_idx = global_index + start;
}
out_ptr[global_index] = in_ptr[source_idx];
global_index += step;
}
}
template<typename T>
struct GpuRollFlattenFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const int64_t start,
const int64_t elem_count_minus_start, const int64_t elements, T* out_ptr) {
RollFlattenCudaKernel<T><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
in_ptr, start, elem_count_minus_start, elements, out_ptr);
}
};
template<>
void GpuRollFlattenFunctor<float16>::operator()(ep::Stream* stream, const float16* in_ptr,
const int64_t start,
const int64_t elem_count_minus_start,
const int64_t elements, float16* out_ptr) {
RollFlattenCudaKernel<half><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
reinterpret_cast<const half*>(in_ptr), start, elem_count_minus_start, elements,
reinterpret_cast<half*>(out_ptr));
}
template<typename T>
__global__ void Roll1DimCudaKernel(const T* in_ptr, const int32_t stride_x_size,
const int32_t stride, const int32_t size_minus_start,
const int32_t size_minus_start_x_stride,
const int32_t start_x_stride, const int64_t elements,
T* out_ptr) {
int32_t global_index = (blockDim.x * blockIdx.x) + threadIdx.x;
int32_t step = gridDim.x * blockDim.x;
while (global_index < elements) {
// roll dim idx is the index of linear_index along the rolling dimension.
int32_t roll_dim_idx = global_index % stride_x_size / stride;
// index into the source data to find appropriate value.
int32_t source_idx = 0;
if (roll_dim_idx >= size_minus_start) {
source_idx = global_index - size_minus_start_x_stride;
} else {
source_idx = global_index + start_x_stride;
}
out_ptr[global_index] = in_ptr[source_idx];
global_index += step;
}
}
template<typename T>
struct GpuRoll1DimFunctor final {
void operator()(ep::Stream* stream, const T* in_ptr, const int32_t stride_x_size,
const int32_t stride, const int32_t size_minus_start,
const int32_t size_minus_start_x_stride, const int32_t start_x_stride,
const int64_t elements, T* out_ptr) {
Roll1DimCudaKernel<T><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
in_ptr, stride_x_size, stride, size_minus_start, size_minus_start_x_stride, start_x_stride,
elements, out_ptr);
}
};
template<>
void GpuRoll1DimFunctor<float16>::operator()(ep::Stream* stream, const float16* in_ptr,
const int32_t stride_x_size, const int32_t stride,
const int32_t size_minus_start,
const int32_t size_minus_start_x_stride,
const int32_t start_x_stride, const int64_t elements,
float16* out_ptr) {
Roll1DimCudaKernel<half><<<BlocksNum4ThreadsNum(elements), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
reinterpret_cast<const half*>(in_ptr), stride_x_size, stride, size_minus_start,
size_minus_start_x_stride, start_x_stride, elements, reinterpret_cast<half*>(out_ptr));
}
} // namespace
template<typename T>
class GpuRollKernel final : public user_op::OpKernel {
public:
GpuRollKernel() = default;
~GpuRollKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const std::vector<int32_t>& shifts = ctx->Attr<std::vector<int32_t>>("shifts");
const std::vector<int32_t>& dims = ctx->Attr<std::vector<int32_t>>("dims");
const T* in_ptr = in->dptr<T>();
T* out_ptr = out->mut_dptr<T>();
const int64_t elem_count = out->shape_view().elem_cnt();
if (dims[0] == -1) {
// NOTE(Liang Depeng): Borrow the implementation of pytorch and simplify to 1d array case.
int64_t start = (elem_count - shifts[0]) % elem_count;
if (start < 0) start = start + elem_count;
const int64_t elem_count_minus_start = elem_count - start;
GpuRollFlattenFunctor<T>()(ctx->stream(), in_ptr, start, elem_count_minus_start, elem_count,
out_ptr);
} else {
SHAPE new_shape{};
SHIFTS new_shifts{};
int32_t num_axes = 0;
computeParams(in->shape_view(), shifts, dims, new_shifts.val, new_shape.val, &num_axes);
STRIDE stride{};
initStride(stride, new_shape, num_axes);
if (dims.size() == 1) {
// NOTE(Liang Depeng): Borrow the implementation of pytorch
const int32_t size = new_shape.val[dims[0]];
int32_t start = (size - new_shifts.val[dims[0]]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if (start < 0) start = start + size;
const int32_t stride_x_size = stride.val[dims[0]] * size;
const int32_t size_minus_start = size - start;
const int32_t size_minus_start_x_stride = size_minus_start * stride.val[dims[0]];
const int32_t start_x_stride = start * stride.val[dims[0]];
GpuRoll1DimFunctor<T>()(ctx->stream(), in_ptr, stride_x_size, stride.val[dims[0]],
size_minus_start, size_minus_start_x_stride, start_x_stride,
elem_count, out_ptr);
} else {
transformShifts(new_shifts.val, new_shape.val, num_axes);
switch (num_axes) {
case 1:
GpuRollFunctor<T, 1>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 2:
GpuRollFunctor<T, 2>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 3:
GpuRollFunctor<T, 3>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 4:
GpuRollFunctor<T, 4>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 5:
GpuRollFunctor<T, 5>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 6:
GpuRollFunctor<T, 6>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 7:
GpuRollFunctor<T, 7>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 8:
GpuRollFunctor<T, 8>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 9:
GpuRollFunctor<T, 9>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride, elem_count,
out_ptr);
break;
case 10:
GpuRollFunctor<T, 10>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 11:
GpuRollFunctor<T, 11>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 12:
GpuRollFunctor<T, 12>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 13:
GpuRollFunctor<T, 13>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 14:
GpuRollFunctor<T, 14>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 15:
GpuRollFunctor<T, 15>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
case 16:
GpuRollFunctor<T, 16>()(ctx->stream(), in_ptr, new_shifts, new_shape, stride,
elem_count, out_ptr);
break;
default: break;
}
}
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_ROLL_KERNEL(dtype) \
REGISTER_USER_KERNEL("roll").SetCreateFn<GpuRollKernel<dtype>>().SetIsMatchedHob( \
(user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value))
REGISTER_ROLL_KERNEL(float);
REGISTER_ROLL_KERNEL(double);
REGISTER_ROLL_KERNEL(float16);
REGISTER_ROLL_KERNEL(bool);
REGISTER_ROLL_KERNEL(uint8_t);
REGISTER_ROLL_KERNEL(int8_t);
REGISTER_ROLL_KERNEL(int32_t);
REGISTER_ROLL_KERNEL(int64_t);
} // namespace oneflow
|
f2c79c4f7b3662f1fac8d16fa92297e1706cf2ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_fmax (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(fmax)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} | f2c79c4f7b3662f1fac8d16fa92297e1706cf2ce.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_fmax (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(fmax)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} |
187a48d48568c4c1046385a60ca235e25d61763b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <hip/hip_runtime.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_2000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_2000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_2000::PATH_TO_CENTROIDS;
const auto LOG_NAIVE = mean_shift::cuda::bench_3d::case_2000::LOG_NAIVE;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_2000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_2000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_2000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_2000::BLOCKS;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_naive(float *data, float *data_next) {
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
hipMalloc(&dev_data, data_bytes);
hipMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice);
hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice);
// Run mean shift clustering
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
hipLaunchKernelGGL(( mean_shift_naive), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next);
hipDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_NAIVE, ',');
return 0;
} | 187a48d48568c4c1046385a60ca235e25d61763b.cu | #include <cassert>
#include <chrono>
#include "../constants_bench_3d.h"
#include <cuda.h>
#include <iostream>
#include "../../../../utils.h"
// Dataset
constexpr auto N = mean_shift::cuda::bench_3d::case_2000::N;
constexpr auto D = mean_shift::cuda::bench_3d::D;
constexpr auto M = mean_shift::cuda::bench_3d::M;
const auto PATH_TO_DATA = mean_shift::cuda::bench_3d::case_2000::PATH_TO_DATA;
const auto PATH_TO_CENTROIDS = mean_shift::cuda::bench_3d::case_2000::PATH_TO_CENTROIDS;
const auto LOG_NAIVE = mean_shift::cuda::bench_3d::case_2000::LOG_NAIVE;
// Hyperparams
constexpr auto RADIUS = mean_shift::cuda::bench_3d::case_2000::RADIUS;
constexpr auto NUM_ITER = mean_shift::cuda::bench_3d::NUM_ITER;
constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::bench_3d::case_2000::DBL_SIGMA_SQ;
constexpr auto MIN_DISTANCE = mean_shift::cuda::bench_3d::case_2000::MIN_DISTANCE;
// Device
constexpr auto THREADS = mean_shift::cuda::bench_3d::THREADS;
constexpr auto BLOCKS = mean_shift::cuda::bench_3d::case_2000::BLOCKS;
// Benchmarking
constexpr auto NUM_TRIALS = mean_shift::cuda::bench_3d::NUM_TRIALS;
__global__ void mean_shift_naive(float *data, float *data_next) {
size_t tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < N) {
size_t row = tid * D;
float new_position[D] = {0.};
float tot_weight = 0.;
for (size_t i = 0; i < N; ++i) {
size_t row_n = i * D;
float sq_dist = 0.;
for (size_t j = 0; j < D; ++j) {
sq_dist += (data[row + j] - data[row_n + j]) * (data[row + j] - data[row_n + j]);
}
if (sq_dist <= RADIUS) {
float weight = expf(-sq_dist / DBL_SIGMA_SQ);
for (size_t j = 0; j < D; ++j) {
new_position[j] += weight * data[row_n + j];
}
tot_weight += weight;
}
}
for (size_t j = 0; j < D; ++j) {
data_next[row + j] = new_position[j] / tot_weight;
}
}
return;
}
double run_once() {
// Load data
std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ',');
std::array<float, N * D> data_next {};
float *dev_data;
float *dev_data_next;
// Allocate GPU memory
size_t data_bytes = N * D * sizeof(float);
cudaMalloc(&dev_data, data_bytes);
cudaMalloc(&dev_data_next, data_bytes);
// Copy to GPU memory
cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice);
// Run mean shift clustering
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < NUM_ITER; ++i) {
mean_shift_naive<<<BLOCKS, THREADS>>>(dev_data, dev_data_next);
cudaDeviceSynchronize();
mean_shift::cuda::utils::swap(dev_data, dev_data_next);
}
cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost);
const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE);
auto end = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
// Check if correct number
assert(centroids.size() == M);
return duration;
}
int main() {
std::array<double, NUM_TRIALS> exec_times;
for (auto i = 0; i < NUM_TRIALS; ++i)
exec_times[i] = run_once();
mean_shift::cuda::utils::write_csv<double, NUM_TRIALS>(exec_times, LOG_NAIVE, ',');
return 0;
} |
6d90ce94afc97b210eeb283408d792721500a8aa.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<cuda.h>
int main()
{
hipDeviceProp_t p;
int count,i,flag=0;
hipGetDeviceCount(&count);
for(i=0;i<count;i++)
{
hipGetDeviceProperties(&p,i);
if(p.major==1 && p.minor==2)
{
hipSetDevice(i);
printf("GPU with Compute Capability 1.2 is set as current GPU on your system.\n");
flag=1;
}
| 6d90ce94afc97b210eeb283408d792721500a8aa.cu | #include<stdio.h>
#include<cuda.h>
int main()
{
cudaDeviceProp p;
int count,i,flag=0;
cudaGetDeviceCount(&count);
for(i=0;i<count;i++)
{
cudaGetDeviceProperties(&p,i);
if(p.major==1 && p.minor==2)
{
cudaSetDevice(i);
printf("GPU with Compute Capability 1.2 is set as current GPU on your system.\n");
flag=1;
}
|
4aa7ac0c667a35ac5d7e5fbba9a883fb43ddfda9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/random_projection/rproj_c.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <distance/distance.cuh>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <random>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T, int N, int M>
class RPROJTest : public ::testing::Test {
protected:
T* transpose(T* in, int n_rows, int n_cols) {
hipStream_t stream = h.get_stream();
hipblasHandle_t cublas_handle = h.get_cublas_handle();
T* result;
raft::allocate(result, n_rows * n_cols);
raft::linalg::transpose(h, in, result, n_rows, n_cols, stream);
CUDA_CHECK(hipPeekAtLastError());
CUDA_CHECK(hipFree(in));
return result;
}
void generate_data() {
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_real_distribution<T> dist(0, 1);
h_input.resize(N * M);
for (auto& i : h_input) {
i = dist(rng);
}
raft::allocate(d_input, h_input.size());
raft::update_device(d_input, h_input.data(), h_input.size(), NULL);
//d_input = transpose(d_input, N, M);
// From row major to column major (this operation is only useful for non-random datasets)
}
void gaussianTest() {
params1 = new paramsRPROJ();
*params1 = {
N, // number of samples
M, // number of features
-1, // number of components
epsilon, // error tolerance
true, // gaussian or sparse method
-1.0, // auto density
false, // not used
42 // random seed
};
hipStream_t stream = h.get_stream();
auto alloc = h.get_device_allocator();
random_matrix1 = new rand_mat<T>(alloc, stream);
RPROJfit(h, random_matrix1, params1);
raft::allocate(d_output1, N * params1->n_components);
RPROJtransform(h, d_input, random_matrix1, d_output1, params1);
d_output1 = transpose(
d_output1, N, params1->n_components); // From column major to row major
}
void sparseTest() {
params2 = new paramsRPROJ();
*params2 = {
N, // number of samples
M, // number of features
-1, // number of components (-1: auto-deduction)
epsilon, // error tolerance
false, // gaussian or sparse method
-1.0, // auto density (-1: auto-deduction)
false, // not used
42 // random seed
};
hipStream_t stream = h.get_stream();
auto alloc = h.get_device_allocator();
random_matrix2 = new rand_mat<T>(alloc, stream);
RPROJfit(h, random_matrix2, params2);
raft::allocate(d_output2, N * params2->n_components);
RPROJtransform(h, d_input, random_matrix2, d_output2, params2);
d_output2 = transpose(
d_output2, N, params2->n_components); // From column major to row major
}
void SetUp() override {
epsilon = 0.2;
generate_data();
gaussianTest();
sparseTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(d_input));
CUDA_CHECK(hipFree(d_output1));
CUDA_CHECK(hipFree(d_output2));
delete params1;
delete random_matrix1;
delete params2;
delete random_matrix2;
}
void random_matrix_check() {
size_t D = johnson_lindenstrauss_min_dim(N, epsilon);
ASSERT_TRUE(params1->n_components == D);
ASSERT_TRUE(random_matrix1->dense_data.size() > 0);
ASSERT_TRUE(random_matrix1->type == dense);
ASSERT_TRUE(params2->n_components == D);
ASSERT_TRUE(params2->density == 1 / sqrt(M));
ASSERT_TRUE(random_matrix2->indices.size() > 0);
ASSERT_TRUE(random_matrix2->indptr.size() > 0);
ASSERT_TRUE(random_matrix2->sparse_data.size() > 0);
ASSERT_TRUE(random_matrix2->type == sparse);
}
void epsilon_check() {
int D = johnson_lindenstrauss_min_dim(N, epsilon);
constexpr auto distance_type =
raft::distance::DistanceType::L2SqrtUnexpanded;
size_t workspaceSize = 0;
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
T* d_pdist;
raft::allocate(d_pdist, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_input, d_input, d_pdist, N, N, M, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(hipPeekAtLastError());
T* h_pdist = new T[N * N];
raft::update_host(h_pdist, d_pdist, N * N, NULL);
CUDA_CHECK(hipFree(d_pdist));
T* d_pdist1;
raft::allocate(d_pdist1, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_output1, d_output1, d_pdist1, N, N, D, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(hipPeekAtLastError());
T* h_pdist1 = new T[N * N];
raft::update_host(h_pdist1, d_pdist1, N * N, NULL);
CUDA_CHECK(hipFree(d_pdist1));
T* d_pdist2;
raft::allocate(d_pdist2, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_output2, d_output2, d_pdist2, N, N, D, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(hipPeekAtLastError());
T* h_pdist2 = new T[N * N];
raft::update_host(h_pdist2, d_pdist2, N * N, NULL);
CUDA_CHECK(hipFree(d_pdist2));
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j <= i; j++) {
T pdist = h_pdist[i * N + j];
T pdist1 = h_pdist1[i * N + j];
T pdist2 = h_pdist2[i * N + j];
T lower_bound = (1.0 - epsilon) * pdist;
T upper_bound = (1.0 + epsilon) * pdist;
ASSERT_TRUE(lower_bound <= pdist1 && pdist1 <= upper_bound);
ASSERT_TRUE(lower_bound <= pdist2 && pdist2 <= upper_bound);
}
}
delete[] h_pdist;
delete[] h_pdist1;
delete[] h_pdist2;
}
protected:
raft::handle_t h;
paramsRPROJ* params1;
T epsilon;
std::vector<T> h_input;
T* d_input;
rand_mat<T>* random_matrix1;
T* d_output1;
paramsRPROJ* params2;
rand_mat<T>* random_matrix2;
T* d_output2;
};
typedef RPROJTest<float, 500, 2000> RPROJTestF1;
TEST_F(RPROJTestF1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 500, 2000> RPROJTestD1;
TEST_F(RPROJTestD1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<float, 5000, 3500> RPROJTestF2;
TEST_F(RPROJTestF2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF2, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 5000, 3500> RPROJTestD2;
TEST_F(RPROJTestD2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD2, EpsilonCheck) { epsilon_check(); }
} // end namespace ML
| 4aa7ac0c667a35ac5d7e5fbba9a883fb43ddfda9.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/random_projection/rproj_c.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <distance/distance.cuh>
#include <iostream>
#include <raft/cuda_utils.cuh>
#include <random>
#include <vector>
namespace ML {
using namespace MLCommon;
template <typename T, int N, int M>
class RPROJTest : public ::testing::Test {
protected:
T* transpose(T* in, int n_rows, int n_cols) {
cudaStream_t stream = h.get_stream();
cublasHandle_t cublas_handle = h.get_cublas_handle();
T* result;
raft::allocate(result, n_rows * n_cols);
raft::linalg::transpose(h, in, result, n_rows, n_cols, stream);
CUDA_CHECK(cudaPeekAtLastError());
CUDA_CHECK(cudaFree(in));
return result;
}
void generate_data() {
std::random_device rd;
std::mt19937 rng(rd());
std::uniform_real_distribution<T> dist(0, 1);
h_input.resize(N * M);
for (auto& i : h_input) {
i = dist(rng);
}
raft::allocate(d_input, h_input.size());
raft::update_device(d_input, h_input.data(), h_input.size(), NULL);
//d_input = transpose(d_input, N, M);
// From row major to column major (this operation is only useful for non-random datasets)
}
void gaussianTest() {
params1 = new paramsRPROJ();
*params1 = {
N, // number of samples
M, // number of features
-1, // number of components
epsilon, // error tolerance
true, // gaussian or sparse method
-1.0, // auto density
false, // not used
42 // random seed
};
cudaStream_t stream = h.get_stream();
auto alloc = h.get_device_allocator();
random_matrix1 = new rand_mat<T>(alloc, stream);
RPROJfit(h, random_matrix1, params1);
raft::allocate(d_output1, N * params1->n_components);
RPROJtransform(h, d_input, random_matrix1, d_output1, params1);
d_output1 = transpose(
d_output1, N, params1->n_components); // From column major to row major
}
void sparseTest() {
params2 = new paramsRPROJ();
*params2 = {
N, // number of samples
M, // number of features
-1, // number of components (-1: auto-deduction)
epsilon, // error tolerance
false, // gaussian or sparse method
-1.0, // auto density (-1: auto-deduction)
false, // not used
42 // random seed
};
cudaStream_t stream = h.get_stream();
auto alloc = h.get_device_allocator();
random_matrix2 = new rand_mat<T>(alloc, stream);
RPROJfit(h, random_matrix2, params2);
raft::allocate(d_output2, N * params2->n_components);
RPROJtransform(h, d_input, random_matrix2, d_output2, params2);
d_output2 = transpose(
d_output2, N, params2->n_components); // From column major to row major
}
void SetUp() override {
epsilon = 0.2;
generate_data();
gaussianTest();
sparseTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(d_input));
CUDA_CHECK(cudaFree(d_output1));
CUDA_CHECK(cudaFree(d_output2));
delete params1;
delete random_matrix1;
delete params2;
delete random_matrix2;
}
void random_matrix_check() {
size_t D = johnson_lindenstrauss_min_dim(N, epsilon);
ASSERT_TRUE(params1->n_components == D);
ASSERT_TRUE(random_matrix1->dense_data.size() > 0);
ASSERT_TRUE(random_matrix1->type == dense);
ASSERT_TRUE(params2->n_components == D);
ASSERT_TRUE(params2->density == 1 / sqrt(M));
ASSERT_TRUE(random_matrix2->indices.size() > 0);
ASSERT_TRUE(random_matrix2->indptr.size() > 0);
ASSERT_TRUE(random_matrix2->sparse_data.size() > 0);
ASSERT_TRUE(random_matrix2->type == sparse);
}
void epsilon_check() {
int D = johnson_lindenstrauss_min_dim(N, epsilon);
constexpr auto distance_type =
raft::distance::DistanceType::L2SqrtUnexpanded;
size_t workspaceSize = 0;
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
T* d_pdist;
raft::allocate(d_pdist, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_input, d_input, d_pdist, N, N, M, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(cudaPeekAtLastError());
T* h_pdist = new T[N * N];
raft::update_host(h_pdist, d_pdist, N * N, NULL);
CUDA_CHECK(cudaFree(d_pdist));
T* d_pdist1;
raft::allocate(d_pdist1, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_output1, d_output1, d_pdist1, N, N, D, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(cudaPeekAtLastError());
T* h_pdist1 = new T[N * N];
raft::update_host(h_pdist1, d_pdist1, N * N, NULL);
CUDA_CHECK(cudaFree(d_pdist1));
T* d_pdist2;
raft::allocate(d_pdist2, N * N);
MLCommon::Distance::distance<distance_type, T, T, T, OutputTile_t>(
d_output2, d_output2, d_pdist2, N, N, D, (void*)nullptr, workspaceSize,
h.get_stream());
CUDA_CHECK(cudaPeekAtLastError());
T* h_pdist2 = new T[N * N];
raft::update_host(h_pdist2, d_pdist2, N * N, NULL);
CUDA_CHECK(cudaFree(d_pdist2));
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j <= i; j++) {
T pdist = h_pdist[i * N + j];
T pdist1 = h_pdist1[i * N + j];
T pdist2 = h_pdist2[i * N + j];
T lower_bound = (1.0 - epsilon) * pdist;
T upper_bound = (1.0 + epsilon) * pdist;
ASSERT_TRUE(lower_bound <= pdist1 && pdist1 <= upper_bound);
ASSERT_TRUE(lower_bound <= pdist2 && pdist2 <= upper_bound);
}
}
delete[] h_pdist;
delete[] h_pdist1;
delete[] h_pdist2;
}
protected:
raft::handle_t h;
paramsRPROJ* params1;
T epsilon;
std::vector<T> h_input;
T* d_input;
rand_mat<T>* random_matrix1;
T* d_output1;
paramsRPROJ* params2;
rand_mat<T>* random_matrix2;
T* d_output2;
};
typedef RPROJTest<float, 500, 2000> RPROJTestF1;
TEST_F(RPROJTestF1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 500, 2000> RPROJTestD1;
TEST_F(RPROJTestD1, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD1, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<float, 5000, 3500> RPROJTestF2;
TEST_F(RPROJTestF2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestF2, EpsilonCheck) { epsilon_check(); }
typedef RPROJTest<double, 5000, 3500> RPROJTestD2;
TEST_F(RPROJTestD2, RandomMatrixCheck) { random_matrix_check(); }
TEST_F(RPROJTestD2, EpsilonCheck) { epsilon_check(); }
} // end namespace ML
|
279b5750bd3833b6df0e81c03a4aca9043c37b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/triplet_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype margin = this->layer_param_.triplet_loss_param().margin();
Dtype losstype = this->layer_param_.triplet_loss_param().losstype();
int num_triplets = this->layer_param_.triplet_loss_param().num_triplets();
int use_pair = this->layer_param_.triplet_loss_param().use_pair();
CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0);
Dtype loss(0.0);
int dim = bottom[0]->count()/bottom[0]->num();
int num_set = bottom[0]->num()/(2 + num_triplets);
if (losstype == 0) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i];
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += ::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
} else if (losstype==1) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
// Loss component calculated from reference and close one
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i];
dist_sq_.mutable_cpu_data()[i] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] = 1 - \
dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += ::max(dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
} else if (losstype == 2) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() +
(2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() +
((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
// Loss component calculated from reference and close one
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[i] = exp(dist_sq_pos.mutable_cpu_data()[i]);
dist_sq_.mutable_cpu_data()[i] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() +
(2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() +
((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] = 1 - \
exp(dist_sq_neg.cpu_data()[i]) / dist_sq_.mutable_cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += ::max(dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype margin = this->layer_param_.triplet_loss_param().margin();
Dtype losstype = this->layer_param_.triplet_loss_param().losstype();
int num_triplets = this->layer_param_.triplet_loss_param().num_triplets();
int use_pair = this->layer_param_.triplet_loss_param().use_pair();
int dim = bottom[0]->count()/bottom[0]->num();
int num_set = bottom[0]->num()/(2 + num_triplets);
if (losstype == 0) {
// BP for feat1(extracted from reference)
for (int i = 0; i < 1; ++i) {
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
// Loss component calculated from negative part
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// similar pair in triplet
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + (2 + num_triplets)*j*dim);
// dissimilar pair in triplet
caffe_gpu_axpby(
dim,
-alpha,
diff_neg.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
// BP for feat2(extracted from the closest sample)
for (int i = 1; i < 2; ++i) {
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// similar pair in triplet
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
// BP for negative feature used in the num_triplets triplet part
for (int i = 2; i < 2 + num_triplets; ++i) {
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// dissimilar pairs
caffe_gpu_axpby(
dim,
alpha,
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
} else if (losstype==1) {
for (int i = 0; i < 1; ++i) {
// BP for data1(feat1)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*dist_sq_neg.mutable_cpu_data()[j]/
((dist_sq_pos.cpu_data()[j]+margin)*
(dist_sq_pos.cpu_data()[j]+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
caffe_gpu_axpby(
dim,
-alpha/(dist_sq_pos.cpu_data()[j] + margin),
diff_neg.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 1; i < 2; ++i) {
// BP for positive data(feat2)
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*dist_sq_neg.cpu_data()[j]/
((dist_sq_pos.cpu_data()[j]+margin)*
(dist_sq_pos.cpu_data()[j]+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 2; i < 2 + num_triplets; ++i) {
// BP for negative data(feat3)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha/(dist_sq_pos.cpu_data()[j] + margin),
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
} else if (losstype == 2) {
for (int i = 0; i < 1; ++i) {
// BP for data1(feat1)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.mutable_cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))*
Dtype(exp(dist_sq_pos.cpu_data()[j]))/
(Dtype((exp(dist_sq_pos.cpu_data()[j]))+margin)*
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
caffe_cpu_axpby(
dim,
-alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))/
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin),
diff_neg.cpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 1; i < 2; ++i) {
// BP for positive data(feat2)
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))*
Dtype(exp(dist_sq_pos.cpu_data()[j]))/
((Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)*
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 2; i < 2 + num_triplets; ++i) {
// BP for negative data(feat3)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*Dtype(exp(dist_sq_neg.cpu_data()[j]))/
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin),
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);
} // namespace caffe
| 279b5750bd3833b6df0e81c03a4aca9043c37b2d.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/triplet_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype margin = this->layer_param_.triplet_loss_param().margin();
Dtype losstype = this->layer_param_.triplet_loss_param().losstype();
int num_triplets = this->layer_param_.triplet_loss_param().num_triplets();
int use_pair = this->layer_param_.triplet_loss_param().use_pair();
CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0);
Dtype loss(0.0);
int dim = bottom[0]->count()/bottom[0]->num();
int num_set = bottom[0]->num()/(2 + num_triplets);
if (losstype == 0) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i];
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
} else if (losstype==1) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
// Loss component calculated from reference and close one
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i];
dist_sq_.mutable_cpu_data()[i] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] = 1 - \
dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
} else if (losstype == 2) {
for (int i = 0; i < num_set; ++i) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() +
(2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() +
((2 + num_triplets)*i + 1)*dim, // positive
diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close
// Loss component calculated from reference and close one
caffe_gpu_dot(
dim,
diff_pos.gpu_data() + i*dim,
diff_pos.gpu_data() + i*dim,
dist_sq_pos.mutable_cpu_data() + i);
// a b is a similar pair for pair wise
// loss accumulated by the pair wise part
if (use_pair == 1) {
loss += dist_sq_pos.cpu_data()[i];
}
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[i] = exp(dist_sq_pos.mutable_cpu_data()[i]);
dist_sq_.mutable_cpu_data()[i] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() +
(2 + num_triplets)*i*dim, // reference
bottom[0]->gpu_data() +
((2 + num_triplets)*i + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + i*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + i*dim,
diff_neg.gpu_data() + i*dim,
dist_sq_neg.mutable_cpu_data() + i);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[i] = 1 - \
exp(dist_sq_neg.cpu_data()[i]) / dist_sq_.mutable_cpu_data()[i];
// loss accumulated accumulated by the triplet part
loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0));
}
}
loss = loss / static_cast<Dtype>(num_set) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
}
template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
Dtype margin = this->layer_param_.triplet_loss_param().margin();
Dtype losstype = this->layer_param_.triplet_loss_param().losstype();
int num_triplets = this->layer_param_.triplet_loss_param().num_triplets();
int use_pair = this->layer_param_.triplet_loss_param().use_pair();
int dim = bottom[0]->count()/bottom[0]->num();
int num_set = bottom[0]->num()/(2 + num_triplets);
if (losstype == 0) {
// BP for feat1(extracted from reference)
for (int i = 0; i < 1; ++i) {
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
// Loss component calculated from negative part
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// similar pair in triplet
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + (2 + num_triplets)*j*dim);
// dissimilar pair in triplet
caffe_gpu_axpby(
dim,
-alpha,
diff_neg.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
// BP for feat2(extracted from the closest sample)
for (int i = 1; i < 2; ++i) {
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// similar pair in triplet
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
// BP for negative feature used in the num_triplets triplet part
for (int i = 2; i < 2 + num_triplets; ++i) {
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
// Triplet loss accumulation
// a and negative[triplet] is a similar pair for triplet
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j];
if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
// dissimilar pairs
caffe_gpu_axpby(
dim,
alpha,
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
} else if (losstype==1) {
for (int i = 0; i < 1; ++i) {
// BP for data1(feat1)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*dist_sq_neg.mutable_cpu_data()[j]/
((dist_sq_pos.cpu_data()[j]+margin)*
(dist_sq_pos.cpu_data()[j]+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
caffe_gpu_axpby(
dim,
-alpha/(dist_sq_pos.cpu_data()[j] + margin),
diff_neg.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 1; i < 2; ++i) {
// BP for positive data(feat2)
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*dist_sq_neg.cpu_data()[j]/
((dist_sq_pos.cpu_data()[j]+margin)*
(dist_sq_pos.cpu_data()[j]+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 2; i < 2 + num_triplets; ++i) {
// BP for negative data(feat3)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_gpu_diff();
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j];
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data() + j*dim,
diff_neg.gpu_data() + j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
dist_sq_neg.cpu_data()[j] / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha/(dist_sq_pos.cpu_data()[j] + margin),
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
} else if (losstype == 2) {
for (int i = 0; i < 1; ++i) {
// BP for data1(feat1)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.mutable_cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))*
Dtype(exp(dist_sq_pos.cpu_data()[j]))/
(Dtype((exp(dist_sq_pos.cpu_data()[j]))+margin)*
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
caffe_cpu_axpby(
dim,
-alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))/
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin),
diff_neg.cpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 1; i < 2; ++i) {
// BP for positive data(feat2)
if (propagate_down[0]) {
const Dtype sign = -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
// the pair part
if (use_pair == 1) {
caffe_gpu_axpby(
dim,
alpha,
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_gpu_axpby(
dim,
Dtype(0.0),
diff_pos.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
// the num_triplets triplet part
for (int triplet = 0; triplet < num_triplets; ++triplet) {
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + 2 + triplet)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*
Dtype(exp(dist_sq_neg.cpu_data()[j]))*
Dtype(exp(dist_sq_pos.cpu_data()[j]))/
((Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)*
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin)),
diff_pos.gpu_data() + (j*dim),
Dtype(1.0),
bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
for (int i = 2; i < 2 + num_triplets; ++i) {
// BP for negative data(feat3)
if (propagate_down[0]) {
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(num_set);
for (int j = 0; j < num_set; ++j) {
Dtype* bout = bottom[0]->mutable_cpu_diff();
dist_sq_.mutable_cpu_data()[j] =
exp(dist_sq_pos.cpu_data()[j]);
dist_sq_.mutable_cpu_data()[j] += margin;
// Loss component calculated from negative part
caffe_gpu_sub(
dim,
bottom[0]->gpu_data()+(2 + num_triplets)*j*dim, // reference
bottom[0]->gpu_data()+((2 + num_triplets)*j + i)*dim,
diff_neg.mutable_gpu_data() + j*dim); // reference-negative
caffe_gpu_dot(
dim,
diff_neg.gpu_data()+j*dim,
diff_neg.gpu_data()+j*dim,
dist_sq_neg.mutable_cpu_data() + j);
// a and negative[triplet] is a dissimilar pair for triplet
dist_sq_.mutable_cpu_data()[j] = 1 - \
exp(dist_sq_neg.cpu_data()[j]) / dist_sq_.cpu_data()[j];
// loss accumulated accumulated by the triplet part
if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) {
caffe_gpu_axpby(
dim,
alpha*Dtype(exp(dist_sq_neg.cpu_data()[j]))/
(Dtype(exp(dist_sq_pos.cpu_data()[j]))+margin),
diff_neg.gpu_data() + (j*dim),
Dtype(0.0),
bout + ((2 + num_triplets)*j + i)*dim);
} else {
caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim);
}
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);
} // namespace caffe
|
c6ac01e4fcc02aa286a6b570434878a85f898eb5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "hip/hip_runtime.h"
#include "book.h"
#include "cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
/*
float sqdist = dx*dx + dy*dy;
float sqrad = radius*radius;
if (sqdist < sqrad) {
float dz = sqrtf( sqrad - sqdist );
*n = dz / radius;
return dz + z;
}
*/
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
// capture the start time
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate temp memory, initialize it, copy to constant
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( hipMemcpyToSymbol( s, temp_s,
sizeof(Sphere) * SPHERES) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipFree( dev_bitmap ) );
// display
bitmap.display_and_exit();
}
| c6ac01e4fcc02aa286a6b570434878a85f898eb5.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "cuda.h"
#include "book.h"
#include "cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
/*
float sqdist = dx*dx + dy*dy;
float sqrad = radius*radius;
if (sqdist < sqrad) {
float dz = sqrtf( sqrad - sqdist );
*n = dz / radius;
return dz + z;
}
*/
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__constant__ Sphere s[SPHERES];
__global__ void kernel( unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
};
int main( void ) {
DataBlock data;
// capture the start time
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
// allocate memory on the GPU for the output bitmap
HANDLE_ERROR( cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate temp memory, initialize it, copy to constant
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
HANDLE_ERROR( cudaMemcpyToSymbol( s, temp_s,
sizeof(Sphere) * SPHERES) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( dev_bitmap );
// copy our bitmap back from the GPU for display
HANDLE_ERROR( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
// get stop time, and display the timing results
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaFree( dev_bitmap ) );
// display
bitmap.display_and_exit();
}
|
0d89b280c029a45354310fcd40c4a24ff1341c3a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#include "polybenchUtilFuncts.h"
#include <hip/hip_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size */
#define NI 1024
#define NJ 1024
#define NK 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 32
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 32412.0f
#define BETA 2123.0f
#define NUM_BLOCK 1024
#define DIM_BLOCK_VECTOR 256
#define NUM_BLOCK_COMPUTE 32768 //1024*32
#define NUM_SM 80
#define NUM_SM_COMPUTE 78
#define NUM_SM_HtoD_A 1
#define NUM_SM_HtoD_B 1
#define IN_CHUNK_SIZE 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
#define DUMMY_N 1000
__device__ void dummy_comp()
{
double sum = 0.0;
for (int i = 0; i < DUMMY_N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__device__ int flag_global_read(volatile int * flag, int rid)
{
return(flag[rid]);
}
void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i,j,k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] *= BETA;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j];
}
}
}
}
void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ;
}
}
}
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare C1 and C2
for (i=0; i < NI; i++)
{
for (j=0; j < NJ; j++)
{
if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( 0 );
}
__global__ void gemm_kernel_block_spin(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_host, DATA_TYPE *B_host, int *inflag_A, int *inflag_B)
{
__shared__ DATA_TYPE A_tile[32][32];
__shared__ DATA_TYPE B_tile[32][32];
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
if (blockIdx.x < NUM_SM_HtoD_A ){
//copy matrix A by 32 columns
for (int bid = blockIdx.x; bid < DIM_BLOCK_VECTOR; bid += NUM_SM_HtoD_A * IN_CHUNK_SIZE){
for (int i = 0; i< IN_CHUNK_SIZE;i++){
int bid_chunk = bid + NUM_SM_HtoD_A*i;
int block_row = bid_chunk % 32;
int block_offset = bid_chunk / 32;
int offset = 8192*block_row+256*tidy+block_offset*32+tidx;
reinterpret_cast<double2*>(A)[offset] = reinterpret_cast<double2*>(A_host)[offset];
}
__syncthreads();
__threadfence();
if ((tidx< IN_CHUNK_SIZE)&&(tidy==0)){
int bb = bid + NUM_SM_HtoD_A*tidx;
int br = bb % 32;
int bo = bb / 32;
atomicOr(&inflag_A[br*8+bo],1);
}
}
}else if (blockIdx.x < (NUM_SM_HtoD_A+NUM_SM_HtoD_B)){
//copy matrix B by 32 rows
for (int bid = (blockIdx.x-NUM_SM_HtoD_A); bid < DIM_BLOCK_VECTOR; bid += NUM_SM_HtoD_B * IN_CHUNK_SIZE){
for (int i = 0 ; i < IN_CHUNK_SIZE;i ++) {
int bid_chunk = bid + NUM_SM_HtoD_B*i;
int block_row = bid_chunk / 8;
int block_offset = bid_chunk % 8;
int offset = 8192*block_row+256*tidy+block_offset*32+tidx;
reinterpret_cast<double2*>(B)[offset] = reinterpret_cast<double2*>(B_host)[offset];
}
__syncthreads();
__threadfence();
if ((tidx< IN_CHUNK_SIZE)&&(tidy==0)){
atomicOr(&inflag_B[bid+NUM_SM_HtoD_B*tidx],1);
}
}
}else{
//compute blocks
for (int bid= (blockIdx.x-(NUM_SM_HtoD_A+NUM_SM_HtoD_B)); bid < NUM_BLOCK_COMPUTE; bid += NUM_SM_COMPUTE){
int tid = bid/ 1024;
int bb = bid % 1024;
int bidx = bb % 32;
int bidy = bb / 32;
int blockA,blockB;
blockA = bidy*8+tid/4;
blockB = tid*8+bidx/4;
if ((tidx==0)&&(tidy==0)){
//while ( (atomicAnd(&inflag_A[blockA],1)==0) ){
while (flag_global_read(inflag_A,blockA) != 1){
//dummy_comp();
}
}
if ((tidx==1)&&(tidy==0)){
//while ( (atomicAnd(&inflag_B[blockB],1)==0) ){
while (flag_global_read(inflag_B,blockB) != 1){
//dummy_comp();
}
}
__syncthreads();
DATA_TYPE accu= 0;
int i,j;
i = bidy*32+tidy;
j = bidx*32+tidx;
A_tile[threadIdx.y][threadIdx.x] = A[i*NJ+tid*32+threadIdx.x];
B_tile[threadIdx.y][threadIdx.x] = B[(tid*32+threadIdx.y)*NJ+j];
__syncthreads();
for (int k = 0; k < 32; k++){
accu += ALPHA * A_tile[threadIdx.y][k]*B_tile[k][threadIdx.x];
}
__syncthreads();
if (tid==0){
C[i*NJ+j] *= BETA;
}
C[i*NJ+j] += accu;
}
}
}
void gemmCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
int *inflag_A, *inflag_B;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMalloc((void **)&inflag_A, sizeof(int) * DIM_BLOCK_VECTOR);
hipMalloc((void **)&inflag_B, sizeof(int) * DIM_BLOCK_VECTOR);
hipMemset(inflag_A, 0, sizeof(int) * DIM_BLOCK_VECTOR);
hipMemset(inflag_B, 0, sizeof(int) * DIM_BLOCK_VECTOR);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(NUM_SM), (size_t)1 );
void *kernelArgs[] = {
(void *)&A_gpu, (void *)&B_gpu,
(void *)&C_gpu, (void *)&A,
(void *)&B, (void *)&inflag_A, (void*)&inflag_B
};
hipEventRecord(start);
//verify compute
//hipMemset(inflag_A, 0, sizeof(int) * DIM_BLOCK_VECTOR);
//hipMemset(inflag_B, 0, sizeof(int) * DIM_BLOCK_VECTOR);
//hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
//hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
//gemm_kernel_block_spin<<< grid, block >>>(A_gpu, B_gpu, C_gpu, A,B,inflag_A, inflag_B);
hipLaunchCooperativeKernel((void*)gemm_kernel_block_spin, grid, block, kernelArgs,0, NULL);
//gpuErrchk( hipPeekAtLastError() );
//gpuErrchk( hipDeviceSynchronize() );
hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//verify copy results
/*
DATA_TYPE *AA, *BB;
int *FA,*FB;
hipHostMalloc((void **)&AA, sizeof(DATA_TYPE) * NI * NK, hipHostMallocPortable);
hipHostMalloc((void **)&BB, sizeof(DATA_TYPE) * NI * NK, hipHostMallocPortable);
hipHostMalloc((void **)&FA, sizeof(int) * DIM_BLOCK_VECTOR, hipHostMallocPortable);
hipHostMalloc((void **)&FB, sizeof(int) * DIM_BLOCK_VECTOR, hipHostMallocPortable);
hipMemcpy(AA, A_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost);
hipMemcpy(BB, B_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost);
hipMemcpy(FA, inflag_A, sizeof(int) * DIM_BLOCK_VECTOR, hipMemcpyDeviceToHost);
hipMemcpy(FB, inflag_B, sizeof(int) * DIM_BLOCK_VECTOR, hipMemcpyDeviceToHost);
compareResults(A, AA);
compareResults(B, BB);
for(int i = 0 ; i < 256; i++) {
fprintf(stdout, "%d",FA[i]);
}
for(int i = 0 ; i < 256; i++) {
fprintf(stdout, "%d",FB[i]);
}
*/
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
//C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//C_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NI * NK, hipHostMallocPortable);
hipHostMalloc((void **)&B, sizeof(DATA_TYPE) * NK * NJ, hipHostMallocPortable);
hipHostMalloc((void **)&C, sizeof(DATA_TYPE) * NI * NJ, hipHostMallocPortable);
hipHostMalloc((void **)&C_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ, hipHostMallocPortable);
init(A, B, C);
GPU_argv_init();
gemmCuda(A, B, C, C_outputFromGpu);
/*
t_start = rtclock();
gemm(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_outputFromGpu);
*/
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(C_outputFromGpu);
return 0;
}
| 0d89b280c029a45354310fcd40c4a24ff1341c3a.cu | /**
* gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include <cooperative_groups.h>
#include "polybenchUtilFuncts.h"
#include <cuda_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size */
#define NI 1024
#define NJ 1024
#define NK 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 32
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */
#define ALPHA 32412.0f
#define BETA 2123.0f
#define NUM_BLOCK 1024
#define DIM_BLOCK_VECTOR 256
#define NUM_BLOCK_COMPUTE 32768 //1024*32
#define NUM_SM 80
#define NUM_SM_COMPUTE 78
#define NUM_SM_HtoD_A 1
#define NUM_SM_HtoD_B 1
#define IN_CHUNK_SIZE 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
#define DUMMY_N 1000
__device__ void dummy_comp()
{
double sum = 0.0;
for (int i = 0; i < DUMMY_N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
__device__ int flag_global_read(volatile int * flag, int rid)
{
return(flag[rid]);
}
void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i,j,k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] *= BETA;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j];
}
}
}
}
void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ;
}
}
}
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare C1 and C2
for (i=0; i < NI; i++)
{
for (j=0; j < NJ; j++)
{
if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( 0 );
}
__global__ void gemm_kernel_block_spin(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_host, DATA_TYPE *B_host, int *inflag_A, int *inflag_B)
{
__shared__ DATA_TYPE A_tile[32][32];
__shared__ DATA_TYPE B_tile[32][32];
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
if (blockIdx.x < NUM_SM_HtoD_A ){
//copy matrix A by 32 columns
for (int bid = blockIdx.x; bid < DIM_BLOCK_VECTOR; bid += NUM_SM_HtoD_A * IN_CHUNK_SIZE){
for (int i = 0; i< IN_CHUNK_SIZE;i++){
int bid_chunk = bid + NUM_SM_HtoD_A*i;
int block_row = bid_chunk % 32;
int block_offset = bid_chunk / 32;
int offset = 8192*block_row+256*tidy+block_offset*32+tidx;
reinterpret_cast<double2*>(A)[offset] = reinterpret_cast<double2*>(A_host)[offset];
}
__syncthreads();
__threadfence();
if ((tidx< IN_CHUNK_SIZE)&&(tidy==0)){
int bb = bid + NUM_SM_HtoD_A*tidx;
int br = bb % 32;
int bo = bb / 32;
atomicOr(&inflag_A[br*8+bo],1);
}
}
}else if (blockIdx.x < (NUM_SM_HtoD_A+NUM_SM_HtoD_B)){
//copy matrix B by 32 rows
for (int bid = (blockIdx.x-NUM_SM_HtoD_A); bid < DIM_BLOCK_VECTOR; bid += NUM_SM_HtoD_B * IN_CHUNK_SIZE){
for (int i = 0 ; i < IN_CHUNK_SIZE;i ++) {
int bid_chunk = bid + NUM_SM_HtoD_B*i;
int block_row = bid_chunk / 8;
int block_offset = bid_chunk % 8;
int offset = 8192*block_row+256*tidy+block_offset*32+tidx;
reinterpret_cast<double2*>(B)[offset] = reinterpret_cast<double2*>(B_host)[offset];
}
__syncthreads();
__threadfence();
if ((tidx< IN_CHUNK_SIZE)&&(tidy==0)){
atomicOr(&inflag_B[bid+NUM_SM_HtoD_B*tidx],1);
}
}
}else{
//compute blocks
for (int bid= (blockIdx.x-(NUM_SM_HtoD_A+NUM_SM_HtoD_B)); bid < NUM_BLOCK_COMPUTE; bid += NUM_SM_COMPUTE){
int tid = bid/ 1024;
int bb = bid % 1024;
int bidx = bb % 32;
int bidy = bb / 32;
int blockA,blockB;
blockA = bidy*8+tid/4;
blockB = tid*8+bidx/4;
if ((tidx==0)&&(tidy==0)){
//while ( (atomicAnd(&inflag_A[blockA],1)==0) ){
while (flag_global_read(inflag_A,blockA) != 1){
//dummy_comp();
}
}
if ((tidx==1)&&(tidy==0)){
//while ( (atomicAnd(&inflag_B[blockB],1)==0) ){
while (flag_global_read(inflag_B,blockB) != 1){
//dummy_comp();
}
}
__syncthreads();
DATA_TYPE accu= 0;
int i,j;
i = bidy*32+tidy;
j = bidx*32+tidx;
A_tile[threadIdx.y][threadIdx.x] = A[i*NJ+tid*32+threadIdx.x];
B_tile[threadIdx.y][threadIdx.x] = B[(tid*32+threadIdx.y)*NJ+j];
__syncthreads();
for (int k = 0; k < 32; k++){
accu += ALPHA * A_tile[threadIdx.y][k]*B_tile[k][threadIdx.x];
}
__syncthreads();
if (tid==0){
C[i*NJ+j] *= BETA;
}
C[i*NJ+j] += accu;
}
}
}
void gemmCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
int *inflag_A, *inflag_B;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMalloc((void **)&inflag_A, sizeof(int) * DIM_BLOCK_VECTOR);
cudaMalloc((void **)&inflag_B, sizeof(int) * DIM_BLOCK_VECTOR);
cudaMemset(inflag_A, 0, sizeof(int) * DIM_BLOCK_VECTOR);
cudaMemset(inflag_B, 0, sizeof(int) * DIM_BLOCK_VECTOR);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(NUM_SM), (size_t)1 );
void *kernelArgs[] = {
(void *)&A_gpu, (void *)&B_gpu,
(void *)&C_gpu, (void *)&A,
(void *)&B, (void *)&inflag_A, (void*)&inflag_B
};
cudaEventRecord(start);
//verify compute
//cudaMemset(inflag_A, 0, sizeof(int) * DIM_BLOCK_VECTOR);
//cudaMemset(inflag_B, 0, sizeof(int) * DIM_BLOCK_VECTOR);
//cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
//cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
//gemm_kernel_block_spin<<< grid, block >>>(A_gpu, B_gpu, C_gpu, A,B,inflag_A, inflag_B);
cudaLaunchCooperativeKernel((void*)gemm_kernel_block_spin, grid, block, kernelArgs,0, NULL);
//gpuErrchk( cudaPeekAtLastError() );
//gpuErrchk( cudaDeviceSynchronize() );
cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//verify copy results
/*
DATA_TYPE *AA, *BB;
int *FA,*FB;
cudaHostAlloc((void **)&AA, sizeof(DATA_TYPE) * NI * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&BB, sizeof(DATA_TYPE) * NI * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&FA, sizeof(int) * DIM_BLOCK_VECTOR, cudaHostAllocPortable);
cudaHostAlloc((void **)&FB, sizeof(int) * DIM_BLOCK_VECTOR, cudaHostAllocPortable);
cudaMemcpy(AA, A_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost);
cudaMemcpy(BB, B_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost);
cudaMemcpy(FA, inflag_A, sizeof(int) * DIM_BLOCK_VECTOR, cudaMemcpyDeviceToHost);
cudaMemcpy(FB, inflag_B, sizeof(int) * DIM_BLOCK_VECTOR, cudaMemcpyDeviceToHost);
compareResults(A, AA);
compareResults(B, BB);
for(int i = 0 ; i < 256; i++) {
fprintf(stdout, "%d",FA[i]);
}
for(int i = 0 ; i < 256; i++) {
fprintf(stdout, "%d",FB[i]);
}
*/
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* C_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
//C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//C_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NI * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&B, sizeof(DATA_TYPE) * NK * NJ, cudaHostAllocPortable);
cudaHostAlloc((void **)&C, sizeof(DATA_TYPE) * NI * NJ, cudaHostAllocPortable);
cudaHostAlloc((void **)&C_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ, cudaHostAllocPortable);
init(A, B, C);
GPU_argv_init();
gemmCuda(A, B, C, C_outputFromGpu);
/*
t_start = rtclock();
gemm(A, B, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_outputFromGpu);
*/
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(C_outputFromGpu);
return 0;
}
|
a089ffebf3a24b2be04459c1fde32e135d283962.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "imager.h"
#include "antialias.h"
namespace Imager{
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__longlong_as_double(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__global__ void _anti_alias(double *addr_r,double *addr_g,double *addr_b,double **red,double **green,double **blue,int i,int j,int antiAliasFactor){
int di=threadIdx.x;
int dj=threadIdx.y;
int x=antiAliasFactor*i + di;
int y=antiAliasFactor*j + dj;
atomicAdd(addr_r,red[x][y]);
atomicAdd(addr_g,green[x][y]);
atomicAdd(addr_b,blue[x][y]);
}
__global__ void _max_color(double *addr_r,double *addr_g,double *addr_b,double *red,double *green,double *blue,int n){
int di=threadIdx.x;
atomicMax(addr_r,red[di]);
atomicMax(addr_g,green[di]);
atomicMax(addr_b,blue[di]);
}
Color cuda_antiAlias(double **red,double **green,double **blue,int i,int j,int antiAliasFactor,int wide,int height){
double *addr_r,*addr_g,*addr_b,*rr,*gg,*bb;
double **r,**g,**b;
hipMallocManaged(&addr_r,4);
hipMallocManaged(&addr_g,4);
hipMallocManaged(&addr_b,4);
hipMalloc(&r,wide*height*sizeof(double));
hipMalloc(&g,wide*height*sizeof(double));
hipMalloc(&b,wide*height*sizeof(double));
hipMemset(addr_r,0,sizeof(double));
hipMemset(addr_g,0,sizeof(double));
hipMemset(addr_b,0,sizeof(double));
hipMemcpy(r,red,wide*height*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(g,green,wide*height*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(b,blue,wide*height*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( _anti_alias), dim3(antiAliasFactor),dim3(antiAliasFactor), 0, 0, addr_r,addr_g,addr_b,red,green,blue,i,j,antiAliasFactor);
rr=(double*)malloc(sizeof(double));
gg=(double*)malloc(sizeof(double));
bb=(double*)malloc(sizeof(double));
hipMemcpy(rr,addr_r,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(gg,addr_g,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(bb,addr_b,sizeof(double),hipMemcpyDeviceToHost);
return Color(*rr,*gg,*bb);
}
int cuda_maxColor(double *red,double *green,double *blue,int n){
double *addr_r,*addr_g,*addr_b,*rr,*gg,*bb;
double *r,*g,*b;
hipMallocManaged(&addr_r,4);
hipMallocManaged(&addr_g,4);
hipMallocManaged(&addr_b,4);
hipMalloc(&r,n*sizeof(double));
hipMalloc(&g,n*sizeof(double));
hipMalloc(&b,n*sizeof(double));
hipMemset(addr_r,0,sizeof(double));
hipMemset(addr_g,0,sizeof(double));
hipMemset(addr_b,0,sizeof(double));
hipMemcpy(r,red,n*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(g,green,n*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(b,blue,n*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( _max_color), dim3(n),dim3(1), 0, 0, addr_r,addr_g,addr_b,r,g,b,n);
rr=(double*)malloc(sizeof(double));
gg=(double*)malloc(sizeof(double));
bb=(double*)malloc(sizeof(double));
hipMemcpy(rr,addr_r,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(gg,addr_g,sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(bb,addr_b,sizeof(double),hipMemcpyDeviceToHost);
if(*rr>=*gg&&*rr>=*bb) return *rr;
else if(*gg>=*bb&&*gg>=*rr) return *gg;
else return *bb;
}
}
| a089ffebf3a24b2be04459c1fde32e135d283962.cu | #include "imager.h"
#include "antialias.h"
namespace Imager{
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__longlong_as_double(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__global__ void _anti_alias(double *addr_r,double *addr_g,double *addr_b,double **red,double **green,double **blue,int i,int j,int antiAliasFactor){
int di=threadIdx.x;
int dj=threadIdx.y;
int x=antiAliasFactor*i + di;
int y=antiAliasFactor*j + dj;
atomicAdd(addr_r,red[x][y]);
atomicAdd(addr_g,green[x][y]);
atomicAdd(addr_b,blue[x][y]);
}
__global__ void _max_color(double *addr_r,double *addr_g,double *addr_b,double *red,double *green,double *blue,int n){
int di=threadIdx.x;
atomicMax(addr_r,red[di]);
atomicMax(addr_g,green[di]);
atomicMax(addr_b,blue[di]);
}
Color cuda_antiAlias(double **red,double **green,double **blue,int i,int j,int antiAliasFactor,int wide,int height){
double *addr_r,*addr_g,*addr_b,*rr,*gg,*bb;
double **r,**g,**b;
cudaMallocManaged(&addr_r,4);
cudaMallocManaged(&addr_g,4);
cudaMallocManaged(&addr_b,4);
cudaMalloc(&r,wide*height*sizeof(double));
cudaMalloc(&g,wide*height*sizeof(double));
cudaMalloc(&b,wide*height*sizeof(double));
cudaMemset(addr_r,0,sizeof(double));
cudaMemset(addr_g,0,sizeof(double));
cudaMemset(addr_b,0,sizeof(double));
cudaMemcpy(r,red,wide*height*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(g,green,wide*height*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(b,blue,wide*height*sizeof(double),cudaMemcpyHostToDevice);
_anti_alias<<<antiAliasFactor,antiAliasFactor>>>(addr_r,addr_g,addr_b,red,green,blue,i,j,antiAliasFactor);
rr=(double*)malloc(sizeof(double));
gg=(double*)malloc(sizeof(double));
bb=(double*)malloc(sizeof(double));
cudaMemcpy(rr,addr_r,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(gg,addr_g,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(bb,addr_b,sizeof(double),cudaMemcpyDeviceToHost);
return Color(*rr,*gg,*bb);
}
int cuda_maxColor(double *red,double *green,double *blue,int n){
double *addr_r,*addr_g,*addr_b,*rr,*gg,*bb;
double *r,*g,*b;
cudaMallocManaged(&addr_r,4);
cudaMallocManaged(&addr_g,4);
cudaMallocManaged(&addr_b,4);
cudaMalloc(&r,n*sizeof(double));
cudaMalloc(&g,n*sizeof(double));
cudaMalloc(&b,n*sizeof(double));
cudaMemset(addr_r,0,sizeof(double));
cudaMemset(addr_g,0,sizeof(double));
cudaMemset(addr_b,0,sizeof(double));
cudaMemcpy(r,red,n*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(g,green,n*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(b,blue,n*sizeof(double),cudaMemcpyHostToDevice);
_max_color<<<n,1>>>(addr_r,addr_g,addr_b,r,g,b,n);
rr=(double*)malloc(sizeof(double));
gg=(double*)malloc(sizeof(double));
bb=(double*)malloc(sizeof(double));
cudaMemcpy(rr,addr_r,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(gg,addr_g,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(bb,addr_b,sizeof(double),cudaMemcpyDeviceToHost);
if(*rr>=*gg&&*rr>=*bb) return *rr;
else if(*gg>=*bb&&*gg>=*rr) return *gg;
else return *bb;
}
}
|
2ada112a7d854a370d56d6eb345101603cba4398.hip | // !!! This is a file automatically generated by hipify!!!
#include <typeinfo>
/*
Ripser++: accelerated Vietoris-Rips persistence barcodes computation with GPU
MIT License
Copyright (c) 2019, 2020 Simon Zhang, Mengbai Xiao, Hao Wang
Python Bindings: Birkan Gokbag
Copyright (c) 2015-2019 Ripser codebase, written by Ulrich Bauer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
You are under no obligation whatsoever to provide any bug fixes, patches, or
upgrades to the features, functionality or performance of the source code
("Enhancements") to anyone; however, if you choose to make your Enhancements
available either publicly, or directly to the author of this software, without
imposing a separate written license agreement for such Enhancements, then you
hereby grant the following license: a non-exclusive, royalty-free perpetual
license to install, use, modify, prepare derivative works, incorporate into
other computer software, distribute, and sublicense such enhancements or
derivative works thereof, in binary and source code form.
*/
#define CUDACHECK(cmd) do {\
hipError_t e= cmd;\
if( e != hipSuccess ) {\
printf("Failed: Cuda error %s:%d '%s'\n",\
__FILE__,__LINE__,hipGetErrorString(e));\
exit(EXIT_FAILURE);\
}\
} while(0)
//#define INDICATE_PROGRESS//DO NOT UNCOMMENT THIS IF YOU WANT TO LOG PROFILING NUMBERS FROM stderr TO FILE
#define PRINT_PERSISTENCE_PAIRS//print out all persistence paris to stdout
//#define CPUONLY_ASSEMBLE_REDUCTION_MATRIX//do full matrix reduction on CPU with the sparse coefficient matrix V
//#define ASSEMBLE_REDUCTION_SUBMATRIX//do submatrix reduction with the sparse coefficient submatrix of V
#define PROFILING
#define COUNTING
#define USE_PHASHMAP//www.github.com/greg7mdp/parallel-hashmap
#ifndef USE_PHASHMAP
#define USE_GOOGLE_HASHMAP
#endif
//#define CPUONLY_SPARSE_HASHMAP//WARNING: MAY NEED LOWER GCC VERSION TO RUN, TESTED ON: NVCC VERSION 9.2 WITH GCC VERSIONS >=5.3.0 AND <=7.3.0
#define MIN_INT64 (-9223372036854775807-1)
#define MAX_INT64 (9223372036854775807)
#define MAX_FLOAT (340282346638528859811704183484516925440.000000)
#include <cassert>
#include <fstream>
#include <iostream>
#include <numeric>
#include <queue>
#include <sstream>
#include <unordered_map>
#include <cmath>
#include <algorithm>
#include <profiling/stopwatch.h>
#include <sparsehash/dense_hash_map>
#include <phmap_interface/phmap_interface.h>
#include <omp.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <hip/hip_runtime.h>
#ifdef CPUONLY_SPARSE_HASHMAP
#include <sparsehash/sparse_hash_map>
template <class Key, class T> class hash_map : public google::sparse_hash_map<Key, T> {
public:
explicit hash_map() : google::sparse_hash_map<Key, T>() {
}
inline void reserve(size_t hint) { this->resize(hint); }
};
#endif
#ifndef CPUONLY_SPARSE_HASHMAP
template <class Key, class T> class hash_map : public google::dense_hash_map<Key, T> {
public:
explicit hash_map() : google::dense_hash_map<Key, T>() {
this->set_empty_key(-1);
}
inline void reserve(size_t hint) { this->resize(hint); }
};
#endif
static const std::string clear_line("\r\033[K");
#ifdef INDICATE_PROGRESS
static const std::chrono::milliseconds time_step(40);
#endif
typedef float value_t;
typedef int64_t index_t;
struct diameter_index_t_struct{
value_t diameter;
index_t index;
};
struct index_diameter_t_struct{
index_t index;
value_t diameter;
};
struct lowerindex_lowerdiameter_index_t_struct_compare{
__host__ __device__ bool operator() (struct index_diameter_t_struct a, struct index_diameter_t_struct b){
return a.index!=b.index ? a.index<b.index : a.diameter<b.diameter;
}
};
struct greaterdiam_lowerindex_diameter_index_t_struct_compare {
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.diameter!=b.diameter ? a.diameter>b.diameter : a.index<b.index;
}
};
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse {
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.diameter!=b.diameter ? a.diameter<b.diameter : a.index>b.index;
}
};
struct lowerindex_lowerdiam_diameter_index_t_struct_compare{
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.index!=b.index ? a.index<b.index : a.diameter<b.diameter;
}
};
struct index_t_pair_struct{//data type for a pivot in the coboundary matrix: (row,column)
index_t row_cidx;
index_t column_idx;
};
struct row_cidx_column_idx_struct_compare{
__host__ __device__ bool operator()(struct index_t_pair_struct a, struct index_t_pair_struct b){
//return a.row_cidx!=b.row_cidx ? a.row_cidx<b.row_cidx : a.column_idx<b.column_idx;//the second condition should never happen if sorting pivot pairs since pivots do not conflict on rows or columns
return a.row_cidx<b.row_cidx || (a.row_cidx==b.row_cidx && a.column_idx<b.column_idx);
}
};
__host__ __device__ value_t hd_max(value_t a, value_t b){
return a>b?a:b;
}
void check_overflow(index_t i){
if(i<0){
throw std::overflow_error("simplex index "+std::to_string((uint64_t)i)+" in filtration is overflowing past 64 bits signed integer");
}
}
//assume i>j (lower triangular with i indexing rows and j indexing columns
#define LOWER_DISTANCE_INDEX(i,j,n) (((i)*((i)-1)/2)+(j))
class binomial_coeff_table {
index_t num_n;
index_t max_tuple_length;
#define BINOM_TRANSPOSE(i,j) ((j)*(num_n)+(i))
#define BINOM(i,j) ((i)*(max_tuple_length)+(j))
public:
index_t* binoms;
binomial_coeff_table(index_t n, index_t k) {
binoms= (index_t*)malloc(sizeof(index_t)*(n+1)*(k+1));
if(binoms==NULL){
//std::cerr<<"malloc for binoms failed"<<std::endl;
exit(1);
}
num_n= n+1;
max_tuple_length= k+1;
memset(binoms, 0, sizeof(index_t)*num_n*max_tuple_length);
for (index_t i= 0; i <= n; i++) {
for (index_t j= 0; j <= ::min(i, k); j++){
if (j == 0 || j == i) {
binoms[BINOM_TRANSPOSE(i,j)]= 1;
} else {
binoms[BINOM_TRANSPOSE(i,j)]= binoms[BINOM_TRANSPOSE(i-1,j-1)]+binoms[BINOM_TRANSPOSE(i-1,j)];
}
}
check_overflow(binoms[BINOM_TRANSPOSE(i,::min(i>>1,k))]);
}
}
index_t get_num_n() const{
return num_n;
}
index_t get_max_tuple_length() const{
return max_tuple_length;
}
__host__ __device__ index_t operator()(index_t n, index_t k) const{
assert(n<num_n && k<max_tuple_length);
return binoms[BINOM_TRANSPOSE(n,k)];
}
};
typedef std::pair<value_t, index_t> diameter_index_t;
value_t get_diameter(const diameter_index_t& i) { return i.first; }
index_t get_index(const diameter_index_t& i) { return i.second; }
template <typename Entry> struct greater_diameter_or_smaller_index {
bool operator()(const Entry& a, const Entry& b) {
return (get_diameter(a) > get_diameter(b)) ||
((get_diameter(a) == get_diameter(b)) && (get_index(a) < get_index(b)));
}
};
struct CSR_distance_matrix{
index_t capacity;
value_t* entries;
index_t* offsets;
index_t* col_indices;
index_t n;
index_t num_edges;
index_t num_entries;
public:
CSR_distance_matrix(){}//avoid calling malloc in constructor for GPU side
index_t size(){return n;}
};
class compressed_lower_distance_matrix {
public:
std::vector<value_t> distances;
std::vector<value_t*> rows;
void init_rows() {
value_t* pointer= &distances[0];
for (index_t i= 1; i < size(); ++i) {
rows[i]= pointer;
pointer+= i;
}
}
compressed_lower_distance_matrix(std::vector<value_t>&& _distances)
: distances(std::move(_distances)), rows((1 + std::sqrt(1 + 8 * distances.size())) / 2) {
assert(distances.size() == size() * (size() - 1) / 2);
init_rows();
}
template <typename DistanceMatrix>
compressed_lower_distance_matrix(const DistanceMatrix& mat)
: distances(mat.size() * (mat.size() - 1) / 2), rows(mat.size()) {
init_rows();
for (index_t i= 1; i < size(); ++i)
for (index_t j= 0; j < i; ++j) rows[i][j]= mat(i, j);
}
value_t operator()(const index_t i, const index_t j) const {
return i == j ? 0 : i < j ? rows[j][i] : rows[i][j];
}
value_t distance(index_t i, index_t j){
return i == j ? 0 : i < j ? rows[j][i] : rows[i][j];
}
size_t size() const { return rows.size(); }
};
struct sparse_distance_matrix {
std::vector<std::vector<index_diameter_t_struct>> neighbors;
index_t num_entries;
mutable std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator> neighbor_it;
mutable std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator> neighbor_end;
sparse_distance_matrix(std::vector<std::vector<index_diameter_t_struct>>&& _neighbors,
index_t _num_edges)
: neighbors(std::move(_neighbors)), num_entries(_num_edges*2) {}
value_t distance(index_t i, index_t j){
// TODO (@captain-pool / anyone who is reading)
// This is a dummy, just to fool the compiler. The compiler looks for it,
// When it doesn't get, it raises an issue.
// The reason for this extra distance() is because the operator() can only called with
// const index_t / mutable
// I tried those, but have no effing clue, why it doesn't work.
// Now I'm defining another generic function that doesn't needs const and it works.
// It is not yet implemented for sparse_distance_matrix, so please implement it.
return 0;
}
template <typename DistanceMatrix>
sparse_distance_matrix(const DistanceMatrix& mat, const value_t threshold)
: neighbors(mat.size()), num_entries(0) {
//std::cerr << "threshold: " << threshold << std::endl;
for (index_t i= 0; i < size(); ++i) {
for (index_t j= 0; j < size(); ++j) {
if (i != j && mat(i, j) <= threshold) {
++num_entries;
neighbors[i].push_back({j, mat(i, j)});
}
}
}
}
size_t size() const { return neighbors.size(); }
private:
//this should only be called from CPU side
void append_sparse(CSR_distance_matrix& dist, value_t e, index_t j) {
if (dist.capacity == 0) {
dist.entries= (value_t *) malloc(sizeof(value_t) * size() * 10);
if(dist.entries==NULL){
//std::cerr<<"entries could not be malloced"<<std::endl;
exit(1);
}
dist.col_indices= (index_t *) malloc(sizeof(index_t) * size() * 10);
if(dist.col_indices==NULL){
//std::cerr<<"col_indices could not be malloced"<<std::endl;
exit(1);
}
dist.capacity= size() * 10;
}
if (dist.num_entries >= dist.capacity) {
dist.capacity*= 2;
dist.entries= (value_t *) realloc(dist.entries, sizeof(value_t) * dist.capacity);
if(dist.entries==NULL){
//std::cerr<<"col_indices could not be realloced with double memory"<<std::endl;
exit(1);
}
dist.col_indices= (index_t *) realloc(dist.col_indices, sizeof(index_t) * dist.capacity);
if(dist.col_indices==NULL){
//std::cerr<<"col_indices could not be realloced with double memory"<<std::endl;
exit(1);
}
}
dist.entries[dist.num_entries]= e;
dist.col_indices[dist.num_entries++]= j;
}
//this should only be called on CPU side
void update_offsets(CSR_distance_matrix& dist, index_t row_index, index_t offset_increment){
if(row_index==0){
dist.offsets[0]= 0;
}
dist.offsets[row_index+1]= dist.offsets[row_index]+offset_increment;
}
public:
CSR_distance_matrix toCSR(){
CSR_distance_matrix dist;
dist.n= size();
dist.num_entries= 0;
dist.capacity= num_entries;//this sets the matrix to exactly num_entries memory allocation
dist.offsets= (index_t*) malloc(sizeof(index_t)*(size()+1));
if(dist.offsets==NULL){
//std::cerr<<"malloc for offsets failed"<<std::endl;
exit(1);
}
dist.col_indices= (index_t*) malloc(sizeof(index_t)*dist.capacity);
if(dist.col_indices==NULL){
//std::cerr<<"malloc for col_indices failed"<<std::endl;
exit(1);
}
dist.entries= (value_t*) malloc(sizeof(value_t)*dist.capacity);
if(dist.entries==NULL){
//std::cerr<<"malloc for entries failed"<<std::endl;
exit(1);
}
for(index_t i= 0; i<size(); i++){
index_t nnz_inrow= 0;
for(index_t j=0; j<neighbors[i].size(); j++){
append_sparse(dist, neighbors[i][j].diameter, neighbors[i][j].index);
nnz_inrow++;
}
update_offsets(dist, i, nnz_inrow);
}
dist.num_edges= num_entries/2;
return dist;
}
};
class euclidean_distance_matrix {
public:
std::vector<std::vector<value_t>> points;
euclidean_distance_matrix(std::vector<std::vector<value_t>>&& _points)
: points(std::move(_points)) {
for (auto p : points) { assert(p.size() == points.front().size()); }
}
value_t operator()(const index_t i, const index_t j) const {
assert(i < points.size());
assert(j < points.size());
return std::sqrt(std::inner_product(
points[i].begin(), points[i].end(), points[j].begin(), value_t(), std::plus<value_t>(),
[](value_t u, value_t v) { return (u - v) * (u - v); }));
}
size_t size() const { return points.size(); }
};
class union_find {
std::vector<index_t> parent;
std::vector<uint8_t> rank;
public:
union_find(index_t n) : parent(n), rank(n, 0) {
for (index_t i= 0; i < n; ++i) parent[i]= i;
}
index_t find(index_t x) {
index_t y= x, z;
while ((z= parent[y]) != y) y= z;
while ((z= parent[x]) != y) {
parent[x]= y;
x= z;
}
return z;
}
void link(index_t x, index_t y) {
if ((x= find(x)) == (y= find(y))) return;
if (rank[x] > rank[y])
parent[y]= x;
else {
parent[x]= y;
if (rank[x] == rank[y]) ++rank[y];
}
}
};
template <typename Heap> struct diameter_index_t_struct pop_pivot(Heap& column) {
if(column.empty()) {
return {0,-1};
}
auto pivot= column.top();
column.pop();
while(!column.empty() && (column.top()).index == pivot.index) {
column.pop();
if (column.empty()) {
return {0,-1};
}
else {
pivot= column.top();
column.pop();
}
}
return pivot;
}
template <typename Heap> struct diameter_index_t_struct get_pivot(Heap& column) {
struct diameter_index_t_struct result= pop_pivot(column);
if (result.index != -1) column.push(result);
return result;
}
template <typename T> T begin(std::pair<T, T>& p) { return p.first; }
template <typename T> T end(std::pair<T, T>& p) { return p.second; }
template <typename ValueType> class compressed_sparse_matrix {
std::vector<size_t> bounds;
std::vector<ValueType> entries;
typedef typename std::vector<ValueType>::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair;
public:
size_t size() const { return bounds.size(); }
iterator_pair subrange(const index_t index) {
return {entries.begin() + (index == 0 ? 0 : bounds[index - 1]),
entries.begin() + bounds[index]};
}
void append_column() { bounds.push_back(entries.size()); }
void push_back(const ValueType e) {
assert(0 < size());
entries.push_back(e);
++bounds.back();
}
};
template <typename ValueType> class compressed_sparse_submatrix {
std::vector<size_t> sub_bounds;//the 0-based indices for
std::vector<ValueType> entries;
typedef typename std::vector<ValueType>::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair;
public:
size_t size() const { return sub_bounds.size(); }
//assume we are given a "subindex" for the submatrix
//allows iteration from sub_bounds[index_to_subindex[index]] to sub_bounds[index_to_subindex[index+1]]-1
iterator_pair subrange(const index_t subindex) {
return {entries.begin() + (subindex == 0 ? 0 : sub_bounds[subindex - 1]),
entries.begin() + sub_bounds[subindex]};
}
void append_column() { sub_bounds.push_back(entries.size()); }
void push_back(const ValueType e) {
assert(0 < size());
entries.push_back(e);
++sub_bounds.back();
}
};
template <class Predicate> index_t upper_bound(index_t top, Predicate pred) {
if (!pred(top)) {
index_t count= top;
while (count > 0) {
index_t step= count >> 1;
if (!pred(top - step)) {
top-= step + 1;
count-= step + 1;
} else
count= step;
}
}
return top;
}
__global__ void gpu_insert_pivots_kernel(struct index_t_pair_struct* d_pivot_array, index_t* d_lowest_one_of_apparent_pair, index_t* d_pivot_column_index_OR_nonapparent_cols, index_t num_columns_to_reduce, index_t* d_num_nonapparent){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
//index_t* d_pivot_column_index_OR_nonapparent_cols is being used as d_nonapparent_cols
for(; tid<num_columns_to_reduce; tid+= stride) {
int keep_tid= d_lowest_one_of_apparent_pair[tid] == -1;
if (!keep_tid) {//insert pivot
d_pivot_array[tid].row_cidx= d_lowest_one_of_apparent_pair[tid];
d_pivot_array[tid].column_idx= tid;
}else {//keep track of nonapparent columns
d_pivot_array[tid].row_cidx= MAX_INT64;
d_pivot_array[tid].column_idx= MAX_INT64;
//do standard warp based filtering under the assumption that there are few nonapparent columns
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *) d_num_nonapparent, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if (keep_tid) {
d_pivot_column_index_OR_nonapparent_cols[pos]= tid;//being used as d_nonapparent_cols
}
}
}
}
__global__ void populate_edges_warpfiltering(struct diameter_index_t_struct* d_columns_to_reduce, value_t threshold, value_t* d_distance_matrix, index_t max_num_simplices, index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t* d_num_columns_to_reduce){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
__shared__ index_t shared_vertices[256][3];//eliminate bank conflicts (that's what the 3 is for)
for(; tid<max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= 2; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x][offset++]= v;
idx -= (*d_binomial_coeff)(v, k);
}
//shared_vertices is always sorted in decreasing order
value_t diam= d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x][0], shared_vertices[threadIdx.x][1], num_points)];
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_tid= diam<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_tid){
d_columns_to_reduce[pos].diameter= diam;
d_columns_to_reduce[pos].index= tid;
}
}
}
template <typename T> __global__ void populate_edges(T* d_flagarray, struct diameter_index_t_struct* d_columns_to_reduce, value_t threshold, value_t* d_distance_matrix, index_t max_num_simplices, index_t num_points, binomial_coeff_table* d_binomial_coeff){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
__shared__ index_t shared_vertices[256][3];//designed to eliminate bank conflicts (that's what the 3 is for)
for(; tid<max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= 2; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x][offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
//shared_vertices is sorted in decreasing order
value_t diam= d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x][0], shared_vertices[threadIdx.x][1], num_points)];
if(diam<=threshold){
d_columns_to_reduce[tid].diameter= diam;
d_columns_to_reduce[tid].index= tid;
d_flagarray[tid]= 1;
}else{
d_columns_to_reduce[tid].diameter= MAX_FLOAT;//the sorting is in boundary matrix filtration order
d_columns_to_reduce[tid].index= MIN_INT64;
d_flagarray[tid]= 0;
}
}
}
__global__ void populate_columns_to_reduce_warpfiltering(struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, index_t* d_pivot_column_index, value_t* d_distance_matrix, index_t num_points, index_t max_num_simplices, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff) {
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
value_t diam= -MAX_FLOAT;
for (index_t j= 0; j <= dim; ++j) {
for (index_t i= 0; i < j; ++i) {
diam= hd_max(diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + i], shared_vertices[threadIdx.x *(dim + 1) + j], num_points)]);
}
}
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_tid= d_pivot_column_index[tid] == -1 && diam<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_tid){
d_columns_to_reduce[pos].diameter= diam;
d_columns_to_reduce[pos].index= tid;
}
}
}
__global__ void populate_sparse_edges_preparingcount(int* d_num, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t* d_num_simplices){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
for(; tid<num_points; tid+= stride){
int _num=0;
index_t col_start= offsets[tid];
index_t col_end= offsets[tid+1];
for(index_t entry_idx= col_start; entry_idx<col_end; entry_idx++){
index_t neighbor_of_tid= col_indices[entry_idx];
if(tid>neighbor_of_tid)_num++;
}
d_num[tid]= _num;
}
}
__global__ void populate_sparse_edges_prefixsum(struct diameter_index_t_struct* d_simplices, int* d_num, CSR_distance_matrix* d_CSR_distance_matrix, binomial_coeff_table* d_binomial_coeff, index_t num_points, index_t* d_num_simplices){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
value_t* entries= d_CSR_distance_matrix->entries;
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
for(; tid<num_points; tid+= stride){
int _pos=0;
index_t col_start= offsets[tid];
index_t col_end= offsets[tid+1];
for(index_t entry_idx= col_start; entry_idx<col_end; entry_idx++){
index_t neighbor_of_tid= col_indices[entry_idx];
if(tid>neighbor_of_tid){
d_simplices[d_num[tid]+_pos].diameter= entries[entry_idx];
d_simplices[d_num[tid]+_pos++].index= (*d_binomial_coeff)(tid,2) + neighbor_of_tid;
}
}
if(tid==num_points-1){
*d_num_simplices= d_num[tid]+_pos;
}
}
}
__global__ void populate_sparse_simplices_warpfiltering(struct diameter_index_t_struct* d_simplices, index_t* d_num_simplices, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff){
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
dim--;//keep dim in terms of the dimension of the simplices
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < *d_num_simplices; tid += stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_simplices[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
index_t k= dim+1;
bool next_cofacet= false;
value_t nbr_diameter= -1;
index_t nbr_index= -1;
index_t idx_below= d_simplices[tid].index;
index_t idx_above= 0;
index_t base_vertex_index= shared_vertices[threadIdx.x * (dim + 1)]; //shared_vertices[threadIdx.x][0];
//this gives the entry indices of the right and left ends of the row indexed by base_vertex_index in the CSR distance matrix
index_t base_vertex_nbr_itr= d_CSR_distance_matrix->offsets[base_vertex_index+1]-1;
index_t base_vertex_nbr_end= d_CSR_distance_matrix->offsets[base_vertex_index];
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices along with the base_vertex
for(index_t other_vertex_idx=1; other_vertex_idx<dim+1; other_vertex_idx++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_idx];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//this simply says we only consider nbr_index (the appending point) to be of larger index than the largest of shared_vertices (the vertices of the current simplex)
if(shared_vertices[threadIdx.x * (dim + 1)]>nbr_index){
next_cofacet= false;
goto end_search;
}
next_cofacet= true;
goto end_search;
try_next_vertex:;
}
next_cofacet= false;
end_search:;
//end of search for next cofacet (sparse version)
while(next_cofacet){
base_vertex_nbr_itr--;
value_t cofacet_diameter= hd_max(d_simplices[tid].diameter, nbr_diameter);
index_t cofacet_index= idx_above + (*d_binomial_coeff)(nbr_index, k + 1) + idx_below;
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_cofacet= cofacet_diameter<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_cofacet);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_cofacet){
d_columns_to_reduce[pos].diameter= cofacet_diameter;
d_columns_to_reduce[pos].index= cofacet_index;
}
//isn't a way to represent the hash table on gpu in a cheap way, so we ignore the hash table for assembling columns to reduce
next_cofacet= false;
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices, in addition to the base_vertex
for(index_t other_vertex_index= 1; other_vertex_index<dim+1; other_vertex_index++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_index];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search_inloop;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex_inloop;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//notice we must reverse the shared_vertices in the original ripser code since they are sorted in decreasing order
if(shared_vertices[threadIdx.x * (dim + 1)]>nbr_index){
next_cofacet= false;
goto end_search_inloop;
}
next_cofacet= true;
goto end_search_inloop;
try_next_vertex_inloop:;
}
next_cofacet= false;
end_search_inloop:;
}
}
}
//the hope is that this is concurrency-bug free, however this is very bad for sparse graph performance
__global__ void populate_sparse_simplices_pairedfiltering(struct diameter_index_t_struct* d_simplices, index_t* d_num_simplices, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff){
//a thread per (simplex , point) pair
//if the point is a "neighbor" of the simplex, then include that cofacet in d_columns_to_reduce (a filtering of d_simplices),
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
dim--;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < *d_num_simplices*num_points; tid+= stride) {
index_t vertex= tid%num_points;
index_t simplex= tid/num_points;
index_t offset= 0;
index_t v= num_points-1;
index_t idx= d_simplices[simplex].index;
for (index_t k= dim +1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
value_t* entries= d_CSR_distance_matrix->entries;
bool alledges_exist= true;
index_t start_idx= offsets[vertex];
index_t end_idx= offsets[vertex+1];
value_t cofacet_diameter= d_simplices[simplex].diameter;
for(index_t vidx= 0; vidx<dim+1; vidx++) {
index_t v= shared_vertices[threadIdx.x * (dim + 1) + vidx];
index_t left= start_idx;
index_t right= end_idx-1;
//binary search for v in row vertex with start and end start_idx and end_idx respectively
while(left<=right){
index_t mid= left+(right-left)/2;
if(col_indices[mid]==v){
cofacet_diameter= hd_max(cofacet_diameter, entries[mid]);
goto next_vertex;
}
if(col_indices[mid]<v){
left= mid+1;
}else{
right= mid-1;
}
}
alledges_exist= false;
break;
next_vertex:;
}
if(!alledges_exist){
cofacet_diameter= threshold+1;
}
if(shared_vertices[threadIdx.x * (dim + 1)]>vertex){
alledges_exist= false;//we only include this vertex "vertex" if "vertex" has a strictly larger value than all other vertices in simplex
}
index_t cofacet_index= (*d_binomial_coeff)(vertex, dim+2) + d_simplices[simplex].index;
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_cofacet= cofacet_diameter<=threshold && alledges_exist;
int mask= __ballot_sync(FULL_MASK, keep_cofacet);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_cofacet){
d_columns_to_reduce[pos].diameter= cofacet_diameter;
d_columns_to_reduce[pos].index= cofacet_index;
}
}
}
template <typename T>__global__ void populate_columns_to_reduce(T* d_flagarray, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_pivot_column_index,
value_t* d_distance_matrix, index_t num_points, index_t max_num_simplices, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff) {
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
value_t diam= -MAX_FLOAT;
for(index_t i= 0; i<=dim; i++){
for(index_t j= i+1; j<=dim; j++){
diam= hd_max(diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + i], shared_vertices[threadIdx.x * (dim + 1) + j], num_points)]);
}
}
if(d_pivot_column_index[tid]==-1 && diam<=threshold){
d_columns_to_reduce[tid].diameter= diam;
d_columns_to_reduce[tid].index= tid;
d_flagarray[tid]= 1;
}else{
d_columns_to_reduce[tid].diameter= -MAX_FLOAT;
d_columns_to_reduce[tid].index= MAX_INT64;
d_flagarray[tid]= 0;
}
}
}
__global__ void init_cidx_to_diam(value_t* d_cidx_to_diameter, struct diameter_index_t_struct* d_columns_to_reduce, index_t num_columns_to_reduce){
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
for (; tid < num_columns_to_reduce; tid += stride) {
d_cidx_to_diameter[d_columns_to_reduce[tid].index]= d_columns_to_reduce[tid].diameter;
}
}
//scatter operation
__global__ void init_index_to_subindex(index_t* d_index_to_subindex, index_t* d_nonapparent_columns, index_t num_nonapparent){
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
for (; tid < num_nonapparent; tid += stride) {
d_index_to_subindex[d_nonapparent_columns[tid]]= tid;
}
}
//THIS IS THE GPU SCAN KERNEL for the dense case!!
__global__ void coboundary_findapparent_single_kernel(value_t* d_cidx_to_diameter, struct diameter_index_t_struct * d_columns_to_reduce, index_t* d_lowest_one_of_apparent_pair, const index_t dim, index_t num_simplices, const index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t num_columns_to_reduce, value_t* d_distance_matrix, value_t threshold) {
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < num_columns_to_reduce; tid += stride) {
//populate the shared_vertices[][] matrix with vertex indices of the column index= shared_vertices[threadIdx.x][-];
//shared_vertices[][] matrix has row index threadIdx.x and col index offset, represented by: shared_vertices[threadIdx.x * (dim + 1) + offset]=
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_columns_to_reduce[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;//set v to the largest possible vertex index given idx as a combinatorial index
idx-= (*d_binomial_coeff)(v, k);
}
v= num_points-1;//this keeps track of the newly added vertex to the set of vertices stored in shared_vertices[threadIdx.x][-] to form a cofacet of the columns
index_t k= dim+1;
index_t idx_below= d_columns_to_reduce[tid].index;
index_t idx_above= 0;
while ((v != -1) && ((*d_binomial_coeff)(v, k) <= idx_below)) {
idx_below -= (*d_binomial_coeff)(v, k);
idx_above += (*d_binomial_coeff)(v, k + 1);
--v;
--k;
assert(k != -1);
}
while(v!=-1) {//need to enumerate cofacet combinatorial index in reverse lexicographic order (largest cidx down to lowest cidx)
index_t row_combinatorial_index= idx_above + (*d_binomial_coeff)(v--, k + 1) + idx_below;
//find the cofacet diameter
value_t cofacet_diam= d_columns_to_reduce[tid].diameter;
for(index_t j=0; j<dim+1; j++){
index_t last_v= v+1;
index_t simplex_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if(last_v>simplex_v){
cofacet_diam= hd_max(cofacet_diam, d_distance_matrix[LOWER_DISTANCE_INDEX(last_v, shared_vertices[threadIdx.x * (dim + 1) + j], num_points)]);
}else{
cofacet_diam= hd_max(cofacet_diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + j], last_v, num_points)]);
}
}
if(d_columns_to_reduce[tid].diameter==cofacet_diam) {//this is a sufficient condition to finding a lowest one
//check if there is a nonzero to the left of (row_combinatorial_index, tid) in the coboundary matrix
//extra_vertex is the "added" vertex to shared_vertices
//FACT: {shared_vertices[threadIdx.x*(dim+1)+0]... threadIdx.x*(dim+1)+dim] union extra_vertex} equals cofacet vertices
index_t prev_remove_v= -1;
index_t s_v= shared_vertices[threadIdx.x * (dim + 1)];//the largest indexed vertex, shared_vertices is sorted in decreasing orders
bool passed_extra_v= false;
index_t remove_v;//this is the vertex to remove from the cofacet
index_t extra_vertex= v+1;//the +1 is here to counteract the last v-- line of code
if(s_v>extra_vertex){
remove_v= s_v;
}else{
remove_v= extra_vertex;
passed_extra_v= true;
}
prev_remove_v= remove_v;
index_t facet_of_row_combinatorial_index= row_combinatorial_index;
facet_of_row_combinatorial_index-= (*d_binomial_coeff)(remove_v, dim+2);//subtract the largest binomial coefficient to get the new cidx
index_t col_cidx= d_columns_to_reduce[tid].index;
value_t facet_of_row_diameter= d_cidx_to_diameter[facet_of_row_combinatorial_index];
value_t col_diameter= d_columns_to_reduce[tid].diameter;
if(facet_of_row_combinatorial_index==col_cidx && facet_of_row_diameter== col_diameter){//if there is an exact match of the tid column and the face of the row, then all subsequent faces to search will be to the right of column tid
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
break;
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<= threshold && (
// d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
// || (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
// || facet_of_row_combinatorial_index> d_columns_to_reduce[tid].index)){
//FACT: it turns out we actually only need to check facet_of_row_diameter<= threshold &&(facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx)
//since we should never have a facet of the cofacet with diameter larger than the cofacet's diameter= column's diameter
//in fact, we don't even need to check facet_of_row_diameter<=threshold since diam(face(cofacet(simplex)))<=diam(cofacet(simplex))=diam(simplex)<=threshold
//furthermore, we don't even need to check facet_of_row_combinatorial_index<col_cidx since we will exit upon col_cidx while iterating in increasing combinatorial index
else if(facet_of_row_diameter==col_diameter){
assert(facet_of_row_diameter<= threshold && (facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx));
d_lowest_one_of_apparent_pair[tid]= -1;
break;
}
bool found_apparent_or_found_nonzero_to_left= false;
//need to remove the last vertex: extra_v during searches
//there are dim+2 total number of vertices, the largest vertex was already checked so that is why k starts at dim+1
//j is the col. index e.g. shared_vertices[threadIdx.x][j]=shared_vertices[threadIdx.x*(dim+1)+j]
for(index_t k= dim+1, j=passed_extra_v?0:1; k>=1; k--){//start the loop after checking the lexicographically smallest facet boundary case
if(passed_extra_v) {
remove_v= shared_vertices[threadIdx.x * (dim + 1) + j];
j++;
}
else if(j<dim+1) {
//compare s_v in shared_vertices with v
index_t s_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if (s_v > extra_vertex) {
remove_v= s_v;
j++;
} else {
remove_v= extra_vertex;//recall: extra_vertex= v+1
passed_extra_v= true;
}
//this last else says: if j==dim+1 and we never passed extra vertex, then we must remove extra_vertex as the last vertex to remove to form a facet.
}else {//there is no need to check s_v>extra_vertex, we never passed extra_vertex, so we need to remove extra_vertex for the last check
remove_v= extra_vertex;//recall; v+1 since there is a v-- before this
passed_extra_v= true;
}
//exchange remove_v choose k with prev_remove_v choose k
facet_of_row_combinatorial_index-=(*d_binomial_coeff)(remove_v,k);
facet_of_row_combinatorial_index+= (*d_binomial_coeff)(prev_remove_v,k);
value_t facet_of_row_diameter= d_cidx_to_diameter[facet_of_row_combinatorial_index];
if(facet_of_row_combinatorial_index==col_cidx && facet_of_row_diameter==col_diameter){
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
found_apparent_or_found_nonzero_to_left= true;
break;///need to break out the while(v!=-1) loop
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<=threshold &&
//( d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
//|| (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
//|| facet_of_row_combinatorial_index>d_columns_to_reduce[tid].index)){
else if(facet_of_row_diameter==col_diameter){
assert(facet_of_row_diameter<= threshold && (facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx));
//d_lowest_one_of_apparent_pair[] is set to -1's already though...
d_lowest_one_of_apparent_pair[tid]= -1;
found_apparent_or_found_nonzero_to_left= true;
break;
}
prev_remove_v= remove_v;
}
//we must exit early if we have a nonzero to left or the column is apparent
if(found_apparent_or_found_nonzero_to_left){
break;
}
//end check for nonzero to left
//need to record the found pairs in the global hash_map for pairs (post processing)
//see post processing section in gpuscan method
}
while ((v != -1) && ((*d_binomial_coeff)(v, k) <= idx_below)) {
idx_below -= (*d_binomial_coeff)(v, k);
idx_above += (*d_binomial_coeff)(v, k + 1);
--v;
--k;
assert(k != -1);
}
}
}
}
//gpuscan for sparse case
__global__ void coboundary_findapparent_sparse_single_kernel(struct diameter_index_t_struct* d_cidx_diameter_sorted_list, struct diameter_index_t_struct * d_columns_to_reduce, index_t* d_lowest_one_of_apparent_pair, const index_t dim, const index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, value_t threshold){//(this was for debugging), index_t* d_leftmostnz_inrow) {
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
//vertices sorted in reverse order
for (; tid < num_columns_to_reduce; tid += stride) {
//populate the shared_vertices[][] matrix with vertex indices of the column tid;
//row index of the shared_vertices matrix is threadIdx.x, col index of the shared_vertices matrix is offset
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_columns_to_reduce[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;//set v to the largest possible vertex index given idx as a combinatorial index
idx-= (*d_binomial_coeff)(v, k);
}
index_t k= dim+1;
bool next_cofacet= false;
value_t nbr_diameter= -1;
index_t nbr_index= -1;
index_t idx_below= d_columns_to_reduce[tid].index;
index_t idx_above= 0;
index_t base_vertex_index= shared_vertices[threadIdx.x * (dim + 1)];
//this gives the entry indices of the right and left ends of the row indexed by base_vertex_index in the CSR distance matrix
index_t base_vertex_nbr_itr= d_CSR_distance_matrix->offsets[base_vertex_index+1]-1;
index_t base_vertex_nbr_end= d_CSR_distance_matrix->offsets[base_vertex_index];
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices besides the base_vertex
for(index_t other_vertex_idx=1; other_vertex_idx<dim+1; other_vertex_idx++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_idx];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
while (k > 0 && shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)] > nbr_index) {
idx_below -= (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k);
idx_above += (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k + 1);
--k;
}
next_cofacet= true;
goto end_search;
try_next_vertex:;
}
next_cofacet= false;
end_search:;
//end of search for next cofacet (sparse version)
while(next_cofacet) {
base_vertex_nbr_itr--;
value_t cofacet_diameter= hd_max(d_columns_to_reduce[tid].diameter, nbr_diameter);
index_t row_combinatorial_index= idx_above + (*d_binomial_coeff)(nbr_index, k + 1) + idx_below;
if(d_columns_to_reduce[tid].diameter==cofacet_diameter) {//this is a sufficient condition to finding a lowest one
//check if there is a nonzero to the left of (row_combinatorial_index, tid) in the coboundary matrix
//extra_vertex is the "added" vertex to shared_verticess
//FACT: {shared_vertices[threadIdx.x*(dim+1)+0]... shared_vertices[threadIdx.x*(dim+1)+dim] union extra_vertex} equals cofacet vertices
index_t prev_remove_v= -1;
index_t s_v= shared_vertices[threadIdx.x * (dim + 1)];//the largest indexed vertex, shared_vertices is sorted in decreasing orders
bool passed_extra_v= false;
index_t remove_v;//this is the vertex to remove from the cofacet
index_t extra_vertex= nbr_index;//the +1 is here to counteract the last v-- line of code
if (s_v > extra_vertex) {
remove_v= s_v;
} else {
remove_v= extra_vertex;
passed_extra_v= true;
}
prev_remove_v= remove_v;
index_t facet_of_row_combinatorial_index= row_combinatorial_index;
facet_of_row_combinatorial_index-= (*d_binomial_coeff)(remove_v, dim + 2);//subtract the largest binomial coefficient to get the new cidx
index_t col_cidx= d_columns_to_reduce[tid].index;
value_t col_diameter= d_columns_to_reduce[tid].diameter;
//binary search d_columns_to_reduce to get face_of_row_diameter
value_t facet_of_row_diameter= -1;// there is no direct mapping: d_cidx_to_diameter[facet_of_row_combinatorial_index];
///binary search goes here on d_cidx_diameter_sorted_list
index_t left= 0;
index_t right= num_columns_to_reduce-1;
while(left<=right){
index_t mid= left + (right-left)/2;
if(d_cidx_diameter_sorted_list[mid].index==facet_of_row_combinatorial_index){
facet_of_row_diameter= d_cidx_diameter_sorted_list[mid].diameter;
break;
}
if(d_cidx_diameter_sorted_list[mid].index<facet_of_row_combinatorial_index){
left= mid+1;
}else{
right= mid-1;
}
}
if (facet_of_row_combinatorial_index == col_cidx && facet_of_row_diameter == col_diameter) {//if there is an exact match of the tid column and the face of the row, then all subsequent faces to search will be to the right of column tid
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
break;
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<= threshold && (
// d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
// || (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
// || facet_of_row_combinatorial_index> d_columns_to_reduce[tid].index)){
//FACT: it turns out we actually only need to check facet_of_row_diameter<= threshold &&(facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx)
//since we should never have a face of the cofacet with diameter larger than the cofacet's diameter= column's diameter
//in fact, we don't even need to check facet_of_row_diameter<=threshold since diam(face(cofacet(simplex)))<=diam(cofacet(simplex))=diam(simplex)<=threshold
//furthremore, we don't even need to check facet_of_row_combinatorial_index<col_cidx since we will exit upon col_cidx while iterating in increasing combinatorial index
else if (facet_of_row_diameter == col_diameter) {
assert(facet_of_row_diameter <= threshold &&
(facet_of_row_diameter == col_diameter && facet_of_row_combinatorial_index < col_cidx));
d_lowest_one_of_apparent_pair[tid]= -1;
break;
}
bool found_apparent_or_found_nonzero_to_left= false;
//need to remove the last vertex: extra_v during searches
//there are dim+2 total number of vertices, the largest vertex was already checked so that is why k starts at dim+1
//j is the col. index, e.g. shared_vertices[threadIdx.x][j]=shared_vertices[threadIdx.x*(dim+1)+j]
for (index_t k= dim + 1, j= passed_extra_v ? 0 : 1;
k >= 1; k--) {//start the loop after checking the lexicographically smallest facet boundary case
if (passed_extra_v) {
remove_v= shared_vertices[threadIdx.x * (dim + 1) + j];
j++;
} else if (j < dim + 1) {
//compare s_v in shared_vertices with v
index_t s_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if (s_v > extra_vertex) {
remove_v= s_v;
j++;
} else {
remove_v= extra_vertex;//recall: extra_vertex=nbr_index;
passed_extra_v= true;
}
//this last else says: if j==dim+1 and we never passed extra vertex, then we must remove extra_vertex as the last vertex to remove to form a face.
} else {//there is no need to check s_v>extra_vertex, we never passed extra_vertex, so we need to remove extra_vertex for the last check
remove_v= extra_vertex;//recall; extra_vertex= nbr_index
passed_extra_v= true;
}
//exchange remove_v choose k with prev_remove_v choose k
facet_of_row_combinatorial_index -= (*d_binomial_coeff)(remove_v, k);
facet_of_row_combinatorial_index += (*d_binomial_coeff)(prev_remove_v, k);
//replace d_cidx_to_diameter with d_cidx_diameter_sorted_list;
value_t facet_of_row_diameter= -1;// replacing direct map:: d_cidx_to_diameter[facet_of_row_combinatorial_index];
///binary search goes here on d_cidx_diameter_sorted_list
index_t left= 0;
index_t right= num_columns_to_reduce-1;
while(left<=right){
index_t mid= left + (right-left)/2;
if(d_cidx_diameter_sorted_list[mid].index==facet_of_row_combinatorial_index){
facet_of_row_diameter= d_cidx_diameter_sorted_list[mid].diameter;
break;
}
if(d_cidx_diameter_sorted_list[mid].index<facet_of_row_combinatorial_index){
left= mid+1;
}else{
right= mid-1;
}
}
if (facet_of_row_combinatorial_index == col_cidx && facet_of_row_diameter == col_diameter) {
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
found_apparent_or_found_nonzero_to_left= true;
break;///need to break out the while(v!=-1) loop
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<=threshold &&
//( d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
//|| (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
//|| facet_of_row_combinatorial_index>d_columns_to_reduce[tid].index)){
else if (facet_of_row_diameter == col_diameter) {
assert(facet_of_row_diameter <= threshold &&
(facet_of_row_diameter == col_diameter && facet_of_row_combinatorial_index < col_cidx));
//d_lowest_one_of_apparent_pair[tid]= -1;
found_apparent_or_found_nonzero_to_left= true;
break;
}
prev_remove_v= remove_v;
}
//we must exit early if we have a nonzero to left or the column is apparent
if (found_apparent_or_found_nonzero_to_left) {
break;
}
//end check for nonzero to left
}
next_cofacet= false;
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices besides the base_vertex
for(index_t other_vertex_index=1; other_vertex_index<dim+1; other_vertex_index++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_index];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search_inloop;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex_inloop;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//notice we must reverse the shared_vertices since they are sorted in decreasing order
while (k > 0 && shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)] > nbr_index) {
idx_below -= (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k);
idx_above += (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k + 1);
--k;
}
next_cofacet= true;
goto end_search_inloop;
try_next_vertex_inloop:;
}
next_cofacet= false;
end_search_inloop:;
}
}
}
template <typename DistanceMatrix> class ripser {
DistanceMatrix dist;//this can be either sparse or compressed
index_t n, dim_max;//n is the number of points, dim_max is the max dimension to compute PH
value_t threshold;//this truncates the filtration by removing simplices too large. low values of threshold should use --sparse option
float ratio;
const binomial_coeff_table binomial_coeff;
mutable std::vector<index_t> vertices;
mutable std::vector<index_t> vertices_of_birth;
mutable std::vector<index_t> vertices_of_death;
mutable std::vector<diameter_index_t_struct> cofacet_entries;
private:
size_t freeMem, totalMem;
hipDeviceProp_t deviceProp;
int grid_size;
hash_map<index_t, index_t> pivot_column_index;//small hash map for matrix reduction
//we are removing d_flagarray for a more general array: d_flagarray_OR_index_to_subindex
//char* type is 3x faster for thrust::count than index_t*
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
char* d_flagarray;//an array where d_flagarray[i]= 1 if i satisfies some property and d_flagarray[i]=0 otherwise
#endif
index_t* h_pivot_column_index_array_OR_nonapparent_cols;//the pivot column index hashmap represented by an array OR the set of nonapparent column indices
value_t* d_distance_matrix;//GPU copy of the distance matrix
CSR_distance_matrix* d_CSR_distance_matrix;
//d_pivot_column_index_OR_nonapparent_cols is d_nonapparent_cols when used in gpuscan() and compute_pairs() and is d_pivot_column_index when in gpu_assemble_columns()
index_t* d_pivot_column_index_OR_nonapparent_cols;//the pivot column index hashmap represented on GPU as an array OR the set of nonapparent columns on GPU
index_t max_num_simplices_forall_dims;//the total number of simplices of dimension dim_max possible (this assumes no threshold condition to sparsify the simplicial complex)
//the total number of simplices in the dim_max+1 dimension (a factor n larger than max_num_simplices_forall_dims), infeasible to allocate with this number if max_num_simplices_forall_dims is already pushing the memory limits.
struct diameter_index_t_struct* d_columns_to_reduce;//GPU copy of the columns to reduce depending on the current dimension
struct diameter_index_t_struct* h_columns_to_reduce;//columns to reduce depending on the current dimension
binomial_coeff_table* d_binomial_coeff;//GPU copy of the binomial coefficient table
index_t* d_num_columns_to_reduce=NULL;//use d_num_columns_to_reduce to keep track of the number of columns to reduce
index_t* h_num_columns_to_reduce;//h_num_columns_to_reduce is tied to d_num_columns_to_reduce in pinned memory?
index_t* d_num_nonapparent= NULL;//the number of nonapparent columns. *d_num_columns_to_reduce-*d_num_nonapparent= number of apparent columns
index_t* h_num_nonapparent;//h_num_nonapparent is tied to d_num_nonapparent in pinned memory?
index_t num_apparent;//the number of apparent pairs found
value_t* d_cidx_to_diameter;//GPU side mapping from cidx to diameters for gpuscan faces of a given row of a "lowest one" search
struct diameter_index_t_struct* d_cidx_diameter_pairs_sortedlist;//used as a sorted list of cidx,diameter pairs for lookup in gpuscan kernel for sparse case
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)//assemble reduction submatrix
index_t* d_flagarray_OR_index_to_subindex;//GPU data structure that maps index to subindex
index_t* h_flagarray_OR_index_to_subindex;//copy of index_to_subindex data structure that acts as a map for matrix index to reduction submatrix indexing on CPU side
#endif
//for GPU-scan (finding apparent pairs)
index_t* d_lowest_one_of_apparent_pair;//GPU copy of the lowest ones, d_lowest_one_of_apparent_pair[col]= lowest one row of column col
//index_t* h_lowest_one_of_apparent_pair;//the lowest ones, d_lowest_one_of_apparent_pair[col]= lowest one row of column col
struct index_t_pair_struct* d_pivot_array;//sorted array of all pivots, substitute for a structured hashmap with lookup done by log(n) binary search
struct index_t_pair_struct* h_pivot_array;//sorted array of all pivots
std::vector<struct diameter_index_t_struct> columns_to_reduce;
//used for sparse_distance_matrix ONLY:
struct diameter_index_t_struct* d_simplices;//GPU copy of h_simplices
struct diameter_index_t_struct* h_simplices;//the simplices filtered by diameter that need to be considered for the next dimension's simplices
index_t* d_num_simplices=NULL;//use d_num_simplices to keep track of the number of simplices in h_ or d_ simplices
index_t* h_num_simplices;//h_num_simplices is tied to d_num_simplices in pinned memory
public:
std::ofstream outfile;
ripser(DistanceMatrix&& _dist, index_t _dim_max, value_t _threshold, float _ratio)
: dist(std::move(_dist)), n(dist.size()),
dim_max(::min(_dim_max, index_t(dist.size() - 2))), threshold(_threshold),
ratio(_ratio), binomial_coeff(n, dim_max + 2) {
outfile.open("/tmp/features.txt", std::ios::trunc | std::ios::out);
}
void free_init_cpumem() {
free(h_pivot_column_index_array_OR_nonapparent_cols);
}
void free_remaining_cpumem(){
free(h_columns_to_reduce);
free(h_pivot_array);
pivot_column_index.resize(0);
}
//calulate gpu_num_simplices_forall_dims based on GPU memory limit
index_t calculate_gpu_max_columns_for_sparserips_computation_from_memory(){
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem,&totalMem);
#ifdef PROFILING
//std::cerr<<"before calculation, sparse: total mem, free mem: "<<totalMem <<" bytes, "<<freeMem<<" bytes"<<std::endl;
#endif
index_t gpumem_char_array_bytes_factor= sizeof(char);
index_t gpumem_index_t_array_bytes_factor= sizeof(index_t);
index_t gpumem_value_t_array_bytes_factor= sizeof(value_t);
index_t gpumem_index_t_pairs_array_bytes_factor= sizeof(index_t_pair_struct);
index_t gpumem_diameter_index_t_array_bytes_factor= sizeof(diameter_index_t_struct);
index_t gpumem_CSR_dist_matrix_bytes= sizeof(index_t)*(n+1+4)+(sizeof(index_t)+sizeof(value_t))*dist.num_entries;//sizeof(value_t)*(n*(n-1))/2;
index_t gpumem_binomial_coeff_table_bytes= sizeof(index_t)*binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length() +sizeof(binomial_coeff_table);
index_t gpumem_index_t_bytes= sizeof(index_t);
index_t padding= 1024*1024*1024;//1GB padding
index_t fixedmemory= gpumem_index_t_bytes*4+gpumem_binomial_coeff_table_bytes+gpumem_CSR_dist_matrix_bytes+padding;
//this can be larger but not smaller than actual sizeof(-) sum
index_t sizeof_factor_sum=
gpumem_diameter_index_t_array_bytes_factor
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
+gpumem_index_t_array_bytes_factor
#endif
+gpumem_diameter_index_t_array_bytes_factor
+gpumem_index_t_array_bytes_factor
+gpumem_index_t_array_bytes_factor
+gpumem_index_t_pairs_array_bytes_factor
+gpumem_diameter_index_t_array_bytes_factor
+gpumem_index_t_pairs_array_bytes_factor;
#ifdef PROFILING
//std::cerr<<"sparse final calculation for memory, free memory: "<<freeMem <<" bytes, sizeof_factor_sum: "<<sizeof_factor_sum<<" bytes"<<std::endl;
#endif
return (freeMem*0.7-fixedmemory)/sizeof_factor_sum;
}
index_t calculate_gpu_dim_max_for_fullrips_computation_from_memory(const index_t dim_max, const bool isfullrips){
if(dim_max==0)return 0;
index_t gpu_dim_max= dim_max;
index_t gpu_alloc_memory_in_bytes= 0;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem,&totalMem);
#ifdef PROFILING
//std::cerr<<"GPU memory before full rips memory calculation, total mem: "<< totalMem<<" bytes, free mem: "<<freeMem<<" bytes"<<std::endl;
#endif
do{
index_t gpu_num_simplices_forall_dims= gpu_dim_max<n/2?get_num_simplices_for_dim(gpu_dim_max): get_num_simplices_for_dim(n/2);
index_t gpumem_char_array_bytes= sizeof(char)*gpu_num_simplices_forall_dims;
index_t gpumem_index_t_array_bytes= sizeof(index_t)*gpu_num_simplices_forall_dims;
index_t gpumem_value_t_array_bytes= sizeof(value_t)*gpu_num_simplices_forall_dims;
index_t gpumem_index_t_pairs_array_bytes= sizeof(index_t_pair_struct)*gpu_num_simplices_forall_dims;
index_t gpumem_diameter_index_t_array_bytes= sizeof(diameter_index_t_struct)*gpu_num_simplices_forall_dims;
index_t gpumem_dist_matrix_bytes= sizeof(value_t)*(n*(n-1))/2;
index_t gpumem_binomial_coeff_table_bytes= sizeof(index_t)*binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length() +sizeof(binomial_coeff_table);
index_t gpumem_index_t_bytes= sizeof(index_t);
//gpumem_CSR_dist_matrix_bytes is estimated to have n*(n-1)/2 number of nonzeros as an upper bound
index_t gpumem_CSR_dist_matrix_bytes= sizeof(index_t)*(n+1+4)+(sizeof(index_t)+sizeof(value_t))*n*(n-1)/2;//dist.num_entries;//sizeof(value_t)*(n*(n-1))/2;
if(isfullrips) {//count the allocated memory for dense case
gpu_alloc_memory_in_bytes= gpumem_diameter_index_t_array_bytes +
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
gpumem_char_array_bytes +
#endif
gpumem_value_t_array_bytes +
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
gpumem_index_t_array_bytes+
#endif
gpumem_dist_matrix_bytes +
gpumem_index_t_array_bytes +
gpumem_binomial_coeff_table_bytes +
gpumem_index_t_bytes * 2 +
gpumem_index_t_array_bytes +
gpumem_index_t_pairs_array_bytes +
gpumem_index_t_pairs_array_bytes;//this last one is for thrust radix sorting buffer
#ifdef PROFILING
////std::cerr<<"free gpu memory for full rips by calculation in bytes for gpu dim: "<<gpu_dim_max<<": "<<freeMem-gpu_alloc_memory_in_bytes<<std::endl;
//std::cerr<<"gpu memory needed for full rips by calculation in bytes for dim: "<<gpu_dim_max<<": "<<gpu_alloc_memory_in_bytes<<" bytes"<<std::endl;
#endif
if (gpu_alloc_memory_in_bytes <= freeMem){
return gpu_dim_max;
}
}else{//count the alloced memory for sparse case
//includes the d_simplices array used in sparse computation for an approximation for both sparse and full rips compelexes?
gpu_alloc_memory_in_bytes= gpumem_diameter_index_t_array_bytes
#ifdef ASSEMBlE_REDUCTION_SUBMATRIX
+ gpumem_index_t_array_bytes
#endif
+ gpumem_CSR_dist_matrix_bytes
+ gpumem_diameter_index_t_array_bytes
+ gpumem_index_t_array_bytes
+ gpumem_binomial_coeff_table_bytes
+ gpumem_index_t_array_bytes
+ gpumem_index_t_pairs_array_bytes
+ gpumem_index_t_bytes*4
+ gpumem_diameter_index_t_array_bytes
+ gpumem_index_t_pairs_array_bytes;//last one is for buffer needed for sorting
#ifdef PROFILING
////std::cerr<<"(sparse) free gpu memory for full rips by calculation in bytes for gpu dim: "<<gpu_dim_max<<": "<<freeMem-gpu_alloc_memory_in_bytes<<std::endl;
//std::cerr<<"(sparse) gpu memory needed for full rips by calculation in bytes for dim: "<<gpu_dim_max<<": "<<gpu_alloc_memory_in_bytes<<" bytes"<<std::endl;
#endif
if (gpu_alloc_memory_in_bytes <= freeMem){
return gpu_dim_max;
}
}
gpu_dim_max--;
}while(gpu_dim_max>=0);
return 0;
}
index_t get_num_simplices_for_dim(index_t dim){
//beware if dim+1>n and where dim is negative
assert(dim+1<=n && dim+1>=0);
return binomial_coeff(n, dim + 1);
}
index_t get_next_vertex(index_t& v, const index_t idx, const index_t k) const {
return v= upper_bound(
v, [&](const index_t& w) -> bool { return (binomial_coeff(w, k) <= idx); });
}
index_t get_edge_index(const index_t i, const index_t j) const {
return binomial_coeff(i, 2) + j;
}
template <typename OutputIterator>
OutputIterator get_simplex_vertices(index_t idx, const index_t dim, index_t v,
OutputIterator out) const {
--v;
for (index_t k= dim + 1; k > 0; --k) {
get_next_vertex(v, idx, k);
*out++= v;
idx-= binomial_coeff(v, k);
}
return out;
}
value_t compute_diameter(const index_t index, index_t dim) const {
value_t diam= -std::numeric_limits<value_t>::infinity();
vertices.clear();
get_simplex_vertices(index, dim, dist.size(), std::back_inserter(vertices));
for (index_t i= 0; i <= dim; ++i)
for (index_t j= 0; j < i; ++j) {
diam= ::max(diam, dist(vertices[i], vertices[j]));
}
return diam;
}
class simplex_coboundary_enumerator;
void gpu_assemble_columns_to_reduce_plusplus(const index_t dim);
void cpu_byneighbor_assemble_columns_to_reduce(std::vector<struct diameter_index_t_struct>& simplices, std::vector<struct diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim);
void cpu_assemble_columns_to_reduce(std::vector<struct diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim);
void assemble_columns_gpu_accel_transition_to_cpu_only(const bool& more_than_one_dim_cpu_only, std::vector<diameter_index_t_struct>& simplices, std::vector<diameter_index_t_struct>& columns_to_reduce, hash_map<index_t,index_t>& cpu_pivot_column_index, index_t dim);
index_t get_value_pivot_array_hashmap(index_t row_cidx, struct row_cidx_column_idx_struct_compare cmp){
#ifdef USE_PHASHMAP
index_t col_idx= phmap_get_value(row_cidx);
if(col_idx==-1){
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(row_cidx);
if(pair==pivot_column_index.end()){
#endif
index_t first= 0;
index_t last= num_apparent- 1;
while(first<=last){
index_t mid= first + (last-first)/2;
if(h_pivot_array[mid].row_cidx==row_cidx){
return h_pivot_array[mid].column_idx;
}
if(h_pivot_array[mid].row_cidx<row_cidx){
first= mid+1;
}else{
last= mid-1;
}
}
return -1;
}else{
#ifdef USE_PHASHMAP
return col_idx;
#endif
#ifdef USE_GOOGLE_HASHMAP
return pair->second;
#endif
}
}
void compute_dim_0_pairs(std::vector<diameter_index_t_struct>& edges,
std::vector<diameter_index_t_struct>& columns_to_reduce) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
union_find dset(n);
edges= get_edges();
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(edges.rbegin(), edges.rend(), cmp);
std::vector<index_t> vertices_of_edge(2);
for (auto e : edges) {
get_simplex_vertices(e.index, 1, n, vertices_of_edge.rbegin());
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
if(e.diameter!=0) {
outfile << "0 0 " << e.diameter << " " << vertices_of_edge[0]<< " "<< vertices_of_edge[1] << " inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) outfile << "0 0 inf" << i << " " << i << " inf" <<std::endl;
#endif
}
void gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce);
void gpuscan(const index_t dim);
template <typename Column>
diameter_index_t_struct init_coboundary_and_get_pivot_fullmatrix(const diameter_index_t_struct simplex,
Column& working_coboundary, const index_t& dim
, hash_map<index_t, index_t>& pivot_column_index) {
bool check_for_emergent_pair= true;
cofacet_entries.clear();
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
cofacet_entries.push_back(cofacet);
if (check_for_emergent_pair && (simplex.diameter == cofacet.diameter)) {
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()){
return cofacet;
}
check_for_emergent_pair= false;
}
}
}
for (auto cofacet : cofacet_entries) working_coboundary.push(cofacet);
return get_pivot(working_coboundary);
}
template <typename Column>
diameter_index_t_struct init_coboundary_and_get_pivot_submatrix(const diameter_index_t_struct simplex,
Column& working_coboundary, index_t dim, struct row_cidx_column_idx_struct_compare cmp) {
bool check_for_emergent_pair= true;
cofacet_entries.clear();
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
cofacet_entries.push_back(cofacet);
if (check_for_emergent_pair && (simplex.diameter == cofacet.diameter)) {
if(get_value_pivot_array_hashmap(cofacet.index, cmp)==-1) {
return cofacet;
}
check_for_emergent_pair= false;
}
}
}
for (auto cofacet : cofacet_entries) working_coboundary.push(cofacet);
return get_pivot(working_coboundary);
}
template <typename Column>
void add_simplex_coboundary_oblivious(const diameter_index_t_struct simplex, const index_t& dim,
Column& working_coboundary) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) working_coboundary.push(cofacet);
}
}
template <typename Column>
void add_simplex_coboundary_use_reduction_column(const diameter_index_t_struct simplex, const index_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
working_reduction_column.push(simplex);
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) working_coboundary.push(cofacet);
}
}
//THIS IS THE METHOD TO CALL FOR CPU SIDE FULL MATRIX REDUCTION
template <typename Column>
void add_coboundary_fullmatrix(compressed_sparse_matrix<diameter_index_t_struct>& reduction_matrix,
const std::vector<diameter_index_t_struct>& columns_to_reduce,
const size_t index_column_to_add, const size_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
diameter_index_t_struct column_to_add= columns_to_reduce[index_column_to_add];
add_simplex_coboundary_use_reduction_column(column_to_add, dim, working_reduction_column, working_coboundary);
for (diameter_index_t_struct simplex : reduction_matrix.subrange(index_column_to_add)) {
add_simplex_coboundary_use_reduction_column(simplex, dim, working_reduction_column, working_coboundary);
}
}
//THIS IS THE METHOD TO CALL FOR SUBMATRIX REDUCTION ON CPU SIDE
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
template <typename Column>
void add_coboundary_reduction_submatrix(compressed_sparse_submatrix<diameter_index_t_struct>& reduction_submatrix,
const size_t index_column_to_add, const size_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
diameter_index_t_struct column_to_add= h_columns_to_reduce[index_column_to_add];
add_simplex_coboundary_use_reduction_column(column_to_add, dim, working_reduction_column, working_coboundary);
index_t subindex= h_flagarray_OR_index_to_subindex[index_column_to_add];//this is only defined when ASSEMBLE_REDUCTION_SUBMATRIX is defined
if(subindex>-1) {
for (diameter_index_t_struct simplex : reduction_submatrix.subrange(subindex)) {
add_simplex_coboundary_use_reduction_column(simplex, dim, working_reduction_column, working_coboundary);
}
}
}
#endif
void compute_pairs(std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim " << dim << ":" << std::endl;
#endif
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
compressed_sparse_matrix<diameter_index_t_struct> reduction_matrix;
#endif
for (index_t index_column_to_reduce= 0; index_column_to_reduce < columns_to_reduce.size();
++index_column_to_reduce) {
auto column_to_reduce= columns_to_reduce[index_column_to_reduce];
std::priority_queue<diameter_index_t_struct, std::vector<diameter_index_t_struct>,
greaterdiam_lowerindex_diameter_index_t_struct_compare>
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
working_reduction_column,
#endif
working_coboundary;
value_t diameter= column_to_reduce.diameter;
vertices_of_birth.clear();
get_simplex_vertices(column_to_reduce.index, dim + 1, n, std::back_inserter(vertices_of_birth));
#ifdef INDICATE_PROGRESS
if ((index_column_to_reduce + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "reducing column " << index_column_to_reduce + 1 << "/"
<< columns_to_reduce.size() << " (diameter " << diameter << ")"
<< std::flush << "\r";
#endif
index_t index_column_to_add= index_column_to_reduce;
diameter_index_t_struct pivot;
// initialize index bounds of reduction matrix
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
reduction_matrix.append_column();
#endif
pivot= init_coboundary_and_get_pivot_fullmatrix(columns_to_reduce[index_column_to_add], working_coboundary, dim, pivot_column_index);
while (true) {
if(pivot.index!=-1){
auto left_pair= pivot_column_index.find(pivot.index);
if (left_pair != pivot_column_index.end()) {
index_column_to_add= left_pair->second;
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
add_coboundary_fullmatrix(reduction_matrix, columns_to_reduce, index_column_to_add, dim, working_reduction_column, working_coboundary);
pivot= get_pivot(working_coboundary);
#else
add_simplex_coboundary_oblivious(columns_to_reduce[index_column_to_add], dim, working_coboundary);
pivot= get_pivot(working_coboundary);
#endif
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
value_t death= pivot.diameter;
vertices_of_death.clear();
get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
if (death > diameter * ratio) {
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K";
#endif
//std::cout << diameter << " " << death << ")" << std::endl
// << std::flush;
}
#endif
pivot_column_index[pivot.index]= index_column_to_reduce;
break;
}
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
outfile << dim << " " << diameter <<" inf "<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " inf" << std::endl << std::flush;
#endif
break;
}
}
}
}
void compute_pairs_plusplus(
index_t dim,
index_t gpuscan_startingdim) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim " << dim << ":" << std::endl;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
compressed_sparse_submatrix<diameter_index_t_struct> reduction_submatrix;
#endif
#ifdef INDICATE_PROGRESS
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
struct row_cidx_column_idx_struct_compare cmp_pivots;
index_t num_columns_to_iterate= *h_num_columns_to_reduce;
if(dim>=gpuscan_startingdim){
num_columns_to_iterate= *h_num_nonapparent;
}
for (index_t sub_index_column_to_reduce= 0; sub_index_column_to_reduce < num_columns_to_iterate;
++sub_index_column_to_reduce) {
index_t index_column_to_reduce =sub_index_column_to_reduce;
if(dim>=gpuscan_startingdim) {
index_column_to_reduce= h_pivot_column_index_array_OR_nonapparent_cols[sub_index_column_to_reduce];//h_nonapparent_cols
}
auto column_to_reduce= h_columns_to_reduce[index_column_to_reduce];
std::priority_queue<diameter_index_t_struct, std::vector<diameter_index_t_struct>,
greaterdiam_lowerindex_diameter_index_t_struct_compare>
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
working_reduction_column,
#endif
working_coboundary;
value_t diameter= column_to_reduce.diameter;
vertices_of_birth.clear();
get_simplex_vertices(column_to_reduce.index, dim + 1, n, std::back_inserter(vertices_of_birth));
index_t index_column_to_add= index_column_to_reduce;
struct diameter_index_t_struct pivot;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
reduction_submatrix.append_column();
#endif
pivot= init_coboundary_and_get_pivot_submatrix(column_to_reduce, working_coboundary, dim, cmp_pivots);
while (true) {
#ifdef INDICATE_PROGRESS
//if(sub_index_column_to_reduce%2==0){
if (std::chrono::steady_clock::now() > next) {
//std::cerr<< clear_line << "reducing column " << index_column_to_reduce + 1
<< "/" << *h_num_columns_to_reduce << " (diameter " << diameter << ")"
<< std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
if(pivot.index!=-1){
index_column_to_add= get_value_pivot_array_hashmap(pivot.index,cmp_pivots);
if(index_column_to_add!=-1) {
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
add_coboundary_reduction_submatrix(reduction_submatrix, index_column_to_add,
dim, working_reduction_column, working_coboundary);
pivot= get_pivot(working_coboundary);
#else
add_simplex_coboundary_oblivious(h_columns_to_reduce[index_column_to_add], dim, working_coboundary);
pivot= get_pivot(working_coboundary);
#endif
}else{
#ifdef PRINT_PERSISTENCE_PAIRS
value_t death= pivot.diameter;
// TODO(@captain-pool): What's the length of the vertices when doing higher dimensions?
// if(dim == 1){
// vertices_of_death.clear();
// get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
//// // Feature gets created by Edges (1-simplex) and get closed by triangles (2-simplex)
//// // Selecting vertex of maximum length edge
// outfile << vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
// }
if (death > diameter * ratio) {
#ifdef INDICATE_PROGRESS
std::cerr << clear_line << std::flush;
#endif
if(dim == 1){
vertices_of_death.clear();
get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
// Feature gets created by Edges (1-simplex) and get closed by triangles (2-simplex)
// Selecting vertex of maximum length edge
// outfile << dim << " " << diameter << " " << death << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
value_t d1 = dist.distance(vertices_of_death[0], vertices_of_death[1]);
value_t d2 = dist.distance(vertices_of_death[0], vertices_of_death[2]);
value_t d3 = dist.distance(vertices_of_death[1], vertices_of_death[2]);
value_t c = dist.distance(vertices_of_birth[0], vertices_of_birth[1]);
auto k = vertices_of_birth[0];
auto l = vertices_of_birth[1];
if( d1 >= c){
k = vertices_of_death[0];
l = vertices_of_death[1];
}
else if(d2 >= c){
k = vertices_of_death[0];
l = vertices_of_death[2];
}
else if(d3 >= c){
k = vertices_of_death[1];
l = vertices_of_death[2];
}
outfile<< dim << " " << dim << " " << vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << k << " " << l << std::endl;
//outfile<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
}
else if(dim == 2){
// outfile << dim << " " << diameter << " " << death << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
//outfile << dim << " " << diameter << " " << birth << " " << vertices_of_birth[0] << " " << vertices_of_birth[1] <<" " << vertices_of_birth[2] << std::endl;
}
else{
std::cout << " [" << diameter << "," << death << ")" << std::endl
<< std::flush;
}
}
#endif
#ifdef USE_PHASHMAP
phmap_put(pivot.index, index_column_to_reduce);
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index[pivot.index]= index_column_to_reduce;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
while (true) {
diameter_index_t_struct e= pop_pivot(working_reduction_column);
if (e.index == -1) break;
reduction_submatrix.push_back(e);
}
#endif
break;
}
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
outfile << dim << " " << diameter <<" inf "<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " inf" << std::endl << std::flush;
#endif
break;
}
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
std::vector<diameter_index_t_struct> get_edges();
void compute_barcodes();
};
template<>
class ripser<compressed_lower_distance_matrix>::simplex_coboundary_enumerator {
private:
index_t idx_below, idx_above, v, k;
std::vector<index_t> vertices;
///const diameter_index_t simplex;
const struct diameter_index_t_struct simplex;
const compressed_lower_distance_matrix& dist;
const binomial_coeff_table& binomial_coeff;
public:
simplex_coboundary_enumerator(
const struct diameter_index_t_struct _simplex, index_t _dim,
const ripser<compressed_lower_distance_matrix>& parent)
: idx_below(_simplex.index),
idx_above(0), v(parent.n - 1), k(_dim + 1),
vertices(_dim + 1), simplex(_simplex), dist(parent.dist),
binomial_coeff(parent.binomial_coeff) {
parent.get_simplex_vertices(_simplex.index, _dim, parent.n, vertices.begin());
}
bool has_next(bool all_cofacets= true) {
return (v >= k && (all_cofacets || binomial_coeff(v, k) > idx_below));//second condition after the || is to ensure iteration of cofacets with no need to adjust
}
struct diameter_index_t_struct next() {
while ((binomial_coeff(v, k) <= idx_below)) {
idx_below -= binomial_coeff(v, k);
idx_above += binomial_coeff(v, k + 1);
--v;
--k;
assert(k != -1);
}
value_t cofacet_diameter= simplex.diameter;
for (index_t w : vertices) cofacet_diameter= ::max(cofacet_diameter, dist(v, w));
index_t cofacet_index= idx_above + binomial_coeff(v--, k + 1) + idx_below;
return {cofacet_diameter, cofacet_index};
}
};
template <> class ripser<sparse_distance_matrix>::simplex_coboundary_enumerator {
const ripser& parent;
index_t idx_below, idx_above, k;
std::vector<index_t> vertices;
const diameter_index_t_struct simplex;
const sparse_distance_matrix& dist;
std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator>& neighbor_it;
std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator>& neighbor_end;
index_diameter_t_struct neighbor;
public:
simplex_coboundary_enumerator(const diameter_index_t_struct _simplex, const index_t _dim,
const ripser& _parent)
: parent(_parent), idx_below(_simplex.index), idx_above(0), k(_dim + 1),
vertices(_dim + 1), simplex(_simplex),
dist(parent.dist),
neighbor_it(dist.neighbor_it),
neighbor_end(dist.neighbor_end) {
neighbor_it.clear();
neighbor_end.clear();
parent.get_simplex_vertices(idx_below, _dim, parent.n, vertices.rbegin());
for (auto v : vertices) {
neighbor_it.push_back(dist.neighbors[v].rbegin());
neighbor_end.push_back(dist.neighbors[v].rend());
}
}
bool has_next(bool all_cofacets= true) {
//auto& x will permanently change upon updates to it.
for (auto &it0= neighbor_it[0], &end0= neighbor_end[0]; it0 != end0; ++it0) {
neighbor= *it0;//neighbor is a pair: diameter_index_t_struct
for (size_t idx= 1; idx < neighbor_it.size(); ++idx) {
auto &it= neighbor_it[idx], end= neighbor_end[idx];
//enforce the invariant that get_index(*it)<=get_index(neighbor)
while(it->index > neighbor.index)
if (++it == end) return false;
if(it->index != neighbor.index)
goto continue_outer;//try the next number in neighbor_it[0]
else
//update neighbor to the max of matching vertices of "neighbors" of each vertex in simplex
neighbor= (neighbor.diameter>it->diameter)?neighbor:*it;
}
while(k>0 && vertices[k-1]>neighbor.index){
if (!all_cofacets) return false;
idx_below -= parent.binomial_coeff(vertices[k - 1], k);
idx_above += parent.binomial_coeff(vertices[k - 1], k + 1);
--k;
}
return true;
continue_outer:;
}
return false;
}
diameter_index_t_struct next() {
++neighbor_it[0];
value_t cofacet_diameter= ::max(simplex.diameter, neighbor.diameter);
index_t cofacet_index= idx_above+parent.binomial_coeff(neighbor.index,k+1)+idx_below;
return {cofacet_diameter,cofacet_index};
}
};
template<> std::vector<diameter_index_t_struct> ripser<compressed_lower_distance_matrix>::get_edges() {
std::vector<diameter_index_t_struct> edges;
for (index_t index= binomial_coeff(n, 2); index-- > 0;) {
value_t diameter= compute_diameter(index, 1);
if (diameter <= threshold) edges.push_back({diameter, index});
}
return edges;
}
template <> std::vector<diameter_index_t_struct> ripser<sparse_distance_matrix>::get_edges() {
std::vector<diameter_index_t_struct> edges;
for (index_t i= 0; i < n; ++i)
for (auto nbr : dist.neighbors[i]) {
index_t j= nbr.index;
//(i choose 2) + (j choose 1) is the combinatorial index of nbr
if (i > j) edges.push_back({nbr.diameter, binomial_coeff(i, 2) + j});
}
return edges;
}
template <>
void ripser<compressed_lower_distance_matrix>::gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce
){
union_find dset(n);
index_t max_num_edges= binomial_coeff(n, 2);
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse cmp_reverse;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
hipMemset(d_flagarray_OR_index_to_subindex, 0, sizeof(index_t)*max_num_edges);
CUDACHECK(hipDeviceSynchronize());
#else
hipMemset(d_flagarray, 0, sizeof(char)*max_num_edges);
CUDACHECK(hipDeviceSynchronize());
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_edges<index_t>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
hipLaunchKernelGGL(( populate_edges), dim3(grid_size), dim3(256), 0, 0, d_flagarray_OR_index_to_subindex, d_columns_to_reduce, threshold, d_distance_matrix, max_num_edges, n, d_binomial_coeff);
CUDACHECK(hipDeviceSynchronize());
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex+max_num_edges, 1);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_edges, cmp_reverse);
#else
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_edges<char>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
hipLaunchKernelGGL(( populate_edges), dim3(grid_size), dim3(256), 0, 0, d_flagarray, d_columns_to_reduce, threshold, d_distance_matrix, max_num_edges, n, d_binomial_coeff);
CUDACHECK(hipDeviceSynchronize());
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray, d_flagarray+max_num_edges, 1);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_edges, cmp_reverse);
#endif
#ifdef COUNTING
//std::cerr<<"num edges filtered by diameter: "<<*h_num_columns_to_reduce<<std::endl;
#endif
hipMemcpy(h_columns_to_reduce, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), hipMemcpyDeviceToHost);
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
std::vector<index_t> vertices_of_edge(2);
for(index_t idx=0; idx<*h_num_columns_to_reduce; idx++){
struct diameter_index_t_struct e= h_columns_to_reduce[idx];
vertices_of_edge.clear();
get_simplex_vertices(e.index, 1, n, std::back_inserter(vertices_of_edge));
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
//remove paired destroyer columns (we compute cohomology)
if(e.diameter!=0) {
// std::cout << " [0," << e.diameter << ")" << std::endl;
outfile << "0 0 " << e.diameter << " " << vertices_of_edge[0] << " " << vertices_of_edge[1] << " inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
//don't want to reverse the h_columns_to_reduce so just put into vector and copy later
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
*h_num_columns_to_reduce= columns_to_reduce.size();
*h_num_nonapparent= *h_num_columns_to_reduce;//we haven't found any apparent columns yet, so set all columns to nonapparent
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) outfile << "0 0 inf " << i << " " << i << " inf" <<std::endl << std::flush;
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce: dim 1, "<<*h_num_columns_to_reduce<<std::endl;
#endif
}
template <>
void ripser<sparse_distance_matrix>::gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce
){
union_find dset(n);
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse cmp_reverse;
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_edges_preparingcount, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//grid_size will return 0 if we have CPU-only code inside d_CSR_distance_matrix
*h_num_simplices= 0;
//populate edges kernel cannot have some threads iterating in the inner for loop, preventing shfl_sync() from runnning
int* d_num;
CUDACHECK(hipMalloc((void **) & d_num, sizeof(int)*(n+1)));
hipMemset(d_num, 0, sizeof(int)*(n+1));
hipLaunchKernelGGL(( populate_sparse_edges_preparingcount), dim3(grid_size), dim3(256), 0, 0, d_num, d_CSR_distance_matrix, n, d_num_simplices);
CUDACHECK(hipDeviceSynchronize());
thrust::exclusive_scan(thrust::device, d_num, d_num+n+1, d_num, 0);
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_edges_prefixsum, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
hipLaunchKernelGGL(( populate_sparse_edges_prefixsum), dim3(grid_size),dim3(256), 0, 0, d_simplices, d_num, d_CSR_distance_matrix, d_binomial_coeff, n, d_num_simplices);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_simplices, d_simplices+ *h_num_simplices, cmp_reverse);
CUDACHECK(hipDeviceSynchronize());
#ifdef COUNTING
//std::cerr<<"num (sparse) edges filtered: "<<*h_num_simplices<<std::endl;
#endif
hipMemcpy(h_simplices, d_simplices, sizeof(struct diameter_index_t_struct)*(*h_num_simplices), hipMemcpyDeviceToHost);
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
std::vector<index_t> vertices_of_edge(2);
for(index_t idx=0; idx<*h_num_simplices; idx++){
struct diameter_index_t_struct e= h_simplices[idx];
vertices_of_edge.clear();
get_simplex_vertices(e.index, 1, n, std::back_inserter(vertices_of_edge));
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
if(e.diameter!=0) {
//std::cerr << clear_line << "Writing Line . . ." << std::flush;
std::cout << "0 " << vertices_of_edge[0] << " " << vertices_of_edge[1] << " inf inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
//don't want to reverse the h_columns_to_reduce so just put into vector and copy later
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
*h_num_columns_to_reduce= columns_to_reduce.size();
*h_num_nonapparent= *h_num_columns_to_reduce;//we haven't found any apparent columns yet, so set all columns to nonapparent
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) std::cout << "0 " << i << " " << i << " inf inf" << std::endl << std::flush;
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce: dim 1, "<<*h_num_columns_to_reduce<<std::endl;
#endif
}
//finding apparent pairs
template <>
void ripser<compressed_lower_distance_matrix>::gpuscan(const index_t dim){
//(need to sort for filtration order before gpuscan first, then apply gpu scan then sort again)
//note: scan kernel can eliminate high percentage of columns in little time.
//filter by fully reduced columns (apparent pairs) found by gpu scan
//need this to prevent 0-blocks kernels from executing
if(*h_num_columns_to_reduce==0){
return;
}
index_t num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<num_simplices<<std::endl;
#endif
hipMemcpy(d_columns_to_reduce, h_columns_to_reduce,
sizeof(struct diameter_index_t_struct) * *h_num_columns_to_reduce, hipMemcpyHostToDevice);
CUDACHECK(hipDeviceSynchronize());
thrust::fill(thrust::device, d_cidx_to_diameter, d_cidx_to_diameter + num_simplices, -MAX_FLOAT);
CUDACHECK(hipDeviceSynchronize());
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_cidx_to_diam, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//there will be kernel launch errors if columns_to_reduce.size()==0; it causes thrust to complain later in the code execution
init_cidx_to_diam << < grid_size, 256 >> >
(d_cidx_to_diameter, d_columns_to_reduce, *h_num_columns_to_reduce);
CUDACHECK(hipDeviceSynchronize());
hipMemset(d_lowest_one_of_apparent_pair, -1, sizeof(index_t) * *h_num_columns_to_reduce);
CUDACHECK(hipDeviceSynchronize());
Stopwatch sw;
sw.start();
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, coboundary_findapparent_single_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
coboundary_findapparent_single_kernel << < grid_size, 256, 256 * (dim + 1) * sizeof(index_t) >> >
(d_cidx_to_diameter, d_columns_to_reduce, d_lowest_one_of_apparent_pair, dim, num_simplices, n, d_binomial_coeff, *h_num_columns_to_reduce, d_distance_matrix, threshold);
CUDACHECK(hipDeviceSynchronize());
sw.stop();
#ifdef PROFILING
//std::cerr<<"gpu scan kernel time for dim: "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
CUDACHECK(hipDeviceSynchronize());
//post processing (inserting appararent pairs into a "hash map": 2 level data structure) now on GPU
Stopwatch postprocessing;
postprocessing.start();
struct row_cidx_column_idx_struct_compare cmp_pivots;
//put pairs into an array
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, gpu_insert_pivots_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
gpu_insert_pivots_kernel<< < grid_size, 256 >> >(d_pivot_array, d_lowest_one_of_apparent_pair, d_pivot_column_index_OR_nonapparent_cols, *h_num_columns_to_reduce, d_num_nonapparent);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_pivot_array, d_pivot_array+*h_num_columns_to_reduce, cmp_pivots);
thrust::sort(thrust::device, d_pivot_column_index_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols+*h_num_nonapparent);
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
#ifdef COUNTING
//std::cerr<<"num apparent for dim: "<<dim<<" is: " <<num_apparent<<std::endl;
#endif
//transfer to CPU side all GPU data structures
hipMemcpy(h_pivot_array, d_pivot_array, sizeof(index_t_pair_struct)*(num_apparent), hipMemcpyDeviceToHost);
hipMemcpy(h_pivot_column_index_array_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*(*h_num_nonapparent), hipMemcpyDeviceToHost);
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
hipMemset(d_flagarray_OR_index_to_subindex, -1, sizeof(index_t)* *h_num_columns_to_reduce);
//perform the scatter operation
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_index_to_subindex, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
init_index_to_subindex<< < grid_size, 256 >> >
(d_flagarray_OR_index_to_subindex, d_pivot_column_index_OR_nonapparent_cols, *h_num_nonapparent);
hipMemcpy(h_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex, sizeof(index_t)*(*h_num_columns_to_reduce), hipMemcpyDeviceToHost);
#endif
postprocessing.stop();
#ifdef PROFILING
//std::cerr<<"INSERTION POSTPROCESSING FOR GPU IN DIM "<<dim<<": "<<postprocessing.ms()/1000.0<<"s"<<std::endl;
#endif
}
//finding apparent pairs
template <>
void ripser<sparse_distance_matrix>::gpuscan(const index_t dim){
//(need to sort for filtration order before gpuscan first, then apply gpu scan then sort again)
//note: scan kernel can eliminate high percentage of columns in little time.
//filter by fully reduced columns (apparent pairs) found by gpu scan
//need this to prevent 0-blocks kernels from executing
if(*h_num_columns_to_reduce==0){
return;
}
index_t num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<num_simplices<<std::endl;
#endif
hipMemcpy(d_columns_to_reduce, h_columns_to_reduce,
sizeof(struct diameter_index_t_struct) * *h_num_columns_to_reduce, hipMemcpyHostToDevice);
CUDACHECK(hipDeviceSynchronize());
//use binary search on d_columns_to_reduce as retrival process
hipMemcpy(d_cidx_diameter_pairs_sortedlist, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), hipMemcpyDeviceToDevice);
struct lowerindex_lowerdiam_diameter_index_t_struct_compare cmp_cidx_diameter;
thrust::sort(thrust::device, d_cidx_diameter_pairs_sortedlist, d_cidx_diameter_pairs_sortedlist+*h_num_columns_to_reduce, cmp_cidx_diameter);
CUDACHECK(hipDeviceSynchronize());
hipMemset(d_lowest_one_of_apparent_pair, -1, sizeof(index_t) * *h_num_columns_to_reduce);
CUDACHECK(hipDeviceSynchronize());
Stopwatch sw;
sw.start();
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, coboundary_findapparent_sparse_single_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
coboundary_findapparent_sparse_single_kernel << < grid_size, 256, 256 * (dim + 1) * sizeof(index_t) >> >
(d_cidx_diameter_pairs_sortedlist, d_columns_to_reduce, d_lowest_one_of_apparent_pair, dim, n, d_binomial_coeff, *h_num_columns_to_reduce, d_CSR_distance_matrix, threshold);
CUDACHECK(hipDeviceSynchronize());
sw.stop();
#ifdef PROFILING
//std::cerr<<"gpu scan kernel time for dim: "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
CUDACHECK(hipDeviceSynchronize());
//post processing (inserting appararent pairs into a "hash map": 2 level data structure) now on GPU
Stopwatch postprocessing;
postprocessing.start();
struct row_cidx_column_idx_struct_compare cmp_pivots;
//put pairs into an array
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, gpu_insert_pivots_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
gpu_insert_pivots_kernel<< < grid_size, 256 >> >(d_pivot_array, d_lowest_one_of_apparent_pair, d_pivot_column_index_OR_nonapparent_cols, *h_num_columns_to_reduce, d_num_nonapparent);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_pivot_array, d_pivot_array+*h_num_columns_to_reduce, cmp_pivots);
thrust::sort(thrust::device, d_pivot_column_index_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols+*h_num_nonapparent);
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
#ifdef COUNTING
//std::cerr<<"num apparent for dim: "<<dim<<" is: "<<num_apparent<<std::endl;
#endif
//transfer to CPU side all GPU data structures
hipMemcpy(h_pivot_array, d_pivot_array, sizeof(index_t_pair_struct)*(num_apparent), hipMemcpyDeviceToHost);
hipMemcpy(h_pivot_column_index_array_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*(*h_num_nonapparent), hipMemcpyDeviceToHost);
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
hipMemset(d_flagarray_OR_index_to_subindex, -1, sizeof(index_t)* *h_num_columns_to_reduce);
//perform the scatter operation
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_index_to_subindex, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
init_index_to_subindex<< < grid_size, 256 >> >
(d_flagarray_OR_index_to_subindex, d_pivot_column_index_OR_nonapparent_cols, *h_num_nonapparent);
hipMemcpy(h_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex, sizeof(index_t)*(*h_num_columns_to_reduce), hipMemcpyDeviceToHost);
#endif
postprocessing.stop();
#ifdef PROFILING
//std::cerr<<"INSERTION POSTPROCESSING FOR GPU IN DIM "<<dim<<": "<<postprocessing.ms()/1000.0<<"s"<<std::endl;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::gpu_assemble_columns_to_reduce_plusplus(const index_t dim) {
index_t max_num_simplices= binomial_coeff(n, dim + 1);
Stopwatch sw;
sw.start();
#pragma omp parallel for schedule(guided,1)
for (index_t i= 0; i < max_num_simplices; i++) {
#ifdef USE_PHASHMAP
h_pivot_column_index_array_OR_nonapparent_cols[i]= phmap_get_value(i);
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(i);
if(pair!=pivot_column_index.end()){
h_pivot_column_index_array_OR_nonapparent_cols[i]= pair->second;
}else{
h_pivot_column_index_array_OR_nonapparent_cols[i]= -1;
}
#endif
}
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
if(num_apparent>0) {
#pragma omp parallel for schedule(guided, 1)
for (index_t i= 0; i < num_apparent; i++) {
index_t row_cidx= h_pivot_array[i].row_cidx;
h_pivot_column_index_array_OR_nonapparent_cols[row_cidx]= h_pivot_array[i].column_idx;
}
}
*h_num_columns_to_reduce= 0;
hipMemcpy(d_pivot_column_index_OR_nonapparent_cols, h_pivot_column_index_array_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices, hipMemcpyHostToDevice);
sw.stop();
#ifdef PROFILING
//std::cerr<<"time to copy hash map for dim "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
hipMemset(d_flagarray_OR_index_to_subindex, 0, sizeof(index_t)*max_num_simplices);
CUDACHECK(hipDeviceSynchronize());
#else
hipMemset(d_flagarray, 0, sizeof(char)*max_num_simplices);
CUDACHECK(hipDeviceSynchronize());
#endif
Stopwatch pop_cols_timer;
pop_cols_timer.start();
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_columns_to_reduce<index_t>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
hipLaunchKernelGGL(( populate_columns_to_reduce), dim3(grid_size), dim3(256), 256 * (dim + 1) * sizeof(index_t), 0, d_flagarray_OR_index_to_subindex, d_columns_to_reduce, d_pivot_column_index_OR_nonapparent_cols, d_distance_matrix, n, max_num_simplices, dim, threshold, d_binomial_coeff);
#else
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_columns_to_reduce<char>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
hipLaunchKernelGGL(( populate_columns_to_reduce), dim3(grid_size), dim3(256), 256 * (dim + 1) * sizeof(index_t), 0, d_flagarray, d_columns_to_reduce, d_pivot_column_index_OR_nonapparent_cols, d_distance_matrix, n, max_num_simplices, dim, threshold, d_binomial_coeff);
#endif
CUDACHECK(hipDeviceSynchronize());
pop_cols_timer.stop();
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex+max_num_simplices, 1);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_simplices, cmp);
#else
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray, d_flagarray+max_num_simplices, 1);
CUDACHECK(hipDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_simplices, cmp);
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce for dim "<<dim<<": "<<*h_num_columns_to_reduce<<std::endl;
#endif
hipMemcpy(h_columns_to_reduce, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), hipMemcpyDeviceToHost);
}
template <>
void ripser<sparse_distance_matrix>::gpu_assemble_columns_to_reduce_plusplus(const index_t dim) {
index_t max_num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<max_num_simplices<<std::endl;
#endif
*h_num_columns_to_reduce= 0;
CUDACHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_simplices_warpfiltering, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//columns_to_reduce contains the "new set" of simplices
#ifdef COUNTING
//std::cerr<<"(sparse) num simplices before kernel call: "<<*h_num_simplices<<std::endl;
#endif
hipLaunchKernelGGL(( populate_sparse_simplices_warpfiltering), dim3(grid_size), dim3(256), 256 * dim * sizeof(index_t), 0, d_simplices, d_num_simplices, d_columns_to_reduce, d_num_columns_to_reduce, d_CSR_distance_matrix, n, dim, threshold, d_binomial_coeff);
CUDACHECK(hipDeviceSynchronize());
hipMemcpy(d_simplices, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce),hipMemcpyDeviceToDevice);
*h_num_simplices= *h_num_columns_to_reduce;
#ifdef COUNTING
//std::cerr<<"(sparse) num simplices for dim "<<dim<<": "<<*h_num_simplices<<std::endl;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
thrust::sort(thrust::device, d_simplices, d_simplices+*h_num_simplices, cmp);
CUDACHECK(hipDeviceSynchronize());
hipMemcpy(h_simplices, d_simplices, sizeof(struct diameter_index_t_struct)*(*h_num_simplices),hipMemcpyDeviceToHost);
//populate the columns_to_reduce vector on CPU side
struct row_cidx_column_idx_struct_compare pair_cmp;
columns_to_reduce.clear();
for(index_t i=0; i<*h_num_simplices; i++){
struct diameter_index_t_struct s= h_simplices[i];
if(s.diameter<=threshold &&
get_value_pivot_array_hashmap(s.index, pair_cmp)==-1){
columns_to_reduce.push_back(s);
}
}
#ifdef COUNTING
//std::cerr<<"columns to reduce for dim: "<<dim<<": "<<columns_to_reduce.size()<<std::endl;
#endif
*h_num_columns_to_reduce= columns_to_reduce.size();
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
}
template <>
void ripser<compressed_lower_distance_matrix>::cpu_byneighbor_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& simplices,
std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t,index_t>& pivot_column_index, index_t dim){
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns on CPU" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
--dim;
columns_to_reduce.clear();
std::vector<struct diameter_index_t_struct> next_simplices;
for (struct diameter_index_t_struct& simplex : simplices) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next(false)) {
#ifdef INDICATE_PROGRESS
if (std::chrono::steady_clock::now() > next) {
//std::cerr << clear_line << "assembling " << next_simplices.size()
<< " columns (processing " << std::distance(&simplices[0], &simplex)
<< "/" << simplices.size() << " simplices)" << std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
auto cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
next_simplices.push_back(cofacet);
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()) {
columns_to_reduce.push_back(cofacet);
}
}
}
}
simplices.swap(next_simplices);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "sorting " << columns_to_reduce.size() << " columns"
<< std::flush;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<sparse_distance_matrix>::cpu_byneighbor_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& simplices,
std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t,index_t>& pivot_column_index, index_t dim){
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
--dim;
columns_to_reduce.clear();
std::vector<struct diameter_index_t_struct> next_simplices;
for (struct diameter_index_t_struct& simplex : simplices) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next(false)) {
#ifdef INDICATE_PROGRESS
if (std::chrono::steady_clock::now() > next) {
//std::cerr << clear_line << "assembling " << next_simplices.size()
<< " columns (processing " << std::distance(&simplices[0], &simplex)
<< "/" << simplices.size() << " simplices)" << std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
auto cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
next_simplices.push_back(cofacet);
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()) { //|| pivot_column_index[cofacet.index]==-1)
columns_to_reduce.push_back(cofacet);
}
}
}
}
simplices.swap(next_simplices);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "sorting " << columns_to_reduce.size() << " columns"
<< std::flush;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::cpu_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index,
index_t dim) {
index_t num_simplices= binomial_coeff(n, dim + 1);
#ifdef COUNTING
//std::cerr<<"max num possible simplices: "<<num_simplices<<std::endl;
#endif
columns_to_reduce.clear();
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "assembling " << num_simplices << " columns" << std::flush << "\r";
#endif
index_t count= 0;
for (index_t index= 0; index < num_simplices; ++index) {
if (pivot_column_index.find(index) == pivot_column_index.end()) {
value_t diameter= compute_diameter(index, dim);
if (diameter <= threshold){
columns_to_reduce.push_back({diameter,index});
count++;
}
#ifdef INDICATE_PROGRESS
if ((index + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "assembled " << columns_to_reduce.size() << " out of " << (index + 1)
<< "/" << num_simplices << " columns" << std::flush << "\r";
#endif
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "sorting " << num_simplices << " columns" << std::flush << "\r";
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K";
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::assemble_columns_gpu_accel_transition_to_cpu_only(const bool& more_than_one_dim_cpu_only,std::vector<diameter_index_t_struct>& simplices, std::vector<diameter_index_t_struct>& columns_to_reduce, hash_map<index_t,index_t>& cpu_pivot_column_index,
index_t dim){
index_t max_num_simplices= binomial_coeff(n,dim+1);
//insert all pivots from the two gpu pivot data structures into cpu_pivot_column_index, cannot parallelize this for loop due to concurrency issues of hashmaps
for (index_t i= 0; i < max_num_simplices; i++) {
#ifdef USE_PHASHMAP
index_t col_idx= phmap_get_value(i);
if(col_idx!=-1) {
cpu_pivot_column_index[i]= col_idx;
}
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(i);
if(pair!=pivot_column_index.end()) {
cpu_pivot_column_index[i]= pair->second;
}
//}else{
//h_pivot_column_index_array_OR_nonapparent_cols[i]= -1;
//}
#endif
}
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
if(num_apparent>0) {
//we can't insert into the hashmap in parallel
for (index_t i= 0; i < num_apparent; i++) {
index_t row_cidx= h_pivot_array[i].row_cidx;
index_t column_idx= h_pivot_array[i].column_idx;
if(column_idx!=-1) {
cpu_pivot_column_index[row_cidx]= column_idx;
}
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
columns_to_reduce.clear();
simplices.clear();
index_t count_simplices= 0;
//cpu_pivot_column_index can't be parallelized for lookup
for (index_t index= 0; index < max_num_simplices; ++index) {
value_t diameter= -MAX_FLOAT;
//the second condition after the || should never happen, since we never insert such pairs into cpu_pivot_column_index
if (cpu_pivot_column_index.find(index) == cpu_pivot_column_index.end() || cpu_pivot_column_index[index]==-1) {
diameter= compute_diameter(index, dim);
if (diameter <= threshold) {
columns_to_reduce.push_back({diameter, index});
}
#ifdef INDICATE_PROGRESS
if ((index + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "assembled " << columns_to_reduce.size() << " out of " << (index + 1)
<< "/" << max_num_simplices << " columns" << std::flush << "\r";
#endif
}
if(more_than_one_dim_cpu_only){
if(diameter==-MAX_FLOAT){
diameter= compute_diameter(index, dim);
}
if(diameter<=threshold){
simplices.push_back({diameter,index});
count_simplices++;
}
}
}
#ifdef COUNTING
if(more_than_one_dim_cpu_only){
//std::cerr<<"(if there are multiple dimensions needed to compute) num simplices for dim: "<<dim<<" is: "<<count_simplices<<std::endl;
}
#endif
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "sorting " << columns_to_reduce.size() << " columns" << std::flush << "\r";
#endif
greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(), cmp);
#ifdef COUNTING
//std::cerr<<"NUM COLS to reduce for CPU: "<<columns_to_reduce.size()<<std::endl;
#endif
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::compute_barcodes() {
Stopwatch sw, gpu_accel_timer;
gpu_accel_timer.start();
sw.start();
index_t gpu_dim_max= calculate_gpu_dim_max_for_fullrips_computation_from_memory(dim_max, true);
#ifdef PROFILING
//std::cerr<<"recalculated dim_max based on GPU free DRAM capacity: "<<gpu_dim_max<<std::endl;
#endif
max_num_simplices_forall_dims= gpu_dim_max<(n/2)-1?get_num_simplices_for_dim(gpu_dim_max): get_num_simplices_for_dim((n/2)-1);
if(gpu_dim_max>=1){
//std::cerr<<"max possible num simplices over all dim<=dim_max (without clearing) for memory allocation: "<<max_num_simplices_forall_dims<<std::endl;
CUDACHECK(hipMalloc((void **) &d_columns_to_reduce, sizeof(struct diameter_index_t_struct) * max_num_simplices_forall_dims));
h_columns_to_reduce= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)* max_num_simplices_forall_dims);
if(h_columns_to_reduce==NULL){
//std::cerr<<"malloc for h_columns_to_reduce failed"<<std::endl;
exit(1);
}
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(hipMalloc((void**) &d_flagarray, sizeof(char)*max_num_simplices_forall_dims));
#endif
CUDACHECK(hipMalloc((void **) &d_cidx_to_diameter, sizeof(value_t)*max_num_simplices_forall_dims));
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)
CUDACHECK(hipMalloc((void **) &d_flagarray_OR_index_to_subindex, sizeof(index_t)*max_num_simplices_forall_dims));
h_flagarray_OR_index_to_subindex= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_flagarray_OR_index_to_subindex==NULL) {
//std::cerr<<"malloc for h_index_to_subindex failed"<<std::endl;
}
#endif
CUDACHECK(hipMalloc((void **) &d_distance_matrix, sizeof(value_t)*dist.size()*(dist.size()-1)/2));
hipMemcpy(d_distance_matrix, dist.distances.data(), sizeof(value_t)*dist.size()*(dist.size()-1)/2, hipMemcpyHostToDevice);
CUDACHECK(hipMalloc((void **) &d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices_forall_dims));
//this array is used for both the pivot column index hash table array as well as the nonapparent cols array as an unstructured hashmap
h_pivot_column_index_array_OR_nonapparent_cols= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_pivot_column_index_array_OR_nonapparent_cols==NULL){
//std::cerr<<"malloc for h_pivot_column_index_array_OR_nonapparent_cols failed"<<std::endl;
exit(1);
}
//copy object over to GPU
CUDACHECK(hipMalloc((void**) &d_binomial_coeff, sizeof(binomial_coeff_table)));
hipMemcpy(d_binomial_coeff, &binomial_coeff, sizeof(binomial_coeff_table), hipMemcpyHostToDevice);
index_t num_binoms= binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length();
index_t* h_d_binoms;
CUDACHECK(hipMalloc((void **) &h_d_binoms, sizeof(index_t)*num_binoms));
hipMemcpy(h_d_binoms, binomial_coeff.binoms, sizeof(index_t)*num_binoms, hipMemcpyHostToDevice);
hipMemcpy(&(d_binomial_coeff->binoms), &h_d_binoms, sizeof(index_t*), hipMemcpyHostToDevice);
hipHostMalloc((void **)&h_num_columns_to_reduce, sizeof(index_t), hipHostMallocPortable | hipHostMallocMapped);
hipHostGetDevicePointer(&d_num_columns_to_reduce, h_num_columns_to_reduce,0);
hipHostMalloc((void **)&h_num_nonapparent, sizeof(index_t), hipHostMallocPortable | hipHostMallocMapped);
hipHostGetDevicePointer(&d_num_nonapparent, h_num_nonapparent,0);
CUDACHECK(hipMalloc((void**) &d_lowest_one_of_apparent_pair, sizeof(index_t)*max_num_simplices_forall_dims));
CUDACHECK(hipMalloc((void**) &d_pivot_array, sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims));
h_pivot_array= (struct index_t_pair_struct*) malloc(sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims);
if(h_pivot_array==NULL){
//std::cerr<<"malloc for h_pivot_array failed"<<std::endl;
exit(1);
}
#ifdef PROFILING
hipMemGetInfo(&freeMem,&totalMem);
//std::cerr<<"GPU memory after full rips memory calculation and allocation, total mem: "<< totalMem<<" bytes, free mem: "<<freeMem<<" bytes"<<std::endl;
#endif
}
sw.stop();
#ifdef PROFILING
//std::cerr<<"CUDA PREPROCESSING TIME (e.g. memory allocation time): "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
columns_to_reduce.clear();
std::vector<diameter_index_t_struct> simplices;
if(gpu_dim_max>=1) {
gpu_compute_dim_0_pairs(columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with GPU: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}else{
compute_dim_0_pairs(simplices, columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with CPU alone: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
//index_t dim_forgpuscan= MAX_INT64;//never do gpu scan
index_t dim_forgpuscan= 1;
for (index_t dim= 1; dim <= gpu_dim_max; ++dim) {
Stopwatch sw;
sw.start();
#ifdef USE_PHASHMAP
phmap_clear();
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index.clear();
pivot_column_index.resize(*h_num_columns_to_reduce);
#endif
*h_num_nonapparent= 0;
//search for apparent pairs
gpuscan(dim);
//dim_forgpuscan= dim;//update dim_forgpuscan to the dimension that gpuscan was just done at
sw.stop();
#ifdef PROFILING
//std::cerr<<"-SUM OF GPU MATRIX SCAN and post processing time for dim "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
compute_pairs_plusplus(
dim, dim_forgpuscan);
sw.stop();
#ifdef PROFILING
//std::cerr<<"SUBMATRIX REDUCTION TIME for dim "<< dim<<": "<<sw.ms()/1000.0<<"s"<<"\n"<<std::endl;
#endif
if (dim < gpu_dim_max) {
sw.start();
gpu_assemble_columns_to_reduce_plusplus(dim+1);
sw.stop();
#ifdef PROFILING
//std::cerr << "ASSEMBLE COLS TIME for dim " << dim + 1 << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
}
}
gpu_accel_timer.stop();
#ifdef PROFILING
if(gpu_dim_max>=1)
//std::cerr<<"GPU ACCELERATED COMPUTATION from dim 0 to dim "<<gpu_dim_max<<": "<<gpu_accel_timer.ms()/1000.0<<"s"<<std::endl;
#endif
if(dim_max>gpu_dim_max){//do cpu only computation from this point on
#ifdef CPUONLY_SPARSE_HASHMAP
//std::cerr<<"MEMORY EFFICIENT/BUT TIME INEFFICIENT CPU-ONLY MODE FOR REMAINDER OF HIGH DIMENSIONAL COMPUTATION (NOT ENOUGH GPU DEVICE MEMORY)"<<std::endl;
#endif
#ifndef CPUONLY_SPARSE_HASHMAP
//std::cerr<<"CPU-ONLY MODE FOR REMAINDER OF HIGH DIMENSIONAL COMPUTATION (NOT ENOUGH GPU DEVICE MEMORY)"<<std::endl;
#endif
free_init_cpumem();
hash_map<index_t,index_t> cpu_pivot_column_index;
cpu_pivot_column_index.reserve(*h_num_columns_to_reduce);
bool more_than_one_dim_to_compute= dim_max>gpu_dim_max+1;
assemble_columns_gpu_accel_transition_to_cpu_only(more_than_one_dim_to_compute, simplices, columns_to_reduce, cpu_pivot_column_index, gpu_dim_max+1);
free_remaining_cpumem();
for (index_t dim= gpu_dim_max+1; dim <= dim_max; ++dim) {
cpu_pivot_column_index.clear();
cpu_pivot_column_index.reserve(columns_to_reduce.size());
compute_pairs(columns_to_reduce, cpu_pivot_column_index, dim);
if(dim<dim_max){
sw.start();
//cpu_byneighbor_assemble_columns is a little faster?
cpu_byneighbor_assemble_columns_to_reduce(simplices, columns_to_reduce, cpu_pivot_column_index, dim+1);
//cpu_assemble_columns_to_reduce(columns_to_reduce,cpu_pivot_column_index, dim+1);
sw.stop();
#ifdef PROFILING
//std::cerr<<"TIME FOR CPU ASSEMBLE: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
}
}
}
template <>
void ripser<sparse_distance_matrix>::compute_barcodes(){
Stopwatch sw, gpu_accel_timer;
gpu_accel_timer.start();
sw.start();
index_t maxgpu_dim= calculate_gpu_dim_max_for_fullrips_computation_from_memory(dim_max, false);
if(maxgpu_dim<dim_max){
max_num_simplices_forall_dims= calculate_gpu_max_columns_for_sparserips_computation_from_memory();
//std::cerr<<"(sparse) max possible num simplices for memory allocation forall dims: "<<max_num_simplices_forall_dims<<std::endl;
}else {
max_num_simplices_forall_dims =
dim_max < (n/2)-1 ? get_num_simplices_for_dim(dim_max) : get_num_simplices_for_dim((n/2)-1);
//std::cerr<<"(dense case used in sparse computation) max possible num simplices for memory allocation forall dims: "<<max_num_simplices_forall_dims<<std::endl;
}
//we assume that we have enough memory to last up to dim_max (should be fine with a >=32GB GPU); growth of num simplices can be very slow for sparse case
if(dim_max>=1) {
CUDACHECK(hipMalloc((void **) &d_columns_to_reduce, sizeof(struct diameter_index_t_struct) * max_num_simplices_forall_dims));//46000000
h_columns_to_reduce= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)* max_num_simplices_forall_dims);
if(h_columns_to_reduce==NULL){
//std::cerr<<"malloc for h_columns_to_reduce failed"<<std::endl;
exit(1);
}
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)
CUDACHECK(hipMalloc((void **) &d_flagarray_OR_index_to_subindex, sizeof(index_t)*max_num_simplices_forall_dims));
h_flagarray_OR_index_to_subindex= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_flagarray_OR_index_to_subindex==NULL) {
//std::cerr<<"malloc for h_index_to_subindex failed"<<std::endl;
}
#endif
CSR_distance_matrix CSR_distance_matrix= dist.toCSR();
//copy CSR_distance_matrix object over to GPU
CUDACHECK(hipMalloc((void **) &d_CSR_distance_matrix, sizeof(CSR_distance_matrix)));
hipMemcpy(d_CSR_distance_matrix, &CSR_distance_matrix, sizeof(CSR_distance_matrix), hipMemcpyHostToDevice);
index_t *h_d_offsets;
value_t *h_d_entries;
index_t *h_d_col_indices;
CUDACHECK(hipMalloc((void **) &h_d_offsets, sizeof(index_t) * (CSR_distance_matrix.n + 1)));
hipMemcpy(h_d_offsets, CSR_distance_matrix.offsets, sizeof(index_t) * (CSR_distance_matrix.n + 1), hipMemcpyHostToDevice);
hipMemcpy(&(d_CSR_distance_matrix->offsets), &h_d_offsets, sizeof(index_t *), hipMemcpyHostToDevice);
CUDACHECK(hipMalloc((void **) &h_d_entries, sizeof(value_t) * CSR_distance_matrix.num_entries));
hipMemcpy(h_d_entries, CSR_distance_matrix.entries, sizeof(value_t) * CSR_distance_matrix.num_entries, hipMemcpyHostToDevice);
hipMemcpy(&(d_CSR_distance_matrix->entries), &h_d_entries, sizeof(value_t *), hipMemcpyHostToDevice);
CUDACHECK(hipMalloc((void **) &h_d_col_indices, sizeof(index_t) * CSR_distance_matrix.num_entries));
hipMemcpy(h_d_col_indices, CSR_distance_matrix.col_indices, sizeof(index_t) * CSR_distance_matrix.num_entries,
hipMemcpyHostToDevice);
hipMemcpy(&(d_CSR_distance_matrix->col_indices), &h_d_col_indices, sizeof(index_t *), hipMemcpyHostToDevice);
//this replaces d_cidx_to_diameter
CUDACHECK(hipMalloc((void **) &d_cidx_diameter_pairs_sortedlist, sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims));
CUDACHECK(hipMalloc((void **) &d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices_forall_dims));
//this array is used for both the pivot column index hash table array as well as the nonapparent cols array as an unstructured hashmap
h_pivot_column_index_array_OR_nonapparent_cols= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_pivot_column_index_array_OR_nonapparent_cols==NULL){
//std::cerr<<"malloc for h_pivot_column_index_array_OR_nonapparent_cols failed"<<std::endl;
exit(1);
}
//copy object over to GPU
CUDACHECK(hipMalloc((void**) &d_binomial_coeff, sizeof(binomial_coeff_table)));
hipMemcpy(d_binomial_coeff, &binomial_coeff, sizeof(binomial_coeff_table), hipMemcpyHostToDevice);
index_t num_binoms= binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length();
index_t* h_d_binoms;
CUDACHECK(hipMalloc((void **) &h_d_binoms, sizeof(index_t)*num_binoms));
hipMemcpy(h_d_binoms, binomial_coeff.binoms, sizeof(index_t)*num_binoms, hipMemcpyHostToDevice);
hipMemcpy(&(d_binomial_coeff->binoms), &h_d_binoms, sizeof(index_t*), hipMemcpyHostToDevice);
hipHostMalloc((void **)&h_num_columns_to_reduce, sizeof(index_t), hipHostMallocPortable | hipHostMallocMapped);
hipHostGetDevicePointer(&d_num_columns_to_reduce, h_num_columns_to_reduce,0);
hipHostMalloc((void **)&h_num_nonapparent, sizeof(index_t), hipHostMallocPortable | hipHostMallocMapped);
hipHostGetDevicePointer(&d_num_nonapparent, h_num_nonapparent,0);
hipHostMalloc((void **)&h_num_simplices, sizeof(index_t), hipHostMallocPortable | hipHostMallocMapped);
hipHostGetDevicePointer(&d_num_simplices, h_num_simplices,0);
CUDACHECK(hipMalloc((void**) &d_lowest_one_of_apparent_pair, sizeof(index_t)*max_num_simplices_forall_dims));
CUDACHECK(hipMalloc((void**) &d_pivot_array, sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims));
h_pivot_array= (struct index_t_pair_struct*) malloc(sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims);
if(h_pivot_array==NULL){
//std::cerr<<"malloc for h_pivot_array failed"<<std::endl;
exit(1);
}
CUDACHECK(hipMalloc((void**) &d_simplices, sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims));
h_simplices= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims);
if(h_simplices==NULL){
//std::cerr<<"malloc for h_simplices failed"<<std::endl;
exit(1);
}
#ifdef PROFILING
hipMemGetInfo(&freeMem,&totalMem);
//std::cerr<<"after GPU memory allocation: total mem, free mem: " <<totalMem<<" bytes, "<<freeMem<<" bytes"<<std::endl;
#endif
}
sw.stop();
#ifdef PROFILING
//std::cerr<<"CUDA PREPROCESSING TIME (e.g. memory allocation): "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
columns_to_reduce.clear();
if(dim_max>=1) {
gpu_compute_dim_0_pairs(columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with GPU: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}else{
std::vector<diameter_index_t_struct> simplices;
compute_dim_0_pairs(simplices, columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with CPU alone: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
//index_t dim_forgpuscan= MAX_INT64;//never do gpuscan
index_t dim_forgpuscan= 1;
for (index_t dim= 1; dim <= dim_max; ++dim) {
Stopwatch sw;
sw.start();
#ifdef USE_PHASHMAP
phmap_clear();
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index.clear();
pivot_column_index.resize(*h_num_columns_to_reduce);
#endif
*h_num_nonapparent= 0;
gpuscan(dim);
//dim_forgpuscan= dim;
sw.stop();
#ifdef PROFILING
//std::cerr << "-SUM OF GPU MATRIX SCAN and post processing time for dim " << dim << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
sw.start();
compute_pairs_plusplus(
dim, dim_forgpuscan);
sw.stop();
#ifdef PROFILING
//std::cerr << "SUBMATRIX REDUCTION TIME for dim " << dim << ": " << sw.ms() / 1000.0 << "s" << "\n" << std::endl;
#endif
if (dim < dim_max) {
sw.start();
gpu_assemble_columns_to_reduce_plusplus(dim + 1);
sw.stop();
#ifdef PROFILING
//std::cerr << "ASSEMBLE COLS TIME for dim " << dim + 1 << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
}
}
gpu_accel_timer.stop();
#ifdef PROFILING
//std::cerr<<"GPU ACCELERATED COMPUTATION: "<<gpu_accel_timer.ms()/1000.0<<"s"<<std::endl;
#endif
}
///I/O code
enum file_format { LOWER_DISTANCE_MATRIX, DISTANCE_MATRIX, POINT_CLOUD, DIPHA, SPARSE, BINARY };
template <typename T> T read(std::istream& s) {
T result;
s.read(reinterpret_cast<char*>(&result), sizeof(T));
return result; // on little endian: boost::endian::little_to_native(result);
}
compressed_lower_distance_matrix read_point_cloud_python(value_t* matrix, int num_rows, int num_columns){
std::vector<std::vector<value_t>> points;
for(int i= 0; i < num_rows; i++) {
std::vector <value_t> point;
for (int j= 0; j < num_columns; j++) {
point.push_back(matrix[i * num_columns + j]);
}
if (!point.empty()) {
points.push_back(point);
}
assert(point.size() == points.front().size());
}
//only l2 distance implemented so far
euclidean_distance_matrix eucl_dist(std::move(points));
index_t n= eucl_dist.size();
//std::cerr << "point cloud with " << n << " points in dimension "
//<< eucl_dist.points.front().size() << std::endl;
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < i; ++j) distances.push_back(eucl_dist(i, j));
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_point_cloud(std::istream& input_stream) {
std::vector<std::vector<value_t>> points;
std::string line;
value_t value;
while (std::getline(input_stream, line)) {
std::vector<value_t> point;
std::istringstream s(line);
while (s >> value) {
point.push_back(value);
s.ignore();
}
if (!point.empty()) points.push_back(point);
assert(point.size() == points.front().size());
}
euclidean_distance_matrix eucl_dist(std::move(points));
index_t n= eucl_dist.size();
//std::cerr << "point cloud with " << n << " points in dimension "
//<< eucl_dist.points.front().size() << std::endl;
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < i; ++j) distances.push_back(eucl_dist(i, j));
return compressed_lower_distance_matrix(std::move(distances));
}
//the coo format input is of a lower triangular matrix
sparse_distance_matrix read_sparse_distance_matrix(std::istream& input_stream) {
std::vector<std::vector<index_diameter_t_struct>> neighbors;
index_t num_edges= 0;
std::string line;
while (std::getline(input_stream, line)) {
std::istringstream s(line);
size_t i, j;
value_t value;
s >> i;
s >> j;
s >> value;
if (i != j) {
neighbors.resize(::max({neighbors.size(), i + 1, j + 1}));
neighbors[i].push_back({j, value});
neighbors[j].push_back({i, value});
++num_edges;
}
}
struct lowerindex_lowerdiameter_index_t_struct_compare cmp_index_diameter;
for (size_t i= 0; i < neighbors.size(); ++i)
std::sort(neighbors[i].begin(), neighbors[i].end(), cmp_index_diameter);
return sparse_distance_matrix(std::move(neighbors), num_edges);
}
compressed_lower_distance_matrix read_lower_distance_matrix_python(value_t* matrix, int matrix_length) {
std::vector<value_t> distances(matrix, matrix + matrix_length);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_lower_distance_matrix(std::istream& input_stream) {
std::vector<value_t> distances;
value_t value;
while (input_stream >> value) {
distances.push_back(value);
input_stream.ignore();
}
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_distance_matrix_python(value_t* matrix, int matrix_length) {
std::vector<value_t> distances(matrix, matrix + matrix_length);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_distance_matrix(std::istream& input_stream) {
std::vector<value_t> distances;
std::string line;
value_t value;
for (int i= 0; std::getline(input_stream, line); ++i) {
std::istringstream s(line);
for (int j= 0; j < i && s >> value; ++j) {
distances.push_back(value);
s.ignore();
}
}
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_dipha(std::istream& input_stream) {
if (read<int64_t>(input_stream) != 8067171840) {
//std::cerr << "input is not a Dipha file (magic number: 8067171840)" << std::endl;
exit(-1);
}
if (read<int64_t>(input_stream) != 7) {
//std::cerr << "input is not a Dipha distance matrix (file type: 7)" << std::endl;
exit(-1);
}
index_t n= read<int64_t>(input_stream);
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < n; ++j)
if (i > j)
distances.push_back(read<double>(input_stream));
else
read<double>(input_stream);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_binary(std::istream& input_stream) {
std::vector<value_t> distances;
while (!input_stream.eof()) distances.push_back(read<value_t>(input_stream));
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_matrix_python(value_t* matrix, int num_entries, int num_rows, int num_columns, file_format format) {
switch (format) {
case LOWER_DISTANCE_MATRIX:
return read_lower_distance_matrix_python(matrix, num_entries);
case DISTANCE_MATRIX://assume that the distance matrix has been changed into lower_distance matrix format
return read_distance_matrix_python(matrix, num_entries);
case POINT_CLOUD:
return read_point_cloud_python(matrix, num_rows, num_columns);
}
//std::cerr<<"unsupported input file format for python interface"<<std::endl;
exit(-1);
}
compressed_lower_distance_matrix read_file(std::istream& input_stream, file_format format) {
switch (format) {
case LOWER_DISTANCE_MATRIX:
return read_lower_distance_matrix(input_stream);
case DISTANCE_MATRIX:
return read_distance_matrix(input_stream);
case POINT_CLOUD:
return read_point_cloud(input_stream);
case DIPHA:
return read_dipha(input_stream);
default:
return read_binary(input_stream);
}
//std::cerr<<"unsupported input file format"<<std::endl;
}
void print_usage_and_exit(int exit_code) {
std::cerr
<< "Usage: "
<< "ripser++ "
<< "[options] [filename]" << std::endl
<< std::endl
<< "Options:" << std::endl
<< std::endl
<< " --help print this screen" << std::endl
<< " --format use the specified file format for the input. Options are:"
<< std::endl
<< " lower-distance (lower triangular distance matrix; default)"
<< std::endl
<< " distance (full distance matrix)" << std::endl
<< " point-cloud (point cloud in Euclidean space)" << std::endl
<< " dipha (distance matrix in DIPHA file format)" << std::endl
<< " sparse (sparse distance matrix in sparse triplet (COO) format)"
<< std::endl
<< " binary (distance matrix in Ripser binary file format)"
<< std::endl
<< " --dim <k> compute persistent homology up to dimension <k>" << std::endl
<< " --threshold <t> compute Rips complexes up to diameter <t>" << std::endl
<< " --sparse force sparse computation "<<std::endl
<< " --ratio <r> only show persistence pairs with death/birth ratio > r" << std::endl
<< std::endl;
exit(exit_code);
}
extern "C" void run_main_filename(int argc, char** argv, const char* filename) {
Stopwatch sw;
#ifdef PROFILING
hipDeviceProp_t deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 0; i < argc; i++) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}
}
std::ifstream file_stream(filename);
if (filename && file_stream.fail()) {
//std::cerr << "couldn't open file " << filename << std::endl;
exit(-1);
}
if (format == SPARSE) {
Stopwatch IOsw;
IOsw.start();
sparse_distance_matrix dist =
read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else {
Stopwatch IOsw;
IOsw.start();
compressed_lower_distance_matrix dist= read_file(filename ? file_stream : std::cin, format);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load distance matrix (I/O)"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= ::max(r_i, dist(i, j));
enclosing_radius= ::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= ::min(min, d);
max= ::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? ::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_end,&totalMemory);
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
extern "C" void run_main(int argc, char** argv, value_t* matrix, int num_entries, int num_rows, int num_columns) {
Stopwatch sw;
#ifdef PROFILING
hipDeviceProp_t deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
const char* filename= nullptr;
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 0; i < argc; i++) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}
}
if (format == SPARSE) {//this branch is currently unsupported in run_main, see run_main_filename() instead
Stopwatch IOsw;
IOsw.start();
std::ifstream file_stream(filename);
sparse_distance_matrix dist= read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else{
//Stopwatch IOsw;
//IOsw.start();
compressed_lower_distance_matrix dist= read_matrix_python(matrix, num_entries, num_rows, num_columns, format);
//IOsw.stop();
#ifdef PROFILING
////std::cerr<<IOsw.ms()/1000.0<<"s time to load python matrix"<<std::endl;
//std::cerr<<"loaded python dense user matrix"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= ::max(r_i, dist(i, j));
enclosing_radius= ::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= ::min(min, d);
max= ::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? ::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_end,&totalMemory);
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
int main(int argc, char** argv) {
Stopwatch sw;
#ifdef PROFILING
hipDeviceProp_t deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
const char* filename= nullptr;
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 1; i < argc; ++i) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}else {
if (filename) { print_usage_and_exit(-1); }
filename= argv[i];
}
}
std::ifstream file_stream(filename);
if (filename && file_stream.fail()) {
//std::cerr << "couldn't open file " << filename << std::endl;
exit(-1);
}
if (format == SPARSE) {
Stopwatch IOsw;
IOsw.start();
sparse_distance_matrix dist =
read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else {
Stopwatch IOsw;
IOsw.start();
compressed_lower_distance_matrix dist= read_file(filename ? file_stream : std::cin, format);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load distance matrix (I/O)"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= ::max(r_i, dist(i, j));
enclosing_radius= ::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= ::min(min, d);
max= ::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? ::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
hipGetDeviceProperties(&deviceProp, 0);
hipMemGetInfo(&freeMem_end,&totalMemory);
hipDeviceReset();
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
| 2ada112a7d854a370d56d6eb345101603cba4398.cu | #include <typeinfo>
/*
Ripser++: accelerated Vietoris-Rips persistence barcodes computation with GPU
MIT License
Copyright (c) 2019, 2020 Simon Zhang, Mengbai Xiao, Hao Wang
Python Bindings: Birkan Gokbag
Copyright (c) 2015-2019 Ripser codebase, written by Ulrich Bauer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
You are under no obligation whatsoever to provide any bug fixes, patches, or
upgrades to the features, functionality or performance of the source code
("Enhancements") to anyone; however, if you choose to make your Enhancements
available either publicly, or directly to the author of this software, without
imposing a separate written license agreement for such Enhancements, then you
hereby grant the following license: a non-exclusive, royalty-free perpetual
license to install, use, modify, prepare derivative works, incorporate into
other computer software, distribute, and sublicense such enhancements or
derivative works thereof, in binary and source code form.
*/
#define CUDACHECK(cmd) do {\
cudaError_t e= cmd;\
if( e != cudaSuccess ) {\
printf("Failed: Cuda error %s:%d '%s'\n",\
__FILE__,__LINE__,cudaGetErrorString(e));\
exit(EXIT_FAILURE);\
}\
} while(0)
//#define INDICATE_PROGRESS//DO NOT UNCOMMENT THIS IF YOU WANT TO LOG PROFILING NUMBERS FROM stderr TO FILE
#define PRINT_PERSISTENCE_PAIRS//print out all persistence paris to stdout
//#define CPUONLY_ASSEMBLE_REDUCTION_MATRIX//do full matrix reduction on CPU with the sparse coefficient matrix V
//#define ASSEMBLE_REDUCTION_SUBMATRIX//do submatrix reduction with the sparse coefficient submatrix of V
#define PROFILING
#define COUNTING
#define USE_PHASHMAP//www.github.com/greg7mdp/parallel-hashmap
#ifndef USE_PHASHMAP
#define USE_GOOGLE_HASHMAP
#endif
//#define CPUONLY_SPARSE_HASHMAP//WARNING: MAY NEED LOWER GCC VERSION TO RUN, TESTED ON: NVCC VERSION 9.2 WITH GCC VERSIONS >=5.3.0 AND <=7.3.0
#define MIN_INT64 (-9223372036854775807-1)
#define MAX_INT64 (9223372036854775807)
#define MAX_FLOAT (340282346638528859811704183484516925440.000000)
#include <cassert>
#include <fstream>
#include <iostream>
#include <numeric>
#include <queue>
#include <sstream>
#include <unordered_map>
#include <cmath>
#include <algorithm>
#include <profiling/stopwatch.h>
#include <sparsehash/dense_hash_map>
#include <phmap_interface/phmap_interface.h>
#include <omp.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <cuda_runtime.h>
#ifdef CPUONLY_SPARSE_HASHMAP
#include <sparsehash/sparse_hash_map>
template <class Key, class T> class hash_map : public google::sparse_hash_map<Key, T> {
public:
explicit hash_map() : google::sparse_hash_map<Key, T>() {
}
inline void reserve(size_t hint) { this->resize(hint); }
};
#endif
#ifndef CPUONLY_SPARSE_HASHMAP
template <class Key, class T> class hash_map : public google::dense_hash_map<Key, T> {
public:
explicit hash_map() : google::dense_hash_map<Key, T>() {
this->set_empty_key(-1);
}
inline void reserve(size_t hint) { this->resize(hint); }
};
#endif
static const std::string clear_line("\r\033[K");
#ifdef INDICATE_PROGRESS
static const std::chrono::milliseconds time_step(40);
#endif
typedef float value_t;
typedef int64_t index_t;
struct diameter_index_t_struct{
value_t diameter;
index_t index;
};
struct index_diameter_t_struct{
index_t index;
value_t diameter;
};
struct lowerindex_lowerdiameter_index_t_struct_compare{
__host__ __device__ bool operator() (struct index_diameter_t_struct a, struct index_diameter_t_struct b){
return a.index!=b.index ? a.index<b.index : a.diameter<b.diameter;
}
};
struct greaterdiam_lowerindex_diameter_index_t_struct_compare {
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.diameter!=b.diameter ? a.diameter>b.diameter : a.index<b.index;
}
};
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse {
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.diameter!=b.diameter ? a.diameter<b.diameter : a.index>b.index;
}
};
struct lowerindex_lowerdiam_diameter_index_t_struct_compare{
__host__ __device__ bool operator() (struct diameter_index_t_struct a, struct diameter_index_t_struct b){
return a.index!=b.index ? a.index<b.index : a.diameter<b.diameter;
}
};
struct index_t_pair_struct{//data type for a pivot in the coboundary matrix: (row,column)
index_t row_cidx;
index_t column_idx;
};
struct row_cidx_column_idx_struct_compare{
__host__ __device__ bool operator()(struct index_t_pair_struct a, struct index_t_pair_struct b){
//return a.row_cidx!=b.row_cidx ? a.row_cidx<b.row_cidx : a.column_idx<b.column_idx;//the second condition should never happen if sorting pivot pairs since pivots do not conflict on rows or columns
return a.row_cidx<b.row_cidx || (a.row_cidx==b.row_cidx && a.column_idx<b.column_idx);
}
};
__host__ __device__ value_t hd_max(value_t a, value_t b){
return a>b?a:b;
}
void check_overflow(index_t i){
if(i<0){
throw std::overflow_error("simplex index "+std::to_string((uint64_t)i)+" in filtration is overflowing past 64 bits signed integer");
}
}
//assume i>j (lower triangular with i indexing rows and j indexing columns
#define LOWER_DISTANCE_INDEX(i,j,n) (((i)*((i)-1)/2)+(j))
class binomial_coeff_table {
index_t num_n;
index_t max_tuple_length;
#define BINOM_TRANSPOSE(i,j) ((j)*(num_n)+(i))
#define BINOM(i,j) ((i)*(max_tuple_length)+(j))
public:
index_t* binoms;
binomial_coeff_table(index_t n, index_t k) {
binoms= (index_t*)malloc(sizeof(index_t)*(n+1)*(k+1));
if(binoms==NULL){
//std::cerr<<"malloc for binoms failed"<<std::endl;
exit(1);
}
num_n= n+1;
max_tuple_length= k+1;
memset(binoms, 0, sizeof(index_t)*num_n*max_tuple_length);
for (index_t i= 0; i <= n; i++) {
for (index_t j= 0; j <= std::min(i, k); j++){
if (j == 0 || j == i) {
binoms[BINOM_TRANSPOSE(i,j)]= 1;
} else {
binoms[BINOM_TRANSPOSE(i,j)]= binoms[BINOM_TRANSPOSE(i-1,j-1)]+binoms[BINOM_TRANSPOSE(i-1,j)];
}
}
check_overflow(binoms[BINOM_TRANSPOSE(i,std::min(i>>1,k))]);
}
}
index_t get_num_n() const{
return num_n;
}
index_t get_max_tuple_length() const{
return max_tuple_length;
}
__host__ __device__ index_t operator()(index_t n, index_t k) const{
assert(n<num_n && k<max_tuple_length);
return binoms[BINOM_TRANSPOSE(n,k)];
}
};
typedef std::pair<value_t, index_t> diameter_index_t;
value_t get_diameter(const diameter_index_t& i) { return i.first; }
index_t get_index(const diameter_index_t& i) { return i.second; }
template <typename Entry> struct greater_diameter_or_smaller_index {
bool operator()(const Entry& a, const Entry& b) {
return (get_diameter(a) > get_diameter(b)) ||
((get_diameter(a) == get_diameter(b)) && (get_index(a) < get_index(b)));
}
};
struct CSR_distance_matrix{
index_t capacity;
value_t* entries;
index_t* offsets;
index_t* col_indices;
index_t n;
index_t num_edges;
index_t num_entries;
public:
CSR_distance_matrix(){}//avoid calling malloc in constructor for GPU side
index_t size(){return n;}
};
class compressed_lower_distance_matrix {
public:
std::vector<value_t> distances;
std::vector<value_t*> rows;
void init_rows() {
value_t* pointer= &distances[0];
for (index_t i= 1; i < size(); ++i) {
rows[i]= pointer;
pointer+= i;
}
}
compressed_lower_distance_matrix(std::vector<value_t>&& _distances)
: distances(std::move(_distances)), rows((1 + std::sqrt(1 + 8 * distances.size())) / 2) {
assert(distances.size() == size() * (size() - 1) / 2);
init_rows();
}
template <typename DistanceMatrix>
compressed_lower_distance_matrix(const DistanceMatrix& mat)
: distances(mat.size() * (mat.size() - 1) / 2), rows(mat.size()) {
init_rows();
for (index_t i= 1; i < size(); ++i)
for (index_t j= 0; j < i; ++j) rows[i][j]= mat(i, j);
}
value_t operator()(const index_t i, const index_t j) const {
return i == j ? 0 : i < j ? rows[j][i] : rows[i][j];
}
value_t distance(index_t i, index_t j){
return i == j ? 0 : i < j ? rows[j][i] : rows[i][j];
}
size_t size() const { return rows.size(); }
};
struct sparse_distance_matrix {
std::vector<std::vector<index_diameter_t_struct>> neighbors;
index_t num_entries;
mutable std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator> neighbor_it;
mutable std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator> neighbor_end;
sparse_distance_matrix(std::vector<std::vector<index_diameter_t_struct>>&& _neighbors,
index_t _num_edges)
: neighbors(std::move(_neighbors)), num_entries(_num_edges*2) {}
value_t distance(index_t i, index_t j){
// TODO (@captain-pool / anyone who is reading)
// This is a dummy, just to fool the compiler. The compiler looks for it,
// When it doesn't get, it raises an issue.
// The reason for this extra distance() is because the operator() can only called with
// const index_t / mutable
// I tried those, but have no effing clue, why it doesn't work.
// Now I'm defining another generic function that doesn't needs const and it works.
// It is not yet implemented for sparse_distance_matrix, so please implement it.
return 0;
}
template <typename DistanceMatrix>
sparse_distance_matrix(const DistanceMatrix& mat, const value_t threshold)
: neighbors(mat.size()), num_entries(0) {
//std::cerr << "threshold: " << threshold << std::endl;
for (index_t i= 0; i < size(); ++i) {
for (index_t j= 0; j < size(); ++j) {
if (i != j && mat(i, j) <= threshold) {
++num_entries;
neighbors[i].push_back({j, mat(i, j)});
}
}
}
}
size_t size() const { return neighbors.size(); }
private:
//this should only be called from CPU side
void append_sparse(CSR_distance_matrix& dist, value_t e, index_t j) {
if (dist.capacity == 0) {
dist.entries= (value_t *) malloc(sizeof(value_t) * size() * 10);
if(dist.entries==NULL){
//std::cerr<<"entries could not be malloced"<<std::endl;
exit(1);
}
dist.col_indices= (index_t *) malloc(sizeof(index_t) * size() * 10);
if(dist.col_indices==NULL){
//std::cerr<<"col_indices could not be malloced"<<std::endl;
exit(1);
}
dist.capacity= size() * 10;
}
if (dist.num_entries >= dist.capacity) {
dist.capacity*= 2;
dist.entries= (value_t *) realloc(dist.entries, sizeof(value_t) * dist.capacity);
if(dist.entries==NULL){
//std::cerr<<"col_indices could not be realloced with double memory"<<std::endl;
exit(1);
}
dist.col_indices= (index_t *) realloc(dist.col_indices, sizeof(index_t) * dist.capacity);
if(dist.col_indices==NULL){
//std::cerr<<"col_indices could not be realloced with double memory"<<std::endl;
exit(1);
}
}
dist.entries[dist.num_entries]= e;
dist.col_indices[dist.num_entries++]= j;
}
//this should only be called on CPU side
void update_offsets(CSR_distance_matrix& dist, index_t row_index, index_t offset_increment){
if(row_index==0){
dist.offsets[0]= 0;
}
dist.offsets[row_index+1]= dist.offsets[row_index]+offset_increment;
}
public:
CSR_distance_matrix toCSR(){
CSR_distance_matrix dist;
dist.n= size();
dist.num_entries= 0;
dist.capacity= num_entries;//this sets the matrix to exactly num_entries memory allocation
dist.offsets= (index_t*) malloc(sizeof(index_t)*(size()+1));
if(dist.offsets==NULL){
//std::cerr<<"malloc for offsets failed"<<std::endl;
exit(1);
}
dist.col_indices= (index_t*) malloc(sizeof(index_t)*dist.capacity);
if(dist.col_indices==NULL){
//std::cerr<<"malloc for col_indices failed"<<std::endl;
exit(1);
}
dist.entries= (value_t*) malloc(sizeof(value_t)*dist.capacity);
if(dist.entries==NULL){
//std::cerr<<"malloc for entries failed"<<std::endl;
exit(1);
}
for(index_t i= 0; i<size(); i++){
index_t nnz_inrow= 0;
for(index_t j=0; j<neighbors[i].size(); j++){
append_sparse(dist, neighbors[i][j].diameter, neighbors[i][j].index);
nnz_inrow++;
}
update_offsets(dist, i, nnz_inrow);
}
dist.num_edges= num_entries/2;
return dist;
}
};
class euclidean_distance_matrix {
public:
std::vector<std::vector<value_t>> points;
euclidean_distance_matrix(std::vector<std::vector<value_t>>&& _points)
: points(std::move(_points)) {
for (auto p : points) { assert(p.size() == points.front().size()); }
}
value_t operator()(const index_t i, const index_t j) const {
assert(i < points.size());
assert(j < points.size());
return std::sqrt(std::inner_product(
points[i].begin(), points[i].end(), points[j].begin(), value_t(), std::plus<value_t>(),
[](value_t u, value_t v) { return (u - v) * (u - v); }));
}
size_t size() const { return points.size(); }
};
class union_find {
std::vector<index_t> parent;
std::vector<uint8_t> rank;
public:
union_find(index_t n) : parent(n), rank(n, 0) {
for (index_t i= 0; i < n; ++i) parent[i]= i;
}
index_t find(index_t x) {
index_t y= x, z;
while ((z= parent[y]) != y) y= z;
while ((z= parent[x]) != y) {
parent[x]= y;
x= z;
}
return z;
}
void link(index_t x, index_t y) {
if ((x= find(x)) == (y= find(y))) return;
if (rank[x] > rank[y])
parent[y]= x;
else {
parent[x]= y;
if (rank[x] == rank[y]) ++rank[y];
}
}
};
template <typename Heap> struct diameter_index_t_struct pop_pivot(Heap& column) {
if(column.empty()) {
return {0,-1};
}
auto pivot= column.top();
column.pop();
while(!column.empty() && (column.top()).index == pivot.index) {
column.pop();
if (column.empty()) {
return {0,-1};
}
else {
pivot= column.top();
column.pop();
}
}
return pivot;
}
template <typename Heap> struct diameter_index_t_struct get_pivot(Heap& column) {
struct diameter_index_t_struct result= pop_pivot(column);
if (result.index != -1) column.push(result);
return result;
}
template <typename T> T begin(std::pair<T, T>& p) { return p.first; }
template <typename T> T end(std::pair<T, T>& p) { return p.second; }
template <typename ValueType> class compressed_sparse_matrix {
std::vector<size_t> bounds;
std::vector<ValueType> entries;
typedef typename std::vector<ValueType>::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair;
public:
size_t size() const { return bounds.size(); }
iterator_pair subrange(const index_t index) {
return {entries.begin() + (index == 0 ? 0 : bounds[index - 1]),
entries.begin() + bounds[index]};
}
void append_column() { bounds.push_back(entries.size()); }
void push_back(const ValueType e) {
assert(0 < size());
entries.push_back(e);
++bounds.back();
}
};
template <typename ValueType> class compressed_sparse_submatrix {
std::vector<size_t> sub_bounds;//the 0-based indices for
std::vector<ValueType> entries;
typedef typename std::vector<ValueType>::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair;
public:
size_t size() const { return sub_bounds.size(); }
//assume we are given a "subindex" for the submatrix
//allows iteration from sub_bounds[index_to_subindex[index]] to sub_bounds[index_to_subindex[index+1]]-1
iterator_pair subrange(const index_t subindex) {
return {entries.begin() + (subindex == 0 ? 0 : sub_bounds[subindex - 1]),
entries.begin() + sub_bounds[subindex]};
}
void append_column() { sub_bounds.push_back(entries.size()); }
void push_back(const ValueType e) {
assert(0 < size());
entries.push_back(e);
++sub_bounds.back();
}
};
template <class Predicate> index_t upper_bound(index_t top, Predicate pred) {
if (!pred(top)) {
index_t count= top;
while (count > 0) {
index_t step= count >> 1;
if (!pred(top - step)) {
top-= step + 1;
count-= step + 1;
} else
count= step;
}
}
return top;
}
__global__ void gpu_insert_pivots_kernel(struct index_t_pair_struct* d_pivot_array, index_t* d_lowest_one_of_apparent_pair, index_t* d_pivot_column_index_OR_nonapparent_cols, index_t num_columns_to_reduce, index_t* d_num_nonapparent){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
//index_t* d_pivot_column_index_OR_nonapparent_cols is being used as d_nonapparent_cols
for(; tid<num_columns_to_reduce; tid+= stride) {
int keep_tid= d_lowest_one_of_apparent_pair[tid] == -1;
if (!keep_tid) {//insert pivot
d_pivot_array[tid].row_cidx= d_lowest_one_of_apparent_pair[tid];
d_pivot_array[tid].column_idx= tid;
}else {//keep track of nonapparent columns
d_pivot_array[tid].row_cidx= MAX_INT64;
d_pivot_array[tid].column_idx= MAX_INT64;
//do standard warp based filtering under the assumption that there are few nonapparent columns
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *) d_num_nonapparent, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if (keep_tid) {
d_pivot_column_index_OR_nonapparent_cols[pos]= tid;//being used as d_nonapparent_cols
}
}
}
}
__global__ void populate_edges_warpfiltering(struct diameter_index_t_struct* d_columns_to_reduce, value_t threshold, value_t* d_distance_matrix, index_t max_num_simplices, index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t* d_num_columns_to_reduce){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
__shared__ index_t shared_vertices[256][3];//eliminate bank conflicts (that's what the 3 is for)
for(; tid<max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= 2; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x][offset++]= v;
idx -= (*d_binomial_coeff)(v, k);
}
//shared_vertices is always sorted in decreasing order
value_t diam= d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x][0], shared_vertices[threadIdx.x][1], num_points)];
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_tid= diam<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_tid){
d_columns_to_reduce[pos].diameter= diam;
d_columns_to_reduce[pos].index= tid;
}
}
}
template <typename T> __global__ void populate_edges(T* d_flagarray, struct diameter_index_t_struct* d_columns_to_reduce, value_t threshold, value_t* d_distance_matrix, index_t max_num_simplices, index_t num_points, binomial_coeff_table* d_binomial_coeff){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x*(index_t)gridDim.x;
__shared__ index_t shared_vertices[256][3];//designed to eliminate bank conflicts (that's what the 3 is for)
for(; tid<max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= 2; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x][offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
//shared_vertices is sorted in decreasing order
value_t diam= d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x][0], shared_vertices[threadIdx.x][1], num_points)];
if(diam<=threshold){
d_columns_to_reduce[tid].diameter= diam;
d_columns_to_reduce[tid].index= tid;
d_flagarray[tid]= 1;
}else{
d_columns_to_reduce[tid].diameter= MAX_FLOAT;//the sorting is in boundary matrix filtration order
d_columns_to_reduce[tid].index= MIN_INT64;
d_flagarray[tid]= 0;
}
}
}
__global__ void populate_columns_to_reduce_warpfiltering(struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, index_t* d_pivot_column_index, value_t* d_distance_matrix, index_t num_points, index_t max_num_simplices, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff) {
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
value_t diam= -MAX_FLOAT;
for (index_t j= 0; j <= dim; ++j) {
for (index_t i= 0; i < j; ++i) {
diam= hd_max(diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + i], shared_vertices[threadIdx.x *(dim + 1) + j], num_points)]);
}
}
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_tid= d_pivot_column_index[tid] == -1 && diam<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_tid);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_tid){
d_columns_to_reduce[pos].diameter= diam;
d_columns_to_reduce[pos].index= tid;
}
}
}
__global__ void populate_sparse_edges_preparingcount(int* d_num, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t* d_num_simplices){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
for(; tid<num_points; tid+= stride){
int _num=0;
index_t col_start= offsets[tid];
index_t col_end= offsets[tid+1];
for(index_t entry_idx= col_start; entry_idx<col_end; entry_idx++){
index_t neighbor_of_tid= col_indices[entry_idx];
if(tid>neighbor_of_tid)_num++;
}
d_num[tid]= _num;
}
}
__global__ void populate_sparse_edges_prefixsum(struct diameter_index_t_struct* d_simplices, int* d_num, CSR_distance_matrix* d_CSR_distance_matrix, binomial_coeff_table* d_binomial_coeff, index_t num_points, index_t* d_num_simplices){
index_t tid= (index_t)threadIdx.x+(index_t)blockIdx.x*(index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
value_t* entries= d_CSR_distance_matrix->entries;
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
for(; tid<num_points; tid+= stride){
int _pos=0;
index_t col_start= offsets[tid];
index_t col_end= offsets[tid+1];
for(index_t entry_idx= col_start; entry_idx<col_end; entry_idx++){
index_t neighbor_of_tid= col_indices[entry_idx];
if(tid>neighbor_of_tid){
d_simplices[d_num[tid]+_pos].diameter= entries[entry_idx];
d_simplices[d_num[tid]+_pos++].index= (*d_binomial_coeff)(tid,2) + neighbor_of_tid;
}
}
if(tid==num_points-1){
*d_num_simplices= d_num[tid]+_pos;
}
}
}
__global__ void populate_sparse_simplices_warpfiltering(struct diameter_index_t_struct* d_simplices, index_t* d_num_simplices, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff){
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
dim--;//keep dim in terms of the dimension of the simplices
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < *d_num_simplices; tid += stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_simplices[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
index_t k= dim+1;
bool next_cofacet= false;
value_t nbr_diameter= -1;
index_t nbr_index= -1;
index_t idx_below= d_simplices[tid].index;
index_t idx_above= 0;
index_t base_vertex_index= shared_vertices[threadIdx.x * (dim + 1)]; //shared_vertices[threadIdx.x][0];
//this gives the entry indices of the right and left ends of the row indexed by base_vertex_index in the CSR distance matrix
index_t base_vertex_nbr_itr= d_CSR_distance_matrix->offsets[base_vertex_index+1]-1;
index_t base_vertex_nbr_end= d_CSR_distance_matrix->offsets[base_vertex_index];
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices along with the base_vertex
for(index_t other_vertex_idx=1; other_vertex_idx<dim+1; other_vertex_idx++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_idx];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//this simply says we only consider nbr_index (the appending point) to be of larger index than the largest of shared_vertices (the vertices of the current simplex)
if(shared_vertices[threadIdx.x * (dim + 1)]>nbr_index){
next_cofacet= false;
goto end_search;
}
next_cofacet= true;
goto end_search;
try_next_vertex:;
}
next_cofacet= false;
end_search:;
//end of search for next cofacet (sparse version)
while(next_cofacet){
base_vertex_nbr_itr--;
value_t cofacet_diameter= hd_max(d_simplices[tid].diameter, nbr_diameter);
index_t cofacet_index= idx_above + (*d_binomial_coeff)(nbr_index, k + 1) + idx_below;
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_cofacet= cofacet_diameter<=threshold;
int mask= __ballot_sync(FULL_MASK, keep_cofacet);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_cofacet){
d_columns_to_reduce[pos].diameter= cofacet_diameter;
d_columns_to_reduce[pos].index= cofacet_index;
}
//isn't a way to represent the hash table on gpu in a cheap way, so we ignore the hash table for assembling columns to reduce
next_cofacet= false;
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices, in addition to the base_vertex
for(index_t other_vertex_index= 1; other_vertex_index<dim+1; other_vertex_index++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_index];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search_inloop;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex_inloop;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//notice we must reverse the shared_vertices in the original ripser code since they are sorted in decreasing order
if(shared_vertices[threadIdx.x * (dim + 1)]>nbr_index){
next_cofacet= false;
goto end_search_inloop;
}
next_cofacet= true;
goto end_search_inloop;
try_next_vertex_inloop:;
}
next_cofacet= false;
end_search_inloop:;
}
}
}
//the hope is that this is concurrency-bug free, however this is very bad for sparse graph performance
__global__ void populate_sparse_simplices_pairedfiltering(struct diameter_index_t_struct* d_simplices, index_t* d_num_simplices, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, index_t num_points, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff){
//a thread per (simplex , point) pair
//if the point is a "neighbor" of the simplex, then include that cofacet in d_columns_to_reduce (a filtering of d_simplices),
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
dim--;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < *d_num_simplices*num_points; tid+= stride) {
index_t vertex= tid%num_points;
index_t simplex= tid/num_points;
index_t offset= 0;
index_t v= num_points-1;
index_t idx= d_simplices[simplex].index;
for (index_t k= dim +1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
index_t* offsets= d_CSR_distance_matrix->offsets;
index_t* col_indices= d_CSR_distance_matrix->col_indices;
value_t* entries= d_CSR_distance_matrix->entries;
bool alledges_exist= true;
index_t start_idx= offsets[vertex];
index_t end_idx= offsets[vertex+1];
value_t cofacet_diameter= d_simplices[simplex].diameter;
for(index_t vidx= 0; vidx<dim+1; vidx++) {
index_t v= shared_vertices[threadIdx.x * (dim + 1) + vidx];
index_t left= start_idx;
index_t right= end_idx-1;
//binary search for v in row vertex with start and end start_idx and end_idx respectively
while(left<=right){
index_t mid= left+(right-left)/2;
if(col_indices[mid]==v){
cofacet_diameter= hd_max(cofacet_diameter, entries[mid]);
goto next_vertex;
}
if(col_indices[mid]<v){
left= mid+1;
}else{
right= mid-1;
}
}
alledges_exist= false;
break;
next_vertex:;
}
if(!alledges_exist){
cofacet_diameter= threshold+1;
}
if(shared_vertices[threadIdx.x * (dim + 1)]>vertex){
alledges_exist= false;//we only include this vertex "vertex" if "vertex" has a strictly larger value than all other vertices in simplex
}
index_t cofacet_index= (*d_binomial_coeff)(vertex, dim+2) + d_simplices[simplex].index;
#define FULL_MASK 0xFFFFFFFF
int lane_id= threadIdx.x % 32;
int keep_cofacet= cofacet_diameter<=threshold && alledges_exist;
int mask= __ballot_sync(FULL_MASK, keep_cofacet);
int leader= __ffs(mask) - 1;
int base;
if (lane_id == leader)
base= atomicAdd((unsigned long long int *)d_num_columns_to_reduce, __popc(mask));
base= __shfl_sync(mask, base, leader);
int pos= base + __popc(mask & ((1 << lane_id) - 1));
if(keep_cofacet){
d_columns_to_reduce[pos].diameter= cofacet_diameter;
d_columns_to_reduce[pos].index= cofacet_index;
}
}
}
template <typename T>__global__ void populate_columns_to_reduce(T* d_flagarray, struct diameter_index_t_struct* d_columns_to_reduce, index_t* d_pivot_column_index,
value_t* d_distance_matrix, index_t num_points, index_t max_num_simplices, index_t dim, value_t threshold, binomial_coeff_table* d_binomial_coeff) {
index_t tid= (index_t)threadIdx.x + (index_t)blockIdx.x * (index_t)blockDim.x;
index_t stride= (index_t)blockDim.x * (index_t)gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < max_num_simplices; tid+= stride) {
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= tid;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;
idx-= (*d_binomial_coeff)(v, k);
}
value_t diam= -MAX_FLOAT;
for(index_t i= 0; i<=dim; i++){
for(index_t j= i+1; j<=dim; j++){
diam= hd_max(diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + i], shared_vertices[threadIdx.x * (dim + 1) + j], num_points)]);
}
}
if(d_pivot_column_index[tid]==-1 && diam<=threshold){
d_columns_to_reduce[tid].diameter= diam;
d_columns_to_reduce[tid].index= tid;
d_flagarray[tid]= 1;
}else{
d_columns_to_reduce[tid].diameter= -MAX_FLOAT;
d_columns_to_reduce[tid].index= MAX_INT64;
d_flagarray[tid]= 0;
}
}
}
__global__ void init_cidx_to_diam(value_t* d_cidx_to_diameter, struct diameter_index_t_struct* d_columns_to_reduce, index_t num_columns_to_reduce){
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
for (; tid < num_columns_to_reduce; tid += stride) {
d_cidx_to_diameter[d_columns_to_reduce[tid].index]= d_columns_to_reduce[tid].diameter;
}
}
//scatter operation
__global__ void init_index_to_subindex(index_t* d_index_to_subindex, index_t* d_nonapparent_columns, index_t num_nonapparent){
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
for (; tid < num_nonapparent; tid += stride) {
d_index_to_subindex[d_nonapparent_columns[tid]]= tid;
}
}
//THIS IS THE GPU SCAN KERNEL for the dense case!!
__global__ void coboundary_findapparent_single_kernel(value_t* d_cidx_to_diameter, struct diameter_index_t_struct * d_columns_to_reduce, index_t* d_lowest_one_of_apparent_pair, const index_t dim, index_t num_simplices, const index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t num_columns_to_reduce, value_t* d_distance_matrix, value_t threshold) {
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
for (; tid < num_columns_to_reduce; tid += stride) {
//populate the shared_vertices[][] matrix with vertex indices of the column index= shared_vertices[threadIdx.x][-];
//shared_vertices[][] matrix has row index threadIdx.x and col index offset, represented by: shared_vertices[threadIdx.x * (dim + 1) + offset]=
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_columns_to_reduce[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;//set v to the largest possible vertex index given idx as a combinatorial index
idx-= (*d_binomial_coeff)(v, k);
}
v= num_points-1;//this keeps track of the newly added vertex to the set of vertices stored in shared_vertices[threadIdx.x][-] to form a cofacet of the columns
index_t k= dim+1;
index_t idx_below= d_columns_to_reduce[tid].index;
index_t idx_above= 0;
while ((v != -1) && ((*d_binomial_coeff)(v, k) <= idx_below)) {
idx_below -= (*d_binomial_coeff)(v, k);
idx_above += (*d_binomial_coeff)(v, k + 1);
--v;
--k;
assert(k != -1);
}
while(v!=-1) {//need to enumerate cofacet combinatorial index in reverse lexicographic order (largest cidx down to lowest cidx)
index_t row_combinatorial_index= idx_above + (*d_binomial_coeff)(v--, k + 1) + idx_below;
//find the cofacet diameter
value_t cofacet_diam= d_columns_to_reduce[tid].diameter;
for(index_t j=0; j<dim+1; j++){
index_t last_v= v+1;
index_t simplex_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if(last_v>simplex_v){
cofacet_diam= hd_max(cofacet_diam, d_distance_matrix[LOWER_DISTANCE_INDEX(last_v, shared_vertices[threadIdx.x * (dim + 1) + j], num_points)]);
}else{
cofacet_diam= hd_max(cofacet_diam, d_distance_matrix[LOWER_DISTANCE_INDEX(shared_vertices[threadIdx.x * (dim + 1) + j], last_v, num_points)]);
}
}
if(d_columns_to_reduce[tid].diameter==cofacet_diam) {//this is a sufficient condition to finding a lowest one
//check if there is a nonzero to the left of (row_combinatorial_index, tid) in the coboundary matrix
//extra_vertex is the "added" vertex to shared_vertices
//FACT: {shared_vertices[threadIdx.x*(dim+1)+0]... threadIdx.x*(dim+1)+dim] union extra_vertex} equals cofacet vertices
index_t prev_remove_v= -1;
index_t s_v= shared_vertices[threadIdx.x * (dim + 1)];//the largest indexed vertex, shared_vertices is sorted in decreasing orders
bool passed_extra_v= false;
index_t remove_v;//this is the vertex to remove from the cofacet
index_t extra_vertex= v+1;//the +1 is here to counteract the last v-- line of code
if(s_v>extra_vertex){
remove_v= s_v;
}else{
remove_v= extra_vertex;
passed_extra_v= true;
}
prev_remove_v= remove_v;
index_t facet_of_row_combinatorial_index= row_combinatorial_index;
facet_of_row_combinatorial_index-= (*d_binomial_coeff)(remove_v, dim+2);//subtract the largest binomial coefficient to get the new cidx
index_t col_cidx= d_columns_to_reduce[tid].index;
value_t facet_of_row_diameter= d_cidx_to_diameter[facet_of_row_combinatorial_index];
value_t col_diameter= d_columns_to_reduce[tid].diameter;
if(facet_of_row_combinatorial_index==col_cidx && facet_of_row_diameter== col_diameter){//if there is an exact match of the tid column and the face of the row, then all subsequent faces to search will be to the right of column tid
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
break;
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<= threshold && (
// d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
// || (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
// || facet_of_row_combinatorial_index> d_columns_to_reduce[tid].index)){
//FACT: it turns out we actually only need to check facet_of_row_diameter<= threshold &&(facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx)
//since we should never have a facet of the cofacet with diameter larger than the cofacet's diameter= column's diameter
//in fact, we don't even need to check facet_of_row_diameter<=threshold since diam(face(cofacet(simplex)))<=diam(cofacet(simplex))=diam(simplex)<=threshold
//furthermore, we don't even need to check facet_of_row_combinatorial_index<col_cidx since we will exit upon col_cidx while iterating in increasing combinatorial index
else if(facet_of_row_diameter==col_diameter){
assert(facet_of_row_diameter<= threshold && (facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx));
d_lowest_one_of_apparent_pair[tid]= -1;
break;
}
bool found_apparent_or_found_nonzero_to_left= false;
//need to remove the last vertex: extra_v during searches
//there are dim+2 total number of vertices, the largest vertex was already checked so that is why k starts at dim+1
//j is the col. index e.g. shared_vertices[threadIdx.x][j]=shared_vertices[threadIdx.x*(dim+1)+j]
for(index_t k= dim+1, j=passed_extra_v?0:1; k>=1; k--){//start the loop after checking the lexicographically smallest facet boundary case
if(passed_extra_v) {
remove_v= shared_vertices[threadIdx.x * (dim + 1) + j];
j++;
}
else if(j<dim+1) {
//compare s_v in shared_vertices with v
index_t s_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if (s_v > extra_vertex) {
remove_v= s_v;
j++;
} else {
remove_v= extra_vertex;//recall: extra_vertex= v+1
passed_extra_v= true;
}
//this last else says: if j==dim+1 and we never passed extra vertex, then we must remove extra_vertex as the last vertex to remove to form a facet.
}else {//there is no need to check s_v>extra_vertex, we never passed extra_vertex, so we need to remove extra_vertex for the last check
remove_v= extra_vertex;//recall; v+1 since there is a v-- before this
passed_extra_v= true;
}
//exchange remove_v choose k with prev_remove_v choose k
facet_of_row_combinatorial_index-=(*d_binomial_coeff)(remove_v,k);
facet_of_row_combinatorial_index+= (*d_binomial_coeff)(prev_remove_v,k);
value_t facet_of_row_diameter= d_cidx_to_diameter[facet_of_row_combinatorial_index];
if(facet_of_row_combinatorial_index==col_cidx && facet_of_row_diameter==col_diameter){
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
found_apparent_or_found_nonzero_to_left= true;
break;///need to break out the while(v!=-1) loop
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<=threshold &&
//( d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
//|| (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
//|| facet_of_row_combinatorial_index>d_columns_to_reduce[tid].index)){
else if(facet_of_row_diameter==col_diameter){
assert(facet_of_row_diameter<= threshold && (facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx));
//d_lowest_one_of_apparent_pair[] is set to -1's already though...
d_lowest_one_of_apparent_pair[tid]= -1;
found_apparent_or_found_nonzero_to_left= true;
break;
}
prev_remove_v= remove_v;
}
//we must exit early if we have a nonzero to left or the column is apparent
if(found_apparent_or_found_nonzero_to_left){
break;
}
//end check for nonzero to left
//need to record the found pairs in the global hash_map for pairs (post processing)
//see post processing section in gpuscan method
}
while ((v != -1) && ((*d_binomial_coeff)(v, k) <= idx_below)) {
idx_below -= (*d_binomial_coeff)(v, k);
idx_above += (*d_binomial_coeff)(v, k + 1);
--v;
--k;
assert(k != -1);
}
}
}
}
//gpuscan for sparse case
__global__ void coboundary_findapparent_sparse_single_kernel(struct diameter_index_t_struct* d_cidx_diameter_sorted_list, struct diameter_index_t_struct * d_columns_to_reduce, index_t* d_lowest_one_of_apparent_pair, const index_t dim, const index_t num_points, binomial_coeff_table* d_binomial_coeff, index_t num_columns_to_reduce, CSR_distance_matrix* d_CSR_distance_matrix, value_t threshold){//(this was for debugging), index_t* d_leftmostnz_inrow) {
index_t tid= (index_t) threadIdx.x + (index_t) blockIdx.x * (index_t) blockDim.x;
index_t stride= (index_t) blockDim.x * (index_t) gridDim.x;
extern __shared__ index_t shared_vertices[];//a 256x(dim+1) matrix; shared_vertices[threadIdx.x*(dim+1)+j]=the jth vertex for threadIdx.x thread in the thread block
//vertices sorted in reverse order
for (; tid < num_columns_to_reduce; tid += stride) {
//populate the shared_vertices[][] matrix with vertex indices of the column tid;
//row index of the shared_vertices matrix is threadIdx.x, col index of the shared_vertices matrix is offset
index_t offset= 0;
index_t v= num_points - 1;
index_t idx= d_columns_to_reduce[tid].index;
for (index_t k= dim + 1; k > 0; --k) {
if (!((*d_binomial_coeff)(v, k) <= idx)) {
index_t count= v;
while (count > 0) {
index_t step= count >> 1;
if (!((*d_binomial_coeff)(v - step, k) <= idx)) {
v-= step + 1;
count-= step + 1;//+1 is here to preserve the induction hypothesis (check v=4, k=4)
} else
count= step;//went too far, need to try a smaller step size to subtract from top
}
}
shared_vertices[threadIdx.x * (dim + 1) + offset++]= v;//set v to the largest possible vertex index given idx as a combinatorial index
idx-= (*d_binomial_coeff)(v, k);
}
index_t k= dim+1;
bool next_cofacet= false;
value_t nbr_diameter= -1;
index_t nbr_index= -1;
index_t idx_below= d_columns_to_reduce[tid].index;
index_t idx_above= 0;
index_t base_vertex_index= shared_vertices[threadIdx.x * (dim + 1)];
//this gives the entry indices of the right and left ends of the row indexed by base_vertex_index in the CSR distance matrix
index_t base_vertex_nbr_itr= d_CSR_distance_matrix->offsets[base_vertex_index+1]-1;
index_t base_vertex_nbr_end= d_CSR_distance_matrix->offsets[base_vertex_index];
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices besides the base_vertex
for(index_t other_vertex_idx=1; other_vertex_idx<dim+1; other_vertex_idx++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_idx];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
while (k > 0 && shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)] > nbr_index) {
idx_below -= (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k);
idx_above += (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k + 1);
--k;
}
next_cofacet= true;
goto end_search;
try_next_vertex:;
}
next_cofacet= false;
end_search:;
//end of search for next cofacet (sparse version)
while(next_cofacet) {
base_vertex_nbr_itr--;
value_t cofacet_diameter= hd_max(d_columns_to_reduce[tid].diameter, nbr_diameter);
index_t row_combinatorial_index= idx_above + (*d_binomial_coeff)(nbr_index, k + 1) + idx_below;
if(d_columns_to_reduce[tid].diameter==cofacet_diameter) {//this is a sufficient condition to finding a lowest one
//check if there is a nonzero to the left of (row_combinatorial_index, tid) in the coboundary matrix
//extra_vertex is the "added" vertex to shared_verticess
//FACT: {shared_vertices[threadIdx.x*(dim+1)+0]... shared_vertices[threadIdx.x*(dim+1)+dim] union extra_vertex} equals cofacet vertices
index_t prev_remove_v= -1;
index_t s_v= shared_vertices[threadIdx.x * (dim + 1)];//the largest indexed vertex, shared_vertices is sorted in decreasing orders
bool passed_extra_v= false;
index_t remove_v;//this is the vertex to remove from the cofacet
index_t extra_vertex= nbr_index;//the +1 is here to counteract the last v-- line of code
if (s_v > extra_vertex) {
remove_v= s_v;
} else {
remove_v= extra_vertex;
passed_extra_v= true;
}
prev_remove_v= remove_v;
index_t facet_of_row_combinatorial_index= row_combinatorial_index;
facet_of_row_combinatorial_index-= (*d_binomial_coeff)(remove_v, dim + 2);//subtract the largest binomial coefficient to get the new cidx
index_t col_cidx= d_columns_to_reduce[tid].index;
value_t col_diameter= d_columns_to_reduce[tid].diameter;
//binary search d_columns_to_reduce to get face_of_row_diameter
value_t facet_of_row_diameter= -1;// there is no direct mapping: d_cidx_to_diameter[facet_of_row_combinatorial_index];
///binary search goes here on d_cidx_diameter_sorted_list
index_t left= 0;
index_t right= num_columns_to_reduce-1;
while(left<=right){
index_t mid= left + (right-left)/2;
if(d_cidx_diameter_sorted_list[mid].index==facet_of_row_combinatorial_index){
facet_of_row_diameter= d_cidx_diameter_sorted_list[mid].diameter;
break;
}
if(d_cidx_diameter_sorted_list[mid].index<facet_of_row_combinatorial_index){
left= mid+1;
}else{
right= mid-1;
}
}
if (facet_of_row_combinatorial_index == col_cidx && facet_of_row_diameter == col_diameter) {//if there is an exact match of the tid column and the face of the row, then all subsequent faces to search will be to the right of column tid
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
break;
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<= threshold && (
// d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
// || (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
// || facet_of_row_combinatorial_index> d_columns_to_reduce[tid].index)){
//FACT: it turns out we actually only need to check facet_of_row_diameter<= threshold &&(facet_of_row_diameter==col_diameter && facet_of_row_combinatorial_index<col_cidx)
//since we should never have a face of the cofacet with diameter larger than the cofacet's diameter= column's diameter
//in fact, we don't even need to check facet_of_row_diameter<=threshold since diam(face(cofacet(simplex)))<=diam(cofacet(simplex))=diam(simplex)<=threshold
//furthremore, we don't even need to check facet_of_row_combinatorial_index<col_cidx since we will exit upon col_cidx while iterating in increasing combinatorial index
else if (facet_of_row_diameter == col_diameter) {
assert(facet_of_row_diameter <= threshold &&
(facet_of_row_diameter == col_diameter && facet_of_row_combinatorial_index < col_cidx));
d_lowest_one_of_apparent_pair[tid]= -1;
break;
}
bool found_apparent_or_found_nonzero_to_left= false;
//need to remove the last vertex: extra_v during searches
//there are dim+2 total number of vertices, the largest vertex was already checked so that is why k starts at dim+1
//j is the col. index, e.g. shared_vertices[threadIdx.x][j]=shared_vertices[threadIdx.x*(dim+1)+j]
for (index_t k= dim + 1, j= passed_extra_v ? 0 : 1;
k >= 1; k--) {//start the loop after checking the lexicographically smallest facet boundary case
if (passed_extra_v) {
remove_v= shared_vertices[threadIdx.x * (dim + 1) + j];
j++;
} else if (j < dim + 1) {
//compare s_v in shared_vertices with v
index_t s_v= shared_vertices[threadIdx.x * (dim + 1) + j];
if (s_v > extra_vertex) {
remove_v= s_v;
j++;
} else {
remove_v= extra_vertex;//recall: extra_vertex=nbr_index;
passed_extra_v= true;
}
//this last else says: if j==dim+1 and we never passed extra vertex, then we must remove extra_vertex as the last vertex to remove to form a face.
} else {//there is no need to check s_v>extra_vertex, we never passed extra_vertex, so we need to remove extra_vertex for the last check
remove_v= extra_vertex;//recall; extra_vertex= nbr_index
passed_extra_v= true;
}
//exchange remove_v choose k with prev_remove_v choose k
facet_of_row_combinatorial_index -= (*d_binomial_coeff)(remove_v, k);
facet_of_row_combinatorial_index += (*d_binomial_coeff)(prev_remove_v, k);
//replace d_cidx_to_diameter with d_cidx_diameter_sorted_list;
value_t facet_of_row_diameter= -1;// replacing direct map:: d_cidx_to_diameter[facet_of_row_combinatorial_index];
///binary search goes here on d_cidx_diameter_sorted_list
index_t left= 0;
index_t right= num_columns_to_reduce-1;
while(left<=right){
index_t mid= left + (right-left)/2;
if(d_cidx_diameter_sorted_list[mid].index==facet_of_row_combinatorial_index){
facet_of_row_diameter= d_cidx_diameter_sorted_list[mid].diameter;
break;
}
if(d_cidx_diameter_sorted_list[mid].index<facet_of_row_combinatorial_index){
left= mid+1;
}else{
right= mid-1;
}
}
if (facet_of_row_combinatorial_index == col_cidx && facet_of_row_diameter == col_diameter) {
//coboundary column tid has an apparent pair, record it
d_lowest_one_of_apparent_pair[tid]= row_combinatorial_index;
found_apparent_or_found_nonzero_to_left= true;
break;///need to break out the while(v!=-1) loop
}
//else if(d_cidx_to_diameter[facet_of_row_combinatorial_index]<=threshold &&
//( d_cidx_to_diameter[facet_of_row_combinatorial_index]>d_columns_to_reduce[tid].diameter
//|| (d_cidx_to_diameter[facet_of_row_combinatorial_index]==d_columns_to_reduce[tid].diameter && facet_of_row_combinatorial_index<d_columns_to_reduce[tid].index)
//|| facet_of_row_combinatorial_index>d_columns_to_reduce[tid].index)){
else if (facet_of_row_diameter == col_diameter) {
assert(facet_of_row_diameter <= threshold &&
(facet_of_row_diameter == col_diameter && facet_of_row_combinatorial_index < col_cidx));
//d_lowest_one_of_apparent_pair[tid]= -1;
found_apparent_or_found_nonzero_to_left= true;
break;
}
prev_remove_v= remove_v;
}
//we must exit early if we have a nonzero to left or the column is apparent
if (found_apparent_or_found_nonzero_to_left) {
break;
}
//end check for nonzero to left
}
next_cofacet= false;
for(; base_vertex_nbr_itr>=base_vertex_nbr_end; base_vertex_nbr_itr--){
//nbr is the neighboring vertex to the simplex corresponding to this tid
nbr_diameter= d_CSR_distance_matrix->entries[base_vertex_nbr_itr];
nbr_index= d_CSR_distance_matrix->col_indices[base_vertex_nbr_itr];
//there are dim other vertices besides the base_vertex
for(index_t other_vertex_index=1; other_vertex_index<dim+1; other_vertex_index++){
index_t other_vertex= shared_vertices[threadIdx.x * (dim + 1) + other_vertex_index];
index_t other_vertex_nbr_itr= d_CSR_distance_matrix->offsets[other_vertex+1]-1;
index_t other_vertex_nbr_end= d_CSR_distance_matrix->offsets[other_vertex];
index_t other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
while(other_vertex_nbr_index>nbr_index){
if(other_vertex_nbr_itr==other_vertex_nbr_end) {
next_cofacet= false;
goto end_search_inloop;
}
other_vertex_nbr_itr--;
other_vertex_nbr_index= d_CSR_distance_matrix->col_indices[other_vertex_nbr_itr];
}
if(other_vertex_nbr_index!=nbr_index){
goto try_next_vertex_inloop;
}else{
nbr_diameter= hd_max(nbr_diameter, d_CSR_distance_matrix->entries[other_vertex_nbr_itr]);
}
}
//notice we must reverse the shared_vertices since they are sorted in decreasing order
while (k > 0 && shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)] > nbr_index) {
idx_below -= (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k);
idx_above += (*d_binomial_coeff)(shared_vertices[threadIdx.x * (dim + 1) + dim- (k - 1)], k + 1);
--k;
}
next_cofacet= true;
goto end_search_inloop;
try_next_vertex_inloop:;
}
next_cofacet= false;
end_search_inloop:;
}
}
}
template <typename DistanceMatrix> class ripser {
DistanceMatrix dist;//this can be either sparse or compressed
index_t n, dim_max;//n is the number of points, dim_max is the max dimension to compute PH
value_t threshold;//this truncates the filtration by removing simplices too large. low values of threshold should use --sparse option
float ratio;
const binomial_coeff_table binomial_coeff;
mutable std::vector<index_t> vertices;
mutable std::vector<index_t> vertices_of_birth;
mutable std::vector<index_t> vertices_of_death;
mutable std::vector<diameter_index_t_struct> cofacet_entries;
private:
size_t freeMem, totalMem;
cudaDeviceProp deviceProp;
int grid_size;
hash_map<index_t, index_t> pivot_column_index;//small hash map for matrix reduction
//we are removing d_flagarray for a more general array: d_flagarray_OR_index_to_subindex
//char* type is 3x faster for thrust::count than index_t*
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
char* d_flagarray;//an array where d_flagarray[i]= 1 if i satisfies some property and d_flagarray[i]=0 otherwise
#endif
index_t* h_pivot_column_index_array_OR_nonapparent_cols;//the pivot column index hashmap represented by an array OR the set of nonapparent column indices
value_t* d_distance_matrix;//GPU copy of the distance matrix
CSR_distance_matrix* d_CSR_distance_matrix;
//d_pivot_column_index_OR_nonapparent_cols is d_nonapparent_cols when used in gpuscan() and compute_pairs() and is d_pivot_column_index when in gpu_assemble_columns()
index_t* d_pivot_column_index_OR_nonapparent_cols;//the pivot column index hashmap represented on GPU as an array OR the set of nonapparent columns on GPU
index_t max_num_simplices_forall_dims;//the total number of simplices of dimension dim_max possible (this assumes no threshold condition to sparsify the simplicial complex)
//the total number of simplices in the dim_max+1 dimension (a factor n larger than max_num_simplices_forall_dims), infeasible to allocate with this number if max_num_simplices_forall_dims is already pushing the memory limits.
struct diameter_index_t_struct* d_columns_to_reduce;//GPU copy of the columns to reduce depending on the current dimension
struct diameter_index_t_struct* h_columns_to_reduce;//columns to reduce depending on the current dimension
binomial_coeff_table* d_binomial_coeff;//GPU copy of the binomial coefficient table
index_t* d_num_columns_to_reduce=NULL;//use d_num_columns_to_reduce to keep track of the number of columns to reduce
index_t* h_num_columns_to_reduce;//h_num_columns_to_reduce is tied to d_num_columns_to_reduce in pinned memory?
index_t* d_num_nonapparent= NULL;//the number of nonapparent columns. *d_num_columns_to_reduce-*d_num_nonapparent= number of apparent columns
index_t* h_num_nonapparent;//h_num_nonapparent is tied to d_num_nonapparent in pinned memory?
index_t num_apparent;//the number of apparent pairs found
value_t* d_cidx_to_diameter;//GPU side mapping from cidx to diameters for gpuscan faces of a given row of a "lowest one" search
struct diameter_index_t_struct* d_cidx_diameter_pairs_sortedlist;//used as a sorted list of cidx,diameter pairs for lookup in gpuscan kernel for sparse case
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)//assemble reduction submatrix
index_t* d_flagarray_OR_index_to_subindex;//GPU data structure that maps index to subindex
index_t* h_flagarray_OR_index_to_subindex;//copy of index_to_subindex data structure that acts as a map for matrix index to reduction submatrix indexing on CPU side
#endif
//for GPU-scan (finding apparent pairs)
index_t* d_lowest_one_of_apparent_pair;//GPU copy of the lowest ones, d_lowest_one_of_apparent_pair[col]= lowest one row of column col
//index_t* h_lowest_one_of_apparent_pair;//the lowest ones, d_lowest_one_of_apparent_pair[col]= lowest one row of column col
struct index_t_pair_struct* d_pivot_array;//sorted array of all pivots, substitute for a structured hashmap with lookup done by log(n) binary search
struct index_t_pair_struct* h_pivot_array;//sorted array of all pivots
std::vector<struct diameter_index_t_struct> columns_to_reduce;
//used for sparse_distance_matrix ONLY:
struct diameter_index_t_struct* d_simplices;//GPU copy of h_simplices
struct diameter_index_t_struct* h_simplices;//the simplices filtered by diameter that need to be considered for the next dimension's simplices
index_t* d_num_simplices=NULL;//use d_num_simplices to keep track of the number of simplices in h_ or d_ simplices
index_t* h_num_simplices;//h_num_simplices is tied to d_num_simplices in pinned memory
public:
std::ofstream outfile;
ripser(DistanceMatrix&& _dist, index_t _dim_max, value_t _threshold, float _ratio)
: dist(std::move(_dist)), n(dist.size()),
dim_max(std::min(_dim_max, index_t(dist.size() - 2))), threshold(_threshold),
ratio(_ratio), binomial_coeff(n, dim_max + 2) {
outfile.open("/tmp/features.txt", std::ios::trunc | std::ios::out);
}
void free_init_cpumem() {
free(h_pivot_column_index_array_OR_nonapparent_cols);
}
void free_remaining_cpumem(){
free(h_columns_to_reduce);
free(h_pivot_array);
pivot_column_index.resize(0);
}
//calulate gpu_num_simplices_forall_dims based on GPU memory limit
index_t calculate_gpu_max_columns_for_sparserips_computation_from_memory(){
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem,&totalMem);
#ifdef PROFILING
//std::cerr<<"before calculation, sparse: total mem, free mem: "<<totalMem <<" bytes, "<<freeMem<<" bytes"<<std::endl;
#endif
index_t gpumem_char_array_bytes_factor= sizeof(char);
index_t gpumem_index_t_array_bytes_factor= sizeof(index_t);
index_t gpumem_value_t_array_bytes_factor= sizeof(value_t);
index_t gpumem_index_t_pairs_array_bytes_factor= sizeof(index_t_pair_struct);
index_t gpumem_diameter_index_t_array_bytes_factor= sizeof(diameter_index_t_struct);
index_t gpumem_CSR_dist_matrix_bytes= sizeof(index_t)*(n+1+4)+(sizeof(index_t)+sizeof(value_t))*dist.num_entries;//sizeof(value_t)*(n*(n-1))/2;
index_t gpumem_binomial_coeff_table_bytes= sizeof(index_t)*binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length() +sizeof(binomial_coeff_table);
index_t gpumem_index_t_bytes= sizeof(index_t);
index_t padding= 1024*1024*1024;//1GB padding
index_t fixedmemory= gpumem_index_t_bytes*4+gpumem_binomial_coeff_table_bytes+gpumem_CSR_dist_matrix_bytes+padding;
//this can be larger but not smaller than actual sizeof(-) sum
index_t sizeof_factor_sum=
gpumem_diameter_index_t_array_bytes_factor
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
+gpumem_index_t_array_bytes_factor
#endif
+gpumem_diameter_index_t_array_bytes_factor
+gpumem_index_t_array_bytes_factor
+gpumem_index_t_array_bytes_factor
+gpumem_index_t_pairs_array_bytes_factor
+gpumem_diameter_index_t_array_bytes_factor
+gpumem_index_t_pairs_array_bytes_factor;
#ifdef PROFILING
//std::cerr<<"sparse final calculation for memory, free memory: "<<freeMem <<" bytes, sizeof_factor_sum: "<<sizeof_factor_sum<<" bytes"<<std::endl;
#endif
return (freeMem*0.7-fixedmemory)/sizeof_factor_sum;
}
index_t calculate_gpu_dim_max_for_fullrips_computation_from_memory(const index_t dim_max, const bool isfullrips){
if(dim_max==0)return 0;
index_t gpu_dim_max= dim_max;
index_t gpu_alloc_memory_in_bytes= 0;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem,&totalMem);
#ifdef PROFILING
//std::cerr<<"GPU memory before full rips memory calculation, total mem: "<< totalMem<<" bytes, free mem: "<<freeMem<<" bytes"<<std::endl;
#endif
do{
index_t gpu_num_simplices_forall_dims= gpu_dim_max<n/2?get_num_simplices_for_dim(gpu_dim_max): get_num_simplices_for_dim(n/2);
index_t gpumem_char_array_bytes= sizeof(char)*gpu_num_simplices_forall_dims;
index_t gpumem_index_t_array_bytes= sizeof(index_t)*gpu_num_simplices_forall_dims;
index_t gpumem_value_t_array_bytes= sizeof(value_t)*gpu_num_simplices_forall_dims;
index_t gpumem_index_t_pairs_array_bytes= sizeof(index_t_pair_struct)*gpu_num_simplices_forall_dims;
index_t gpumem_diameter_index_t_array_bytes= sizeof(diameter_index_t_struct)*gpu_num_simplices_forall_dims;
index_t gpumem_dist_matrix_bytes= sizeof(value_t)*(n*(n-1))/2;
index_t gpumem_binomial_coeff_table_bytes= sizeof(index_t)*binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length() +sizeof(binomial_coeff_table);
index_t gpumem_index_t_bytes= sizeof(index_t);
//gpumem_CSR_dist_matrix_bytes is estimated to have n*(n-1)/2 number of nonzeros as an upper bound
index_t gpumem_CSR_dist_matrix_bytes= sizeof(index_t)*(n+1+4)+(sizeof(index_t)+sizeof(value_t))*n*(n-1)/2;//dist.num_entries;//sizeof(value_t)*(n*(n-1))/2;
if(isfullrips) {//count the allocated memory for dense case
gpu_alloc_memory_in_bytes= gpumem_diameter_index_t_array_bytes +
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
gpumem_char_array_bytes +
#endif
gpumem_value_t_array_bytes +
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
gpumem_index_t_array_bytes+
#endif
gpumem_dist_matrix_bytes +
gpumem_index_t_array_bytes +
gpumem_binomial_coeff_table_bytes +
gpumem_index_t_bytes * 2 +
gpumem_index_t_array_bytes +
gpumem_index_t_pairs_array_bytes +
gpumem_index_t_pairs_array_bytes;//this last one is for thrust radix sorting buffer
#ifdef PROFILING
////std::cerr<<"free gpu memory for full rips by calculation in bytes for gpu dim: "<<gpu_dim_max<<": "<<freeMem-gpu_alloc_memory_in_bytes<<std::endl;
//std::cerr<<"gpu memory needed for full rips by calculation in bytes for dim: "<<gpu_dim_max<<": "<<gpu_alloc_memory_in_bytes<<" bytes"<<std::endl;
#endif
if (gpu_alloc_memory_in_bytes <= freeMem){
return gpu_dim_max;
}
}else{//count the alloced memory for sparse case
//includes the d_simplices array used in sparse computation for an approximation for both sparse and full rips compelexes?
gpu_alloc_memory_in_bytes= gpumem_diameter_index_t_array_bytes
#ifdef ASSEMBlE_REDUCTION_SUBMATRIX
+ gpumem_index_t_array_bytes
#endif
+ gpumem_CSR_dist_matrix_bytes
+ gpumem_diameter_index_t_array_bytes
+ gpumem_index_t_array_bytes
+ gpumem_binomial_coeff_table_bytes
+ gpumem_index_t_array_bytes
+ gpumem_index_t_pairs_array_bytes
+ gpumem_index_t_bytes*4
+ gpumem_diameter_index_t_array_bytes
+ gpumem_index_t_pairs_array_bytes;//last one is for buffer needed for sorting
#ifdef PROFILING
////std::cerr<<"(sparse) free gpu memory for full rips by calculation in bytes for gpu dim: "<<gpu_dim_max<<": "<<freeMem-gpu_alloc_memory_in_bytes<<std::endl;
//std::cerr<<"(sparse) gpu memory needed for full rips by calculation in bytes for dim: "<<gpu_dim_max<<": "<<gpu_alloc_memory_in_bytes<<" bytes"<<std::endl;
#endif
if (gpu_alloc_memory_in_bytes <= freeMem){
return gpu_dim_max;
}
}
gpu_dim_max--;
}while(gpu_dim_max>=0);
return 0;
}
index_t get_num_simplices_for_dim(index_t dim){
//beware if dim+1>n and where dim is negative
assert(dim+1<=n && dim+1>=0);
return binomial_coeff(n, dim + 1);
}
index_t get_next_vertex(index_t& v, const index_t idx, const index_t k) const {
return v= upper_bound(
v, [&](const index_t& w) -> bool { return (binomial_coeff(w, k) <= idx); });
}
index_t get_edge_index(const index_t i, const index_t j) const {
return binomial_coeff(i, 2) + j;
}
template <typename OutputIterator>
OutputIterator get_simplex_vertices(index_t idx, const index_t dim, index_t v,
OutputIterator out) const {
--v;
for (index_t k= dim + 1; k > 0; --k) {
get_next_vertex(v, idx, k);
*out++= v;
idx-= binomial_coeff(v, k);
}
return out;
}
value_t compute_diameter(const index_t index, index_t dim) const {
value_t diam= -std::numeric_limits<value_t>::infinity();
vertices.clear();
get_simplex_vertices(index, dim, dist.size(), std::back_inserter(vertices));
for (index_t i= 0; i <= dim; ++i)
for (index_t j= 0; j < i; ++j) {
diam= std::max(diam, dist(vertices[i], vertices[j]));
}
return diam;
}
class simplex_coboundary_enumerator;
void gpu_assemble_columns_to_reduce_plusplus(const index_t dim);
void cpu_byneighbor_assemble_columns_to_reduce(std::vector<struct diameter_index_t_struct>& simplices, std::vector<struct diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim);
void cpu_assemble_columns_to_reduce(std::vector<struct diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim);
void assemble_columns_gpu_accel_transition_to_cpu_only(const bool& more_than_one_dim_cpu_only, std::vector<diameter_index_t_struct>& simplices, std::vector<diameter_index_t_struct>& columns_to_reduce, hash_map<index_t,index_t>& cpu_pivot_column_index, index_t dim);
index_t get_value_pivot_array_hashmap(index_t row_cidx, struct row_cidx_column_idx_struct_compare cmp){
#ifdef USE_PHASHMAP
index_t col_idx= phmap_get_value(row_cidx);
if(col_idx==-1){
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(row_cidx);
if(pair==pivot_column_index.end()){
#endif
index_t first= 0;
index_t last= num_apparent- 1;
while(first<=last){
index_t mid= first + (last-first)/2;
if(h_pivot_array[mid].row_cidx==row_cidx){
return h_pivot_array[mid].column_idx;
}
if(h_pivot_array[mid].row_cidx<row_cidx){
first= mid+1;
}else{
last= mid-1;
}
}
return -1;
}else{
#ifdef USE_PHASHMAP
return col_idx;
#endif
#ifdef USE_GOOGLE_HASHMAP
return pair->second;
#endif
}
}
void compute_dim_0_pairs(std::vector<diameter_index_t_struct>& edges,
std::vector<diameter_index_t_struct>& columns_to_reduce) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
union_find dset(n);
edges= get_edges();
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(edges.rbegin(), edges.rend(), cmp);
std::vector<index_t> vertices_of_edge(2);
for (auto e : edges) {
get_simplex_vertices(e.index, 1, n, vertices_of_edge.rbegin());
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
if(e.diameter!=0) {
outfile << "0 0 " << e.diameter << " " << vertices_of_edge[0]<< " "<< vertices_of_edge[1] << " inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) outfile << "0 0 inf" << i << " " << i << " inf" <<std::endl;
#endif
}
void gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce);
void gpuscan(const index_t dim);
template <typename Column>
diameter_index_t_struct init_coboundary_and_get_pivot_fullmatrix(const diameter_index_t_struct simplex,
Column& working_coboundary, const index_t& dim
, hash_map<index_t, index_t>& pivot_column_index) {
bool check_for_emergent_pair= true;
cofacet_entries.clear();
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
cofacet_entries.push_back(cofacet);
if (check_for_emergent_pair && (simplex.diameter == cofacet.diameter)) {
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()){
return cofacet;
}
check_for_emergent_pair= false;
}
}
}
for (auto cofacet : cofacet_entries) working_coboundary.push(cofacet);
return get_pivot(working_coboundary);
}
template <typename Column>
diameter_index_t_struct init_coboundary_and_get_pivot_submatrix(const diameter_index_t_struct simplex,
Column& working_coboundary, index_t dim, struct row_cidx_column_idx_struct_compare cmp) {
bool check_for_emergent_pair= true;
cofacet_entries.clear();
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
cofacet_entries.push_back(cofacet);
if (check_for_emergent_pair && (simplex.diameter == cofacet.diameter)) {
if(get_value_pivot_array_hashmap(cofacet.index, cmp)==-1) {
return cofacet;
}
check_for_emergent_pair= false;
}
}
}
for (auto cofacet : cofacet_entries) working_coboundary.push(cofacet);
return get_pivot(working_coboundary);
}
template <typename Column>
void add_simplex_coboundary_oblivious(const diameter_index_t_struct simplex, const index_t& dim,
Column& working_coboundary) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) working_coboundary.push(cofacet);
}
}
template <typename Column>
void add_simplex_coboundary_use_reduction_column(const diameter_index_t_struct simplex, const index_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
working_reduction_column.push(simplex);
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next()) {
diameter_index_t_struct cofacet= cofacets.next();
if (cofacet.diameter <= threshold) working_coboundary.push(cofacet);
}
}
//THIS IS THE METHOD TO CALL FOR CPU SIDE FULL MATRIX REDUCTION
template <typename Column>
void add_coboundary_fullmatrix(compressed_sparse_matrix<diameter_index_t_struct>& reduction_matrix,
const std::vector<diameter_index_t_struct>& columns_to_reduce,
const size_t index_column_to_add, const size_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
diameter_index_t_struct column_to_add= columns_to_reduce[index_column_to_add];
add_simplex_coboundary_use_reduction_column(column_to_add, dim, working_reduction_column, working_coboundary);
for (diameter_index_t_struct simplex : reduction_matrix.subrange(index_column_to_add)) {
add_simplex_coboundary_use_reduction_column(simplex, dim, working_reduction_column, working_coboundary);
}
}
//THIS IS THE METHOD TO CALL FOR SUBMATRIX REDUCTION ON CPU SIDE
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
template <typename Column>
void add_coboundary_reduction_submatrix(compressed_sparse_submatrix<diameter_index_t_struct>& reduction_submatrix,
const size_t index_column_to_add, const size_t& dim,
Column& working_reduction_column, Column& working_coboundary) {
diameter_index_t_struct column_to_add= h_columns_to_reduce[index_column_to_add];
add_simplex_coboundary_use_reduction_column(column_to_add, dim, working_reduction_column, working_coboundary);
index_t subindex= h_flagarray_OR_index_to_subindex[index_column_to_add];//this is only defined when ASSEMBLE_REDUCTION_SUBMATRIX is defined
if(subindex>-1) {
for (diameter_index_t_struct simplex : reduction_submatrix.subrange(subindex)) {
add_simplex_coboundary_use_reduction_column(simplex, dim, working_reduction_column, working_coboundary);
}
}
}
#endif
void compute_pairs(std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index, index_t dim) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim " << dim << ":" << std::endl;
#endif
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
compressed_sparse_matrix<diameter_index_t_struct> reduction_matrix;
#endif
for (index_t index_column_to_reduce= 0; index_column_to_reduce < columns_to_reduce.size();
++index_column_to_reduce) {
auto column_to_reduce= columns_to_reduce[index_column_to_reduce];
std::priority_queue<diameter_index_t_struct, std::vector<diameter_index_t_struct>,
greaterdiam_lowerindex_diameter_index_t_struct_compare>
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
working_reduction_column,
#endif
working_coboundary;
value_t diameter= column_to_reduce.diameter;
vertices_of_birth.clear();
get_simplex_vertices(column_to_reduce.index, dim + 1, n, std::back_inserter(vertices_of_birth));
#ifdef INDICATE_PROGRESS
if ((index_column_to_reduce + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "reducing column " << index_column_to_reduce + 1 << "/"
<< columns_to_reduce.size() << " (diameter " << diameter << ")"
<< std::flush << "\r";
#endif
index_t index_column_to_add= index_column_to_reduce;
diameter_index_t_struct pivot;
// initialize index bounds of reduction matrix
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
reduction_matrix.append_column();
#endif
pivot= init_coboundary_and_get_pivot_fullmatrix(columns_to_reduce[index_column_to_add], working_coboundary, dim, pivot_column_index);
while (true) {
if(pivot.index!=-1){
auto left_pair= pivot_column_index.find(pivot.index);
if (left_pair != pivot_column_index.end()) {
index_column_to_add= left_pair->second;
#ifdef CPUONLY_ASSEMBLE_REDUCTION_MATRIX
add_coboundary_fullmatrix(reduction_matrix, columns_to_reduce, index_column_to_add, dim, working_reduction_column, working_coboundary);
pivot= get_pivot(working_coboundary);
#else
add_simplex_coboundary_oblivious(columns_to_reduce[index_column_to_add], dim, working_coboundary);
pivot= get_pivot(working_coboundary);
#endif
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
value_t death= pivot.diameter;
vertices_of_death.clear();
get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
if (death > diameter * ratio) {
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K";
#endif
//std::cout << diameter << " " << death << ")" << std::endl
// << std::flush;
}
#endif
pivot_column_index[pivot.index]= index_column_to_reduce;
break;
}
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
outfile << dim << " " << diameter <<" inf "<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " inf" << std::endl << std::flush;
#endif
break;
}
}
}
}
void compute_pairs_plusplus(
index_t dim,
index_t gpuscan_startingdim) {
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim " << dim << ":" << std::endl;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
compressed_sparse_submatrix<diameter_index_t_struct> reduction_submatrix;
#endif
#ifdef INDICATE_PROGRESS
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
struct row_cidx_column_idx_struct_compare cmp_pivots;
index_t num_columns_to_iterate= *h_num_columns_to_reduce;
if(dim>=gpuscan_startingdim){
num_columns_to_iterate= *h_num_nonapparent;
}
for (index_t sub_index_column_to_reduce= 0; sub_index_column_to_reduce < num_columns_to_iterate;
++sub_index_column_to_reduce) {
index_t index_column_to_reduce =sub_index_column_to_reduce;
if(dim>=gpuscan_startingdim) {
index_column_to_reduce= h_pivot_column_index_array_OR_nonapparent_cols[sub_index_column_to_reduce];//h_nonapparent_cols
}
auto column_to_reduce= h_columns_to_reduce[index_column_to_reduce];
std::priority_queue<diameter_index_t_struct, std::vector<diameter_index_t_struct>,
greaterdiam_lowerindex_diameter_index_t_struct_compare>
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
working_reduction_column,
#endif
working_coboundary;
value_t diameter= column_to_reduce.diameter;
vertices_of_birth.clear();
get_simplex_vertices(column_to_reduce.index, dim + 1, n, std::back_inserter(vertices_of_birth));
index_t index_column_to_add= index_column_to_reduce;
struct diameter_index_t_struct pivot;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
reduction_submatrix.append_column();
#endif
pivot= init_coboundary_and_get_pivot_submatrix(column_to_reduce, working_coboundary, dim, cmp_pivots);
while (true) {
#ifdef INDICATE_PROGRESS
//if(sub_index_column_to_reduce%2==0){
if (std::chrono::steady_clock::now() > next) {
//std::cerr<< clear_line << "reducing column " << index_column_to_reduce + 1
<< "/" << *h_num_columns_to_reduce << " (diameter " << diameter << ")"
<< std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
if(pivot.index!=-1){
index_column_to_add= get_value_pivot_array_hashmap(pivot.index,cmp_pivots);
if(index_column_to_add!=-1) {
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
add_coboundary_reduction_submatrix(reduction_submatrix, index_column_to_add,
dim, working_reduction_column, working_coboundary);
pivot= get_pivot(working_coboundary);
#else
add_simplex_coboundary_oblivious(h_columns_to_reduce[index_column_to_add], dim, working_coboundary);
pivot= get_pivot(working_coboundary);
#endif
}else{
#ifdef PRINT_PERSISTENCE_PAIRS
value_t death= pivot.diameter;
// TODO(@captain-pool): What's the length of the vertices when doing higher dimensions?
// if(dim == 1){
// vertices_of_death.clear();
// get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
//// // Feature gets created by Edges (1-simplex) and get closed by triangles (2-simplex)
//// // Selecting vertex of maximum length edge
// outfile << vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
// }
if (death > diameter * ratio) {
#ifdef INDICATE_PROGRESS
std::cerr << clear_line << std::flush;
#endif
if(dim == 1){
vertices_of_death.clear();
get_simplex_vertices(pivot.index, dim + 1, n, std::back_inserter(vertices_of_death));
// Feature gets created by Edges (1-simplex) and get closed by triangles (2-simplex)
// Selecting vertex of maximum length edge
// outfile << dim << " " << diameter << " " << death << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
value_t d1 = dist.distance(vertices_of_death[0], vertices_of_death[1]);
value_t d2 = dist.distance(vertices_of_death[0], vertices_of_death[2]);
value_t d3 = dist.distance(vertices_of_death[1], vertices_of_death[2]);
value_t c = dist.distance(vertices_of_birth[0], vertices_of_birth[1]);
auto k = vertices_of_birth[0];
auto l = vertices_of_birth[1];
if( d1 >= c){
k = vertices_of_death[0];
l = vertices_of_death[1];
}
else if(d2 >= c){
k = vertices_of_death[0];
l = vertices_of_death[2];
}
else if(d3 >= c){
k = vertices_of_death[1];
l = vertices_of_death[2];
}
outfile<< dim << " " << dim << " " << vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << k << " " << l << std::endl;
//outfile<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
}
else if(dim == 2){
// outfile << dim << " " << diameter << " " << death << " " << vertices_of_death[0] << " " << vertices_of_death[1] <<" " << vertices_of_death[2] << std::endl;
//outfile << dim << " " << diameter << " " << birth << " " << vertices_of_birth[0] << " " << vertices_of_birth[1] <<" " << vertices_of_birth[2] << std::endl;
}
else{
std::cout << " [" << diameter << "," << death << ")" << std::endl
<< std::flush;
}
}
#endif
#ifdef USE_PHASHMAP
phmap_put(pivot.index, index_column_to_reduce);
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index[pivot.index]= index_column_to_reduce;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
while (true) {
diameter_index_t_struct e= pop_pivot(working_reduction_column);
if (e.index == -1) break;
reduction_submatrix.push_back(e);
}
#endif
break;
}
} else {
#ifdef PRINT_PERSISTENCE_PAIRS
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
outfile << dim << " " << diameter <<" inf "<< vertices_of_birth[0] << " " << vertices_of_birth[1] << " inf" << std::endl << std::flush;
#endif
break;
}
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
std::vector<diameter_index_t_struct> get_edges();
void compute_barcodes();
};
template<>
class ripser<compressed_lower_distance_matrix>::simplex_coboundary_enumerator {
private:
index_t idx_below, idx_above, v, k;
std::vector<index_t> vertices;
///const diameter_index_t simplex;
const struct diameter_index_t_struct simplex;
const compressed_lower_distance_matrix& dist;
const binomial_coeff_table& binomial_coeff;
public:
simplex_coboundary_enumerator(
const struct diameter_index_t_struct _simplex, index_t _dim,
const ripser<compressed_lower_distance_matrix>& parent)
: idx_below(_simplex.index),
idx_above(0), v(parent.n - 1), k(_dim + 1),
vertices(_dim + 1), simplex(_simplex), dist(parent.dist),
binomial_coeff(parent.binomial_coeff) {
parent.get_simplex_vertices(_simplex.index, _dim, parent.n, vertices.begin());
}
bool has_next(bool all_cofacets= true) {
return (v >= k && (all_cofacets || binomial_coeff(v, k) > idx_below));//second condition after the || is to ensure iteration of cofacets with no need to adjust
}
struct diameter_index_t_struct next() {
while ((binomial_coeff(v, k) <= idx_below)) {
idx_below -= binomial_coeff(v, k);
idx_above += binomial_coeff(v, k + 1);
--v;
--k;
assert(k != -1);
}
value_t cofacet_diameter= simplex.diameter;
for (index_t w : vertices) cofacet_diameter= std::max(cofacet_diameter, dist(v, w));
index_t cofacet_index= idx_above + binomial_coeff(v--, k + 1) + idx_below;
return {cofacet_diameter, cofacet_index};
}
};
template <> class ripser<sparse_distance_matrix>::simplex_coboundary_enumerator {
const ripser& parent;
index_t idx_below, idx_above, k;
std::vector<index_t> vertices;
const diameter_index_t_struct simplex;
const sparse_distance_matrix& dist;
std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator>& neighbor_it;
std::vector<std::vector<index_diameter_t_struct>::const_reverse_iterator>& neighbor_end;
index_diameter_t_struct neighbor;
public:
simplex_coboundary_enumerator(const diameter_index_t_struct _simplex, const index_t _dim,
const ripser& _parent)
: parent(_parent), idx_below(_simplex.index), idx_above(0), k(_dim + 1),
vertices(_dim + 1), simplex(_simplex),
dist(parent.dist),
neighbor_it(dist.neighbor_it),
neighbor_end(dist.neighbor_end) {
neighbor_it.clear();
neighbor_end.clear();
parent.get_simplex_vertices(idx_below, _dim, parent.n, vertices.rbegin());
for (auto v : vertices) {
neighbor_it.push_back(dist.neighbors[v].rbegin());
neighbor_end.push_back(dist.neighbors[v].rend());
}
}
bool has_next(bool all_cofacets= true) {
//auto& x will permanently change upon updates to it.
for (auto &it0= neighbor_it[0], &end0= neighbor_end[0]; it0 != end0; ++it0) {
neighbor= *it0;//neighbor is a pair: diameter_index_t_struct
for (size_t idx= 1; idx < neighbor_it.size(); ++idx) {
auto &it= neighbor_it[idx], end= neighbor_end[idx];
//enforce the invariant that get_index(*it)<=get_index(neighbor)
while(it->index > neighbor.index)
if (++it == end) return false;
if(it->index != neighbor.index)
goto continue_outer;//try the next number in neighbor_it[0]
else
//update neighbor to the max of matching vertices of "neighbors" of each vertex in simplex
neighbor= (neighbor.diameter>it->diameter)?neighbor:*it;
}
while(k>0 && vertices[k-1]>neighbor.index){
if (!all_cofacets) return false;
idx_below -= parent.binomial_coeff(vertices[k - 1], k);
idx_above += parent.binomial_coeff(vertices[k - 1], k + 1);
--k;
}
return true;
continue_outer:;
}
return false;
}
diameter_index_t_struct next() {
++neighbor_it[0];
value_t cofacet_diameter= std::max(simplex.diameter, neighbor.diameter);
index_t cofacet_index= idx_above+parent.binomial_coeff(neighbor.index,k+1)+idx_below;
return {cofacet_diameter,cofacet_index};
}
};
template<> std::vector<diameter_index_t_struct> ripser<compressed_lower_distance_matrix>::get_edges() {
std::vector<diameter_index_t_struct> edges;
for (index_t index= binomial_coeff(n, 2); index-- > 0;) {
value_t diameter= compute_diameter(index, 1);
if (diameter <= threshold) edges.push_back({diameter, index});
}
return edges;
}
template <> std::vector<diameter_index_t_struct> ripser<sparse_distance_matrix>::get_edges() {
std::vector<diameter_index_t_struct> edges;
for (index_t i= 0; i < n; ++i)
for (auto nbr : dist.neighbors[i]) {
index_t j= nbr.index;
//(i choose 2) + (j choose 1) is the combinatorial index of nbr
if (i > j) edges.push_back({nbr.diameter, binomial_coeff(i, 2) + j});
}
return edges;
}
template <>
void ripser<compressed_lower_distance_matrix>::gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce
){
union_find dset(n);
index_t max_num_edges= binomial_coeff(n, 2);
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse cmp_reverse;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
cudaMemset(d_flagarray_OR_index_to_subindex, 0, sizeof(index_t)*max_num_edges);
CUDACHECK(cudaDeviceSynchronize());
#else
cudaMemset(d_flagarray, 0, sizeof(char)*max_num_edges);
CUDACHECK(cudaDeviceSynchronize());
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_edges<index_t>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
populate_edges<<<grid_size, 256>>>(d_flagarray_OR_index_to_subindex, d_columns_to_reduce, threshold, d_distance_matrix, max_num_edges, n, d_binomial_coeff);
CUDACHECK(cudaDeviceSynchronize());
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex+max_num_edges, 1);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_edges, cmp_reverse);
#else
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_edges<char>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
populate_edges<<<grid_size, 256>>>(d_flagarray, d_columns_to_reduce, threshold, d_distance_matrix, max_num_edges, n, d_binomial_coeff);
CUDACHECK(cudaDeviceSynchronize());
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray, d_flagarray+max_num_edges, 1);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_edges, cmp_reverse);
#endif
#ifdef COUNTING
//std::cerr<<"num edges filtered by diameter: "<<*h_num_columns_to_reduce<<std::endl;
#endif
cudaMemcpy(h_columns_to_reduce, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), cudaMemcpyDeviceToHost);
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
std::vector<index_t> vertices_of_edge(2);
for(index_t idx=0; idx<*h_num_columns_to_reduce; idx++){
struct diameter_index_t_struct e= h_columns_to_reduce[idx];
vertices_of_edge.clear();
get_simplex_vertices(e.index, 1, n, std::back_inserter(vertices_of_edge));
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
//remove paired destroyer columns (we compute cohomology)
if(e.diameter!=0) {
// std::cout << " [0," << e.diameter << ")" << std::endl;
outfile << "0 0 " << e.diameter << " " << vertices_of_edge[0] << " " << vertices_of_edge[1] << " inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
//don't want to reverse the h_columns_to_reduce so just put into vector and copy later
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
*h_num_columns_to_reduce= columns_to_reduce.size();
*h_num_nonapparent= *h_num_columns_to_reduce;//we haven't found any apparent columns yet, so set all columns to nonapparent
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) outfile << "0 0 inf " << i << " " << i << " inf" <<std::endl << std::flush;
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce: dim 1, "<<*h_num_columns_to_reduce<<std::endl;
#endif
}
template <>
void ripser<sparse_distance_matrix>::gpu_compute_dim_0_pairs(std::vector<struct diameter_index_t_struct>& columns_to_reduce
){
union_find dset(n);
struct greaterdiam_lowerindex_diameter_index_t_struct_compare_reverse cmp_reverse;
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_edges_preparingcount, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//grid_size will return 0 if we have CPU-only code inside d_CSR_distance_matrix
*h_num_simplices= 0;
//populate edges kernel cannot have some threads iterating in the inner for loop, preventing shfl_sync() from runnning
int* d_num;
CUDACHECK(cudaMalloc((void **) & d_num, sizeof(int)*(n+1)));
cudaMemset(d_num, 0, sizeof(int)*(n+1));
populate_sparse_edges_preparingcount<<<grid_size, 256>>>(d_num, d_CSR_distance_matrix, n, d_num_simplices);
CUDACHECK(cudaDeviceSynchronize());
thrust::exclusive_scan(thrust::device, d_num, d_num+n+1, d_num, 0);
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_edges_prefixsum, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
populate_sparse_edges_prefixsum<<<grid_size,256>>>(d_simplices, d_num, d_CSR_distance_matrix, d_binomial_coeff, n, d_num_simplices);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_simplices, d_simplices+ *h_num_simplices, cmp_reverse);
CUDACHECK(cudaDeviceSynchronize());
#ifdef COUNTING
//std::cerr<<"num (sparse) edges filtered: "<<*h_num_simplices<<std::endl;
#endif
cudaMemcpy(h_simplices, d_simplices, sizeof(struct diameter_index_t_struct)*(*h_num_simplices), cudaMemcpyDeviceToHost);
#ifdef PRINT_PERSISTENCE_PAIRS
//std::cerr << "persistence intervals in dim 0:" << std::endl;
#endif
std::vector<index_t> vertices_of_edge(2);
for(index_t idx=0; idx<*h_num_simplices; idx++){
struct diameter_index_t_struct e= h_simplices[idx];
vertices_of_edge.clear();
get_simplex_vertices(e.index, 1, n, std::back_inserter(vertices_of_edge));
index_t u= dset.find(vertices_of_edge[0]), v= dset.find(vertices_of_edge[1]);
if (u != v) {
#ifdef PRINT_PERSISTENCE_PAIRS
if(e.diameter!=0) {
//std::cerr << clear_line << "Writing Line . . ." << std::flush;
std::cout << "0 " << vertices_of_edge[0] << " " << vertices_of_edge[1] << " inf inf" << std::endl;
}
#endif
dset.link(u, v);
} else {
columns_to_reduce.push_back(e);
}
}
std::reverse(columns_to_reduce.begin(), columns_to_reduce.end());
//don't want to reverse the h_columns_to_reduce so just put into vector and copy later
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
*h_num_columns_to_reduce= columns_to_reduce.size();
*h_num_nonapparent= *h_num_columns_to_reduce;//we haven't found any apparent columns yet, so set all columns to nonapparent
#ifdef PRINT_PERSISTENCE_PAIRS
for (index_t i= 0; i < n; ++i)
if (dset.find(i) == i) std::cout << "0 " << i << " " << i << " inf inf" << std::endl << std::flush;
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce: dim 1, "<<*h_num_columns_to_reduce<<std::endl;
#endif
}
//finding apparent pairs
template <>
void ripser<compressed_lower_distance_matrix>::gpuscan(const index_t dim){
//(need to sort for filtration order before gpuscan first, then apply gpu scan then sort again)
//note: scan kernel can eliminate high percentage of columns in little time.
//filter by fully reduced columns (apparent pairs) found by gpu scan
//need this to prevent 0-blocks kernels from executing
if(*h_num_columns_to_reduce==0){
return;
}
index_t num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<num_simplices<<std::endl;
#endif
cudaMemcpy(d_columns_to_reduce, h_columns_to_reduce,
sizeof(struct diameter_index_t_struct) * *h_num_columns_to_reduce, cudaMemcpyHostToDevice);
CUDACHECK(cudaDeviceSynchronize());
thrust::fill(thrust::device, d_cidx_to_diameter, d_cidx_to_diameter + num_simplices, -MAX_FLOAT);
CUDACHECK(cudaDeviceSynchronize());
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_cidx_to_diam, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//there will be kernel launch errors if columns_to_reduce.size()==0; it causes thrust to complain later in the code execution
init_cidx_to_diam << < grid_size, 256 >> >
(d_cidx_to_diameter, d_columns_to_reduce, *h_num_columns_to_reduce);
CUDACHECK(cudaDeviceSynchronize());
cudaMemset(d_lowest_one_of_apparent_pair, -1, sizeof(index_t) * *h_num_columns_to_reduce);
CUDACHECK(cudaDeviceSynchronize());
Stopwatch sw;
sw.start();
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, coboundary_findapparent_single_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
coboundary_findapparent_single_kernel << < grid_size, 256, 256 * (dim + 1) * sizeof(index_t) >> >
(d_cidx_to_diameter, d_columns_to_reduce, d_lowest_one_of_apparent_pair, dim, num_simplices, n, d_binomial_coeff, *h_num_columns_to_reduce, d_distance_matrix, threshold);
CUDACHECK(cudaDeviceSynchronize());
sw.stop();
#ifdef PROFILING
//std::cerr<<"gpu scan kernel time for dim: "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
CUDACHECK(cudaDeviceSynchronize());
//post processing (inserting appararent pairs into a "hash map": 2 level data structure) now on GPU
Stopwatch postprocessing;
postprocessing.start();
struct row_cidx_column_idx_struct_compare cmp_pivots;
//put pairs into an array
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, gpu_insert_pivots_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
gpu_insert_pivots_kernel<< < grid_size, 256 >> >(d_pivot_array, d_lowest_one_of_apparent_pair, d_pivot_column_index_OR_nonapparent_cols, *h_num_columns_to_reduce, d_num_nonapparent);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_pivot_array, d_pivot_array+*h_num_columns_to_reduce, cmp_pivots);
thrust::sort(thrust::device, d_pivot_column_index_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols+*h_num_nonapparent);
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
#ifdef COUNTING
//std::cerr<<"num apparent for dim: "<<dim<<" is: " <<num_apparent<<std::endl;
#endif
//transfer to CPU side all GPU data structures
cudaMemcpy(h_pivot_array, d_pivot_array, sizeof(index_t_pair_struct)*(num_apparent), cudaMemcpyDeviceToHost);
cudaMemcpy(h_pivot_column_index_array_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*(*h_num_nonapparent), cudaMemcpyDeviceToHost);
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
cudaMemset(d_flagarray_OR_index_to_subindex, -1, sizeof(index_t)* *h_num_columns_to_reduce);
//perform the scatter operation
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_index_to_subindex, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
init_index_to_subindex<< < grid_size, 256 >> >
(d_flagarray_OR_index_to_subindex, d_pivot_column_index_OR_nonapparent_cols, *h_num_nonapparent);
cudaMemcpy(h_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex, sizeof(index_t)*(*h_num_columns_to_reduce), cudaMemcpyDeviceToHost);
#endif
postprocessing.stop();
#ifdef PROFILING
//std::cerr<<"INSERTION POSTPROCESSING FOR GPU IN DIM "<<dim<<": "<<postprocessing.ms()/1000.0<<"s"<<std::endl;
#endif
}
//finding apparent pairs
template <>
void ripser<sparse_distance_matrix>::gpuscan(const index_t dim){
//(need to sort for filtration order before gpuscan first, then apply gpu scan then sort again)
//note: scan kernel can eliminate high percentage of columns in little time.
//filter by fully reduced columns (apparent pairs) found by gpu scan
//need this to prevent 0-blocks kernels from executing
if(*h_num_columns_to_reduce==0){
return;
}
index_t num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<num_simplices<<std::endl;
#endif
cudaMemcpy(d_columns_to_reduce, h_columns_to_reduce,
sizeof(struct diameter_index_t_struct) * *h_num_columns_to_reduce, cudaMemcpyHostToDevice);
CUDACHECK(cudaDeviceSynchronize());
//use binary search on d_columns_to_reduce as retrival process
cudaMemcpy(d_cidx_diameter_pairs_sortedlist, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), cudaMemcpyDeviceToDevice);
struct lowerindex_lowerdiam_diameter_index_t_struct_compare cmp_cidx_diameter;
thrust::sort(thrust::device, d_cidx_diameter_pairs_sortedlist, d_cidx_diameter_pairs_sortedlist+*h_num_columns_to_reduce, cmp_cidx_diameter);
CUDACHECK(cudaDeviceSynchronize());
cudaMemset(d_lowest_one_of_apparent_pair, -1, sizeof(index_t) * *h_num_columns_to_reduce);
CUDACHECK(cudaDeviceSynchronize());
Stopwatch sw;
sw.start();
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, coboundary_findapparent_sparse_single_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
coboundary_findapparent_sparse_single_kernel << < grid_size, 256, 256 * (dim + 1) * sizeof(index_t) >> >
(d_cidx_diameter_pairs_sortedlist, d_columns_to_reduce, d_lowest_one_of_apparent_pair, dim, n, d_binomial_coeff, *h_num_columns_to_reduce, d_CSR_distance_matrix, threshold);
CUDACHECK(cudaDeviceSynchronize());
sw.stop();
#ifdef PROFILING
//std::cerr<<"gpu scan kernel time for dim: "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
CUDACHECK(cudaDeviceSynchronize());
//post processing (inserting appararent pairs into a "hash map": 2 level data structure) now on GPU
Stopwatch postprocessing;
postprocessing.start();
struct row_cidx_column_idx_struct_compare cmp_pivots;
//put pairs into an array
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, gpu_insert_pivots_kernel, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
gpu_insert_pivots_kernel<< < grid_size, 256 >> >(d_pivot_array, d_lowest_one_of_apparent_pair, d_pivot_column_index_OR_nonapparent_cols, *h_num_columns_to_reduce, d_num_nonapparent);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_pivot_array, d_pivot_array+*h_num_columns_to_reduce, cmp_pivots);
thrust::sort(thrust::device, d_pivot_column_index_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols+*h_num_nonapparent);
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
#ifdef COUNTING
//std::cerr<<"num apparent for dim: "<<dim<<" is: "<<num_apparent<<std::endl;
#endif
//transfer to CPU side all GPU data structures
cudaMemcpy(h_pivot_array, d_pivot_array, sizeof(index_t_pair_struct)*(num_apparent), cudaMemcpyDeviceToHost);
cudaMemcpy(h_pivot_column_index_array_OR_nonapparent_cols, d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*(*h_num_nonapparent), cudaMemcpyDeviceToHost);
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
cudaMemset(d_flagarray_OR_index_to_subindex, -1, sizeof(index_t)* *h_num_columns_to_reduce);
//perform the scatter operation
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, init_index_to_subindex, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
init_index_to_subindex<< < grid_size, 256 >> >
(d_flagarray_OR_index_to_subindex, d_pivot_column_index_OR_nonapparent_cols, *h_num_nonapparent);
cudaMemcpy(h_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex, sizeof(index_t)*(*h_num_columns_to_reduce), cudaMemcpyDeviceToHost);
#endif
postprocessing.stop();
#ifdef PROFILING
//std::cerr<<"INSERTION POSTPROCESSING FOR GPU IN DIM "<<dim<<": "<<postprocessing.ms()/1000.0<<"s"<<std::endl;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::gpu_assemble_columns_to_reduce_plusplus(const index_t dim) {
index_t max_num_simplices= binomial_coeff(n, dim + 1);
Stopwatch sw;
sw.start();
#pragma omp parallel for schedule(guided,1)
for (index_t i= 0; i < max_num_simplices; i++) {
#ifdef USE_PHASHMAP
h_pivot_column_index_array_OR_nonapparent_cols[i]= phmap_get_value(i);
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(i);
if(pair!=pivot_column_index.end()){
h_pivot_column_index_array_OR_nonapparent_cols[i]= pair->second;
}else{
h_pivot_column_index_array_OR_nonapparent_cols[i]= -1;
}
#endif
}
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
if(num_apparent>0) {
#pragma omp parallel for schedule(guided, 1)
for (index_t i= 0; i < num_apparent; i++) {
index_t row_cidx= h_pivot_array[i].row_cidx;
h_pivot_column_index_array_OR_nonapparent_cols[row_cidx]= h_pivot_array[i].column_idx;
}
}
*h_num_columns_to_reduce= 0;
cudaMemcpy(d_pivot_column_index_OR_nonapparent_cols, h_pivot_column_index_array_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices, cudaMemcpyHostToDevice);
sw.stop();
#ifdef PROFILING
//std::cerr<<"time to copy hash map for dim "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
cudaMemset(d_flagarray_OR_index_to_subindex, 0, sizeof(index_t)*max_num_simplices);
CUDACHECK(cudaDeviceSynchronize());
#else
cudaMemset(d_flagarray, 0, sizeof(char)*max_num_simplices);
CUDACHECK(cudaDeviceSynchronize());
#endif
Stopwatch pop_cols_timer;
pop_cols_timer.start();
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_columns_to_reduce<index_t>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
populate_columns_to_reduce<<<grid_size, 256, 256 * (dim + 1) * sizeof(index_t)>>>(d_flagarray_OR_index_to_subindex, d_columns_to_reduce, d_pivot_column_index_OR_nonapparent_cols, d_distance_matrix, n, max_num_simplices, dim, threshold, d_binomial_coeff);
#else
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_columns_to_reduce<char>, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
populate_columns_to_reduce<<<grid_size, 256, 256 * (dim + 1) * sizeof(index_t)>>>(d_flagarray, d_columns_to_reduce, d_pivot_column_index_OR_nonapparent_cols, d_distance_matrix, n, max_num_simplices, dim, threshold, d_binomial_coeff);
#endif
CUDACHECK(cudaDeviceSynchronize());
pop_cols_timer.stop();
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
#ifdef ASSEMBLE_REDUCTION_SUBMATRIX
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray_OR_index_to_subindex, d_flagarray_OR_index_to_subindex+max_num_simplices, 1);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_simplices, cmp);
#else
*h_num_columns_to_reduce= thrust::count(thrust::device , d_flagarray, d_flagarray+max_num_simplices, 1);
CUDACHECK(cudaDeviceSynchronize());
thrust::sort(thrust::device, d_columns_to_reduce, d_columns_to_reduce+ max_num_simplices, cmp);
#endif
#ifdef COUNTING
//std::cerr<<"num cols to reduce for dim "<<dim<<": "<<*h_num_columns_to_reduce<<std::endl;
#endif
cudaMemcpy(h_columns_to_reduce, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce), cudaMemcpyDeviceToHost);
}
template <>
void ripser<sparse_distance_matrix>::gpu_assemble_columns_to_reduce_plusplus(const index_t dim) {
index_t max_num_simplices= binomial_coeff(n,dim+1);
#ifdef COUNTING
//std::cerr<<"max possible num simplices: "<<max_num_simplices<<std::endl;
#endif
*h_num_columns_to_reduce= 0;
CUDACHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &grid_size, populate_sparse_simplices_warpfiltering, 256, 0));
grid_size *= deviceProp.multiProcessorCount;
//columns_to_reduce contains the "new set" of simplices
#ifdef COUNTING
//std::cerr<<"(sparse) num simplices before kernel call: "<<*h_num_simplices<<std::endl;
#endif
populate_sparse_simplices_warpfiltering<<<grid_size, 256, 256 * dim * sizeof(index_t)>>>(d_simplices, d_num_simplices, d_columns_to_reduce, d_num_columns_to_reduce, d_CSR_distance_matrix, n, dim, threshold, d_binomial_coeff);
CUDACHECK(cudaDeviceSynchronize());
cudaMemcpy(d_simplices, d_columns_to_reduce, sizeof(struct diameter_index_t_struct)*(*h_num_columns_to_reduce),cudaMemcpyDeviceToDevice);
*h_num_simplices= *h_num_columns_to_reduce;
#ifdef COUNTING
//std::cerr<<"(sparse) num simplices for dim "<<dim<<": "<<*h_num_simplices<<std::endl;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
thrust::sort(thrust::device, d_simplices, d_simplices+*h_num_simplices, cmp);
CUDACHECK(cudaDeviceSynchronize());
cudaMemcpy(h_simplices, d_simplices, sizeof(struct diameter_index_t_struct)*(*h_num_simplices),cudaMemcpyDeviceToHost);
//populate the columns_to_reduce vector on CPU side
struct row_cidx_column_idx_struct_compare pair_cmp;
columns_to_reduce.clear();
for(index_t i=0; i<*h_num_simplices; i++){
struct diameter_index_t_struct s= h_simplices[i];
if(s.diameter<=threshold &&
get_value_pivot_array_hashmap(s.index, pair_cmp)==-1){
columns_to_reduce.push_back(s);
}
}
#ifdef COUNTING
//std::cerr<<"columns to reduce for dim: "<<dim<<": "<<columns_to_reduce.size()<<std::endl;
#endif
*h_num_columns_to_reduce= columns_to_reduce.size();
#pragma omp parallel for schedule(guided,1)
for(index_t i=0; i<columns_to_reduce.size(); i++){
h_columns_to_reduce[i]= columns_to_reduce[i];
}
}
template <>
void ripser<compressed_lower_distance_matrix>::cpu_byneighbor_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& simplices,
std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t,index_t>& pivot_column_index, index_t dim){
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns on CPU" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
--dim;
columns_to_reduce.clear();
std::vector<struct diameter_index_t_struct> next_simplices;
for (struct diameter_index_t_struct& simplex : simplices) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next(false)) {
#ifdef INDICATE_PROGRESS
if (std::chrono::steady_clock::now() > next) {
//std::cerr << clear_line << "assembling " << next_simplices.size()
<< " columns (processing " << std::distance(&simplices[0], &simplex)
<< "/" << simplices.size() << " simplices)" << std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
auto cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
next_simplices.push_back(cofacet);
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()) {
columns_to_reduce.push_back(cofacet);
}
}
}
}
simplices.swap(next_simplices);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "sorting " << columns_to_reduce.size() << " columns"
<< std::flush;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<sparse_distance_matrix>::cpu_byneighbor_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& simplices,
std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t,index_t>& pivot_column_index, index_t dim){
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
--dim;
columns_to_reduce.clear();
std::vector<struct diameter_index_t_struct> next_simplices;
for (struct diameter_index_t_struct& simplex : simplices) {
simplex_coboundary_enumerator cofacets(simplex, dim, *this);
while (cofacets.has_next(false)) {
#ifdef INDICATE_PROGRESS
if (std::chrono::steady_clock::now() > next) {
//std::cerr << clear_line << "assembling " << next_simplices.size()
<< " columns (processing " << std::distance(&simplices[0], &simplex)
<< "/" << simplices.size() << " simplices)" << std::flush;
next= std::chrono::steady_clock::now() + time_step;
}
#endif
auto cofacet= cofacets.next();
if (cofacet.diameter <= threshold) {
next_simplices.push_back(cofacet);
if (pivot_column_index.find(cofacet.index) == pivot_column_index.end()) { //|| pivot_column_index[cofacet.index]==-1)
columns_to_reduce.push_back(cofacet);
}
}
}
}
simplices.swap(next_simplices);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "sorting " << columns_to_reduce.size() << " columns"
<< std::flush;
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::cpu_assemble_columns_to_reduce(std::vector<diameter_index_t_struct>& columns_to_reduce,
hash_map<index_t, index_t>& pivot_column_index,
index_t dim) {
index_t num_simplices= binomial_coeff(n, dim + 1);
#ifdef COUNTING
//std::cerr<<"max num possible simplices: "<<num_simplices<<std::endl;
#endif
columns_to_reduce.clear();
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "assembling " << num_simplices << " columns" << std::flush << "\r";
#endif
index_t count= 0;
for (index_t index= 0; index < num_simplices; ++index) {
if (pivot_column_index.find(index) == pivot_column_index.end()) {
value_t diameter= compute_diameter(index, dim);
if (diameter <= threshold){
columns_to_reduce.push_back({diameter,index});
count++;
}
#ifdef INDICATE_PROGRESS
if ((index + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "assembled " << columns_to_reduce.size() << " out of " << (index + 1)
<< "/" << num_simplices << " columns" << std::flush << "\r";
#endif
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "sorting " << num_simplices << " columns" << std::flush << "\r";
#endif
struct greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(),
cmp);
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K";
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::assemble_columns_gpu_accel_transition_to_cpu_only(const bool& more_than_one_dim_cpu_only,std::vector<diameter_index_t_struct>& simplices, std::vector<diameter_index_t_struct>& columns_to_reduce, hash_map<index_t,index_t>& cpu_pivot_column_index,
index_t dim){
index_t max_num_simplices= binomial_coeff(n,dim+1);
//insert all pivots from the two gpu pivot data structures into cpu_pivot_column_index, cannot parallelize this for loop due to concurrency issues of hashmaps
for (index_t i= 0; i < max_num_simplices; i++) {
#ifdef USE_PHASHMAP
index_t col_idx= phmap_get_value(i);
if(col_idx!=-1) {
cpu_pivot_column_index[i]= col_idx;
}
#endif
#ifdef USE_GOOGLE_HASHMAP
auto pair= pivot_column_index.find(i);
if(pair!=pivot_column_index.end()) {
cpu_pivot_column_index[i]= pair->second;
}
//}else{
//h_pivot_column_index_array_OR_nonapparent_cols[i]= -1;
//}
#endif
}
num_apparent= *h_num_columns_to_reduce-*h_num_nonapparent;
if(num_apparent>0) {
//we can't insert into the hashmap in parallel
for (index_t i= 0; i < num_apparent; i++) {
index_t row_cidx= h_pivot_array[i].row_cidx;
index_t column_idx= h_pivot_array[i].column_idx;
if(column_idx!=-1) {
cpu_pivot_column_index[row_cidx]= column_idx;
}
}
}
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << "assembling columns" << std::flush;
std::chrono::steady_clock::time_point next= std::chrono::steady_clock::now() + time_step;
#endif
columns_to_reduce.clear();
simplices.clear();
index_t count_simplices= 0;
//cpu_pivot_column_index can't be parallelized for lookup
for (index_t index= 0; index < max_num_simplices; ++index) {
value_t diameter= -MAX_FLOAT;
//the second condition after the || should never happen, since we never insert such pairs into cpu_pivot_column_index
if (cpu_pivot_column_index.find(index) == cpu_pivot_column_index.end() || cpu_pivot_column_index[index]==-1) {
diameter= compute_diameter(index, dim);
if (diameter <= threshold) {
columns_to_reduce.push_back({diameter, index});
}
#ifdef INDICATE_PROGRESS
if ((index + 1) % 1000000 == 0)
//std::cerr << "\033[K"
<< "assembled " << columns_to_reduce.size() << " out of " << (index + 1)
<< "/" << max_num_simplices << " columns" << std::flush << "\r";
#endif
}
if(more_than_one_dim_cpu_only){
if(diameter==-MAX_FLOAT){
diameter= compute_diameter(index, dim);
}
if(diameter<=threshold){
simplices.push_back({diameter,index});
count_simplices++;
}
}
}
#ifdef COUNTING
if(more_than_one_dim_cpu_only){
//std::cerr<<"(if there are multiple dimensions needed to compute) num simplices for dim: "<<dim<<" is: "<<count_simplices<<std::endl;
}
#endif
#ifdef INDICATE_PROGRESS
//std::cerr << "\033[K"
<< "sorting " << columns_to_reduce.size() << " columns" << std::flush << "\r";
#endif
greaterdiam_lowerindex_diameter_index_t_struct_compare cmp;
std::sort(columns_to_reduce.begin(), columns_to_reduce.end(), cmp);
#ifdef COUNTING
//std::cerr<<"NUM COLS to reduce for CPU: "<<columns_to_reduce.size()<<std::endl;
#endif
#ifdef INDICATE_PROGRESS
//std::cerr << clear_line << std::flush;
#endif
}
template <>
void ripser<compressed_lower_distance_matrix>::compute_barcodes() {
Stopwatch sw, gpu_accel_timer;
gpu_accel_timer.start();
sw.start();
index_t gpu_dim_max= calculate_gpu_dim_max_for_fullrips_computation_from_memory(dim_max, true);
#ifdef PROFILING
//std::cerr<<"recalculated dim_max based on GPU free DRAM capacity: "<<gpu_dim_max<<std::endl;
#endif
max_num_simplices_forall_dims= gpu_dim_max<(n/2)-1?get_num_simplices_for_dim(gpu_dim_max): get_num_simplices_for_dim((n/2)-1);
if(gpu_dim_max>=1){
//std::cerr<<"max possible num simplices over all dim<=dim_max (without clearing) for memory allocation: "<<max_num_simplices_forall_dims<<std::endl;
CUDACHECK(cudaMalloc((void **) &d_columns_to_reduce, sizeof(struct diameter_index_t_struct) * max_num_simplices_forall_dims));
h_columns_to_reduce= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)* max_num_simplices_forall_dims);
if(h_columns_to_reduce==NULL){
//std::cerr<<"malloc for h_columns_to_reduce failed"<<std::endl;
exit(1);
}
#ifndef ASSEMBLE_REDUCTION_SUBMATRIX
CUDACHECK(cudaMalloc((void**) &d_flagarray, sizeof(char)*max_num_simplices_forall_dims));
#endif
CUDACHECK(cudaMalloc((void **) &d_cidx_to_diameter, sizeof(value_t)*max_num_simplices_forall_dims));
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)
CUDACHECK(cudaMalloc((void **) &d_flagarray_OR_index_to_subindex, sizeof(index_t)*max_num_simplices_forall_dims));
h_flagarray_OR_index_to_subindex= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_flagarray_OR_index_to_subindex==NULL) {
//std::cerr<<"malloc for h_index_to_subindex failed"<<std::endl;
}
#endif
CUDACHECK(cudaMalloc((void **) &d_distance_matrix, sizeof(value_t)*dist.size()*(dist.size()-1)/2));
cudaMemcpy(d_distance_matrix, dist.distances.data(), sizeof(value_t)*dist.size()*(dist.size()-1)/2, cudaMemcpyHostToDevice);
CUDACHECK(cudaMalloc((void **) &d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices_forall_dims));
//this array is used for both the pivot column index hash table array as well as the nonapparent cols array as an unstructured hashmap
h_pivot_column_index_array_OR_nonapparent_cols= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_pivot_column_index_array_OR_nonapparent_cols==NULL){
//std::cerr<<"malloc for h_pivot_column_index_array_OR_nonapparent_cols failed"<<std::endl;
exit(1);
}
//copy object over to GPU
CUDACHECK(cudaMalloc((void**) &d_binomial_coeff, sizeof(binomial_coeff_table)));
cudaMemcpy(d_binomial_coeff, &binomial_coeff, sizeof(binomial_coeff_table), cudaMemcpyHostToDevice);
index_t num_binoms= binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length();
index_t* h_d_binoms;
CUDACHECK(cudaMalloc((void **) &h_d_binoms, sizeof(index_t)*num_binoms));
cudaMemcpy(h_d_binoms, binomial_coeff.binoms, sizeof(index_t)*num_binoms, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_binomial_coeff->binoms), &h_d_binoms, sizeof(index_t*), cudaMemcpyHostToDevice);
cudaHostAlloc((void **)&h_num_columns_to_reduce, sizeof(index_t), cudaHostAllocPortable | cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_num_columns_to_reduce, h_num_columns_to_reduce,0);
cudaHostAlloc((void **)&h_num_nonapparent, sizeof(index_t), cudaHostAllocPortable | cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_num_nonapparent, h_num_nonapparent,0);
CUDACHECK(cudaMalloc((void**) &d_lowest_one_of_apparent_pair, sizeof(index_t)*max_num_simplices_forall_dims));
CUDACHECK(cudaMalloc((void**) &d_pivot_array, sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims));
h_pivot_array= (struct index_t_pair_struct*) malloc(sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims);
if(h_pivot_array==NULL){
//std::cerr<<"malloc for h_pivot_array failed"<<std::endl;
exit(1);
}
#ifdef PROFILING
cudaMemGetInfo(&freeMem,&totalMem);
//std::cerr<<"GPU memory after full rips memory calculation and allocation, total mem: "<< totalMem<<" bytes, free mem: "<<freeMem<<" bytes"<<std::endl;
#endif
}
sw.stop();
#ifdef PROFILING
//std::cerr<<"CUDA PREPROCESSING TIME (e.g. memory allocation time): "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
columns_to_reduce.clear();
std::vector<diameter_index_t_struct> simplices;
if(gpu_dim_max>=1) {
gpu_compute_dim_0_pairs(columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with GPU: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}else{
compute_dim_0_pairs(simplices, columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with CPU alone: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
//index_t dim_forgpuscan= MAX_INT64;//never do gpu scan
index_t dim_forgpuscan= 1;
for (index_t dim= 1; dim <= gpu_dim_max; ++dim) {
Stopwatch sw;
sw.start();
#ifdef USE_PHASHMAP
phmap_clear();
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index.clear();
pivot_column_index.resize(*h_num_columns_to_reduce);
#endif
*h_num_nonapparent= 0;
//search for apparent pairs
gpuscan(dim);
//dim_forgpuscan= dim;//update dim_forgpuscan to the dimension that gpuscan was just done at
sw.stop();
#ifdef PROFILING
//std::cerr<<"-SUM OF GPU MATRIX SCAN and post processing time for dim "<<dim<<": "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
compute_pairs_plusplus(
dim, dim_forgpuscan);
sw.stop();
#ifdef PROFILING
//std::cerr<<"SUBMATRIX REDUCTION TIME for dim "<< dim<<": "<<sw.ms()/1000.0<<"s"<<"\n"<<std::endl;
#endif
if (dim < gpu_dim_max) {
sw.start();
gpu_assemble_columns_to_reduce_plusplus(dim+1);
sw.stop();
#ifdef PROFILING
//std::cerr << "ASSEMBLE COLS TIME for dim " << dim + 1 << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
}
}
gpu_accel_timer.stop();
#ifdef PROFILING
if(gpu_dim_max>=1)
//std::cerr<<"GPU ACCELERATED COMPUTATION from dim 0 to dim "<<gpu_dim_max<<": "<<gpu_accel_timer.ms()/1000.0<<"s"<<std::endl;
#endif
if(dim_max>gpu_dim_max){//do cpu only computation from this point on
#ifdef CPUONLY_SPARSE_HASHMAP
//std::cerr<<"MEMORY EFFICIENT/BUT TIME INEFFICIENT CPU-ONLY MODE FOR REMAINDER OF HIGH DIMENSIONAL COMPUTATION (NOT ENOUGH GPU DEVICE MEMORY)"<<std::endl;
#endif
#ifndef CPUONLY_SPARSE_HASHMAP
//std::cerr<<"CPU-ONLY MODE FOR REMAINDER OF HIGH DIMENSIONAL COMPUTATION (NOT ENOUGH GPU DEVICE MEMORY)"<<std::endl;
#endif
free_init_cpumem();
hash_map<index_t,index_t> cpu_pivot_column_index;
cpu_pivot_column_index.reserve(*h_num_columns_to_reduce);
bool more_than_one_dim_to_compute= dim_max>gpu_dim_max+1;
assemble_columns_gpu_accel_transition_to_cpu_only(more_than_one_dim_to_compute, simplices, columns_to_reduce, cpu_pivot_column_index, gpu_dim_max+1);
free_remaining_cpumem();
for (index_t dim= gpu_dim_max+1; dim <= dim_max; ++dim) {
cpu_pivot_column_index.clear();
cpu_pivot_column_index.reserve(columns_to_reduce.size());
compute_pairs(columns_to_reduce, cpu_pivot_column_index, dim);
if(dim<dim_max){
sw.start();
//cpu_byneighbor_assemble_columns is a little faster?
cpu_byneighbor_assemble_columns_to_reduce(simplices, columns_to_reduce, cpu_pivot_column_index, dim+1);
//cpu_assemble_columns_to_reduce(columns_to_reduce,cpu_pivot_column_index, dim+1);
sw.stop();
#ifdef PROFILING
//std::cerr<<"TIME FOR CPU ASSEMBLE: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
}
}
}
template <>
void ripser<sparse_distance_matrix>::compute_barcodes(){
Stopwatch sw, gpu_accel_timer;
gpu_accel_timer.start();
sw.start();
index_t maxgpu_dim= calculate_gpu_dim_max_for_fullrips_computation_from_memory(dim_max, false);
if(maxgpu_dim<dim_max){
max_num_simplices_forall_dims= calculate_gpu_max_columns_for_sparserips_computation_from_memory();
//std::cerr<<"(sparse) max possible num simplices for memory allocation forall dims: "<<max_num_simplices_forall_dims<<std::endl;
}else {
max_num_simplices_forall_dims =
dim_max < (n/2)-1 ? get_num_simplices_for_dim(dim_max) : get_num_simplices_for_dim((n/2)-1);
//std::cerr<<"(dense case used in sparse computation) max possible num simplices for memory allocation forall dims: "<<max_num_simplices_forall_dims<<std::endl;
}
//we assume that we have enough memory to last up to dim_max (should be fine with a >=32GB GPU); growth of num simplices can be very slow for sparse case
if(dim_max>=1) {
CUDACHECK(cudaMalloc((void **) &d_columns_to_reduce, sizeof(struct diameter_index_t_struct) * max_num_simplices_forall_dims));//46000000
h_columns_to_reduce= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)* max_num_simplices_forall_dims);
if(h_columns_to_reduce==NULL){
//std::cerr<<"malloc for h_columns_to_reduce failed"<<std::endl;
exit(1);
}
#if defined(ASSEMBLE_REDUCTION_SUBMATRIX)
CUDACHECK(cudaMalloc((void **) &d_flagarray_OR_index_to_subindex, sizeof(index_t)*max_num_simplices_forall_dims));
h_flagarray_OR_index_to_subindex= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_flagarray_OR_index_to_subindex==NULL) {
//std::cerr<<"malloc for h_index_to_subindex failed"<<std::endl;
}
#endif
CSR_distance_matrix CSR_distance_matrix= dist.toCSR();
//copy CSR_distance_matrix object over to GPU
CUDACHECK(cudaMalloc((void **) &d_CSR_distance_matrix, sizeof(CSR_distance_matrix)));
cudaMemcpy(d_CSR_distance_matrix, &CSR_distance_matrix, sizeof(CSR_distance_matrix), cudaMemcpyHostToDevice);
index_t *h_d_offsets;
value_t *h_d_entries;
index_t *h_d_col_indices;
CUDACHECK(cudaMalloc((void **) &h_d_offsets, sizeof(index_t) * (CSR_distance_matrix.n + 1)));
cudaMemcpy(h_d_offsets, CSR_distance_matrix.offsets, sizeof(index_t) * (CSR_distance_matrix.n + 1), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_CSR_distance_matrix->offsets), &h_d_offsets, sizeof(index_t *), cudaMemcpyHostToDevice);
CUDACHECK(cudaMalloc((void **) &h_d_entries, sizeof(value_t) * CSR_distance_matrix.num_entries));
cudaMemcpy(h_d_entries, CSR_distance_matrix.entries, sizeof(value_t) * CSR_distance_matrix.num_entries, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_CSR_distance_matrix->entries), &h_d_entries, sizeof(value_t *), cudaMemcpyHostToDevice);
CUDACHECK(cudaMalloc((void **) &h_d_col_indices, sizeof(index_t) * CSR_distance_matrix.num_entries));
cudaMemcpy(h_d_col_indices, CSR_distance_matrix.col_indices, sizeof(index_t) * CSR_distance_matrix.num_entries,
cudaMemcpyHostToDevice);
cudaMemcpy(&(d_CSR_distance_matrix->col_indices), &h_d_col_indices, sizeof(index_t *), cudaMemcpyHostToDevice);
//this replaces d_cidx_to_diameter
CUDACHECK(cudaMalloc((void **) &d_cidx_diameter_pairs_sortedlist, sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims));
CUDACHECK(cudaMalloc((void **) &d_pivot_column_index_OR_nonapparent_cols, sizeof(index_t)*max_num_simplices_forall_dims));
//this array is used for both the pivot column index hash table array as well as the nonapparent cols array as an unstructured hashmap
h_pivot_column_index_array_OR_nonapparent_cols= (index_t*) malloc(sizeof(index_t)*max_num_simplices_forall_dims);
if(h_pivot_column_index_array_OR_nonapparent_cols==NULL){
//std::cerr<<"malloc for h_pivot_column_index_array_OR_nonapparent_cols failed"<<std::endl;
exit(1);
}
//copy object over to GPU
CUDACHECK(cudaMalloc((void**) &d_binomial_coeff, sizeof(binomial_coeff_table)));
cudaMemcpy(d_binomial_coeff, &binomial_coeff, sizeof(binomial_coeff_table), cudaMemcpyHostToDevice);
index_t num_binoms= binomial_coeff.get_num_n()*binomial_coeff.get_max_tuple_length();
index_t* h_d_binoms;
CUDACHECK(cudaMalloc((void **) &h_d_binoms, sizeof(index_t)*num_binoms));
cudaMemcpy(h_d_binoms, binomial_coeff.binoms, sizeof(index_t)*num_binoms, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_binomial_coeff->binoms), &h_d_binoms, sizeof(index_t*), cudaMemcpyHostToDevice);
cudaHostAlloc((void **)&h_num_columns_to_reduce, sizeof(index_t), cudaHostAllocPortable | cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_num_columns_to_reduce, h_num_columns_to_reduce,0);
cudaHostAlloc((void **)&h_num_nonapparent, sizeof(index_t), cudaHostAllocPortable | cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_num_nonapparent, h_num_nonapparent,0);
cudaHostAlloc((void **)&h_num_simplices, sizeof(index_t), cudaHostAllocPortable | cudaHostAllocMapped);
cudaHostGetDevicePointer(&d_num_simplices, h_num_simplices,0);
CUDACHECK(cudaMalloc((void**) &d_lowest_one_of_apparent_pair, sizeof(index_t)*max_num_simplices_forall_dims));
CUDACHECK(cudaMalloc((void**) &d_pivot_array, sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims));
h_pivot_array= (struct index_t_pair_struct*) malloc(sizeof(struct index_t_pair_struct)*max_num_simplices_forall_dims);
if(h_pivot_array==NULL){
//std::cerr<<"malloc for h_pivot_array failed"<<std::endl;
exit(1);
}
CUDACHECK(cudaMalloc((void**) &d_simplices, sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims));
h_simplices= (struct diameter_index_t_struct*) malloc(sizeof(struct diameter_index_t_struct)*max_num_simplices_forall_dims);
if(h_simplices==NULL){
//std::cerr<<"malloc for h_simplices failed"<<std::endl;
exit(1);
}
#ifdef PROFILING
cudaMemGetInfo(&freeMem,&totalMem);
//std::cerr<<"after GPU memory allocation: total mem, free mem: " <<totalMem<<" bytes, "<<freeMem<<" bytes"<<std::endl;
#endif
}
sw.stop();
#ifdef PROFILING
//std::cerr<<"CUDA PREPROCESSING TIME (e.g. memory allocation): "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
sw.start();
columns_to_reduce.clear();
if(dim_max>=1) {
gpu_compute_dim_0_pairs(columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with GPU: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}else{
std::vector<diameter_index_t_struct> simplices;
compute_dim_0_pairs(simplices, columns_to_reduce);
sw.stop();
#ifdef PROFILING
//std::cerr<<"0-dimensional persistence total computation time with CPU alone: "<<sw.ms()/1000.0<<"s"<<std::endl;
#endif
}
//index_t dim_forgpuscan= MAX_INT64;//never do gpuscan
index_t dim_forgpuscan= 1;
for (index_t dim= 1; dim <= dim_max; ++dim) {
Stopwatch sw;
sw.start();
#ifdef USE_PHASHMAP
phmap_clear();
#endif
#ifdef USE_GOOGLE_HASHMAP
pivot_column_index.clear();
pivot_column_index.resize(*h_num_columns_to_reduce);
#endif
*h_num_nonapparent= 0;
gpuscan(dim);
//dim_forgpuscan= dim;
sw.stop();
#ifdef PROFILING
//std::cerr << "-SUM OF GPU MATRIX SCAN and post processing time for dim " << dim << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
sw.start();
compute_pairs_plusplus(
dim, dim_forgpuscan);
sw.stop();
#ifdef PROFILING
//std::cerr << "SUBMATRIX REDUCTION TIME for dim " << dim << ": " << sw.ms() / 1000.0 << "s" << "\n" << std::endl;
#endif
if (dim < dim_max) {
sw.start();
gpu_assemble_columns_to_reduce_plusplus(dim + 1);
sw.stop();
#ifdef PROFILING
//std::cerr << "ASSEMBLE COLS TIME for dim " << dim + 1 << ": " << sw.ms() / 1000.0
//<< "s" << std::endl;
#endif
}
}
gpu_accel_timer.stop();
#ifdef PROFILING
//std::cerr<<"GPU ACCELERATED COMPUTATION: "<<gpu_accel_timer.ms()/1000.0<<"s"<<std::endl;
#endif
}
///I/O code
enum file_format { LOWER_DISTANCE_MATRIX, DISTANCE_MATRIX, POINT_CLOUD, DIPHA, SPARSE, BINARY };
template <typename T> T read(std::istream& s) {
T result;
s.read(reinterpret_cast<char*>(&result), sizeof(T));
return result; // on little endian: boost::endian::little_to_native(result);
}
compressed_lower_distance_matrix read_point_cloud_python(value_t* matrix, int num_rows, int num_columns){
std::vector<std::vector<value_t>> points;
for(int i= 0; i < num_rows; i++) {
std::vector <value_t> point;
for (int j= 0; j < num_columns; j++) {
point.push_back(matrix[i * num_columns + j]);
}
if (!point.empty()) {
points.push_back(point);
}
assert(point.size() == points.front().size());
}
//only l2 distance implemented so far
euclidean_distance_matrix eucl_dist(std::move(points));
index_t n= eucl_dist.size();
//std::cerr << "point cloud with " << n << " points in dimension "
//<< eucl_dist.points.front().size() << std::endl;
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < i; ++j) distances.push_back(eucl_dist(i, j));
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_point_cloud(std::istream& input_stream) {
std::vector<std::vector<value_t>> points;
std::string line;
value_t value;
while (std::getline(input_stream, line)) {
std::vector<value_t> point;
std::istringstream s(line);
while (s >> value) {
point.push_back(value);
s.ignore();
}
if (!point.empty()) points.push_back(point);
assert(point.size() == points.front().size());
}
euclidean_distance_matrix eucl_dist(std::move(points));
index_t n= eucl_dist.size();
//std::cerr << "point cloud with " << n << " points in dimension "
//<< eucl_dist.points.front().size() << std::endl;
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < i; ++j) distances.push_back(eucl_dist(i, j));
return compressed_lower_distance_matrix(std::move(distances));
}
//the coo format input is of a lower triangular matrix
sparse_distance_matrix read_sparse_distance_matrix(std::istream& input_stream) {
std::vector<std::vector<index_diameter_t_struct>> neighbors;
index_t num_edges= 0;
std::string line;
while (std::getline(input_stream, line)) {
std::istringstream s(line);
size_t i, j;
value_t value;
s >> i;
s >> j;
s >> value;
if (i != j) {
neighbors.resize(std::max({neighbors.size(), i + 1, j + 1}));
neighbors[i].push_back({j, value});
neighbors[j].push_back({i, value});
++num_edges;
}
}
struct lowerindex_lowerdiameter_index_t_struct_compare cmp_index_diameter;
for (size_t i= 0; i < neighbors.size(); ++i)
std::sort(neighbors[i].begin(), neighbors[i].end(), cmp_index_diameter);
return sparse_distance_matrix(std::move(neighbors), num_edges);
}
compressed_lower_distance_matrix read_lower_distance_matrix_python(value_t* matrix, int matrix_length) {
std::vector<value_t> distances(matrix, matrix + matrix_length);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_lower_distance_matrix(std::istream& input_stream) {
std::vector<value_t> distances;
value_t value;
while (input_stream >> value) {
distances.push_back(value);
input_stream.ignore();
}
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_distance_matrix_python(value_t* matrix, int matrix_length) {
std::vector<value_t> distances(matrix, matrix + matrix_length);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_distance_matrix(std::istream& input_stream) {
std::vector<value_t> distances;
std::string line;
value_t value;
for (int i= 0; std::getline(input_stream, line); ++i) {
std::istringstream s(line);
for (int j= 0; j < i && s >> value; ++j) {
distances.push_back(value);
s.ignore();
}
}
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_dipha(std::istream& input_stream) {
if (read<int64_t>(input_stream) != 8067171840) {
//std::cerr << "input is not a Dipha file (magic number: 8067171840)" << std::endl;
exit(-1);
}
if (read<int64_t>(input_stream) != 7) {
//std::cerr << "input is not a Dipha distance matrix (file type: 7)" << std::endl;
exit(-1);
}
index_t n= read<int64_t>(input_stream);
std::vector<value_t> distances;
for (int i= 0; i < n; ++i)
for (int j= 0; j < n; ++j)
if (i > j)
distances.push_back(read<double>(input_stream));
else
read<double>(input_stream);
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_binary(std::istream& input_stream) {
std::vector<value_t> distances;
while (!input_stream.eof()) distances.push_back(read<value_t>(input_stream));
return compressed_lower_distance_matrix(std::move(distances));
}
compressed_lower_distance_matrix read_matrix_python(value_t* matrix, int num_entries, int num_rows, int num_columns, file_format format) {
switch (format) {
case LOWER_DISTANCE_MATRIX:
return read_lower_distance_matrix_python(matrix, num_entries);
case DISTANCE_MATRIX://assume that the distance matrix has been changed into lower_distance matrix format
return read_distance_matrix_python(matrix, num_entries);
case POINT_CLOUD:
return read_point_cloud_python(matrix, num_rows, num_columns);
}
//std::cerr<<"unsupported input file format for python interface"<<std::endl;
exit(-1);
}
compressed_lower_distance_matrix read_file(std::istream& input_stream, file_format format) {
switch (format) {
case LOWER_DISTANCE_MATRIX:
return read_lower_distance_matrix(input_stream);
case DISTANCE_MATRIX:
return read_distance_matrix(input_stream);
case POINT_CLOUD:
return read_point_cloud(input_stream);
case DIPHA:
return read_dipha(input_stream);
default:
return read_binary(input_stream);
}
//std::cerr<<"unsupported input file format"<<std::endl;
}
void print_usage_and_exit(int exit_code) {
std::cerr
<< "Usage: "
<< "ripser++ "
<< "[options] [filename]" << std::endl
<< std::endl
<< "Options:" << std::endl
<< std::endl
<< " --help print this screen" << std::endl
<< " --format use the specified file format for the input. Options are:"
<< std::endl
<< " lower-distance (lower triangular distance matrix; default)"
<< std::endl
<< " distance (full distance matrix)" << std::endl
<< " point-cloud (point cloud in Euclidean space)" << std::endl
<< " dipha (distance matrix in DIPHA file format)" << std::endl
<< " sparse (sparse distance matrix in sparse triplet (COO) format)"
<< std::endl
<< " binary (distance matrix in Ripser binary file format)"
<< std::endl
<< " --dim <k> compute persistent homology up to dimension <k>" << std::endl
<< " --threshold <t> compute Rips complexes up to diameter <t>" << std::endl
<< " --sparse force sparse computation "<<std::endl
<< " --ratio <r> only show persistence pairs with death/birth ratio > r" << std::endl
<< std::endl;
exit(exit_code);
}
extern "C" void run_main_filename(int argc, char** argv, const char* filename) {
Stopwatch sw;
#ifdef PROFILING
cudaDeviceProp deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 0; i < argc; i++) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}
}
std::ifstream file_stream(filename);
if (filename && file_stream.fail()) {
//std::cerr << "couldn't open file " << filename << std::endl;
exit(-1);
}
if (format == SPARSE) {
Stopwatch IOsw;
IOsw.start();
sparse_distance_matrix dist =
read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else {
Stopwatch IOsw;
IOsw.start();
compressed_lower_distance_matrix dist= read_file(filename ? file_stream : std::cin, format);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load distance matrix (I/O)"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= std::max(r_i, dist(i, j));
enclosing_radius= std::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= std::min(min, d);
max= std::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? std::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_end,&totalMemory);
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
extern "C" void run_main(int argc, char** argv, value_t* matrix, int num_entries, int num_rows, int num_columns) {
Stopwatch sw;
#ifdef PROFILING
cudaDeviceProp deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
const char* filename= nullptr;
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 0; i < argc; i++) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}
}
if (format == SPARSE) {//this branch is currently unsupported in run_main, see run_main_filename() instead
Stopwatch IOsw;
IOsw.start();
std::ifstream file_stream(filename);
sparse_distance_matrix dist= read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else{
//Stopwatch IOsw;
//IOsw.start();
compressed_lower_distance_matrix dist= read_matrix_python(matrix, num_entries, num_rows, num_columns, format);
//IOsw.stop();
#ifdef PROFILING
////std::cerr<<IOsw.ms()/1000.0<<"s time to load python matrix"<<std::endl;
//std::cerr<<"loaded python dense user matrix"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= std::max(r_i, dist(i, j));
enclosing_radius= std::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= std::min(min, d);
max= std::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? std::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_end,&totalMemory);
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
int main(int argc, char** argv) {
Stopwatch sw;
#ifdef PROFILING
cudaDeviceProp deviceProp;
size_t freeMem_start, freeMem_end, totalMemory;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_start,&totalMemory);
#endif
sw.start();
const char* filename= nullptr;
file_format format= DISTANCE_MATRIX;
index_t dim_max= 1;
value_t threshold= std::numeric_limits<value_t>::max();
float ratio= 1;
bool use_sparse= false;
for (index_t i= 1; i < argc; ++i) {
const std::string arg(argv[i]);
if (arg == "--help") {
print_usage_and_exit(0);
} else if (arg == "--dim") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
dim_max= std::stol(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--threshold") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
threshold= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--ratio") {
std::string parameter= std::string(argv[++i]);
size_t next_pos;
ratio= std::stof(parameter, &next_pos);
if (next_pos != parameter.size()) print_usage_and_exit(-1);
} else if (arg == "--format") {
std::string parameter= std::string(argv[++i]);
if (parameter == "lower-distance")
format= LOWER_DISTANCE_MATRIX;
else if (parameter == "distance")
format= DISTANCE_MATRIX;
else if (parameter == "point-cloud")
format= POINT_CLOUD;
else if (parameter == "dipha")
format= DIPHA;
else if (parameter == "sparse")
format= SPARSE;
else if (parameter == "binary")
format= BINARY;
else
print_usage_and_exit(-1);
} else if(arg=="--sparse") {
use_sparse= true;
}else {
if (filename) { print_usage_and_exit(-1); }
filename= argv[i];
}
}
std::ifstream file_stream(filename);
if (filename && file_stream.fail()) {
//std::cerr << "couldn't open file " << filename << std::endl;
exit(-1);
}
if (format == SPARSE) {
Stopwatch IOsw;
IOsw.start();
sparse_distance_matrix dist =
read_sparse_distance_matrix(filename ? file_stream : std::cin);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load sparse distance matrix (I/O)"<<std::endl;
#endif
assert(dist.num_entries%2==0);
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< dist.num_entries/2 << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(std::move(dist), dim_max, threshold, ratio)
.compute_barcodes();
}else {
Stopwatch IOsw;
IOsw.start();
compressed_lower_distance_matrix dist= read_file(filename ? file_stream : std::cin, format);
IOsw.stop();
#ifdef PROFILING
//std::cerr<<IOsw.ms()/1000.0<<"s time to load distance matrix (I/O)"<<std::endl;
#endif
value_t min= std::numeric_limits<value_t>::infinity(),
max= -std::numeric_limits<value_t>::infinity(), max_finite= max;
int num_edges= 0;
value_t enclosing_radius= std::numeric_limits<value_t>::infinity();
for (index_t i= 0; i < dist.size(); ++i) {
value_t r_i= -std::numeric_limits<value_t>::infinity();
for (index_t j= 0; j < dist.size(); ++j) r_i= std::max(r_i, dist(i, j));
enclosing_radius= std::min(enclosing_radius, r_i);
}
if (threshold == std::numeric_limits<value_t>::max()) threshold= enclosing_radius;
for (auto d : dist.distances) {
min= std::min(min, d);
max= std::max(max, d);
max_finite= d != std::numeric_limits<value_t>::infinity() ? std::max(max, d) : max_finite;
if (d <= threshold) ++num_edges;
}
//std::cerr << "value range: [" << min << "," << max_finite << "]" << std::endl;
if (use_sparse) {
//std::cerr << "sparse distance matrix with " << dist.size() << " points and "
//<< num_edges << "/" << (dist.size() * (dist.size() - 1)) / 2 << " entries"
//<< std::endl;
ripser<sparse_distance_matrix>(sparse_distance_matrix(std::move(dist), threshold),
dim_max, threshold, ratio)
.compute_barcodes();
} else {
//std::cerr << "distance matrix with " << dist.size() << " points" << std::endl;
ripser<compressed_lower_distance_matrix>(std::move(dist), dim_max, threshold, ratio).compute_barcodes();
}
}
sw.stop();
#ifdef INDICATE_PROGRESS
//std::cerr<<clear_line<<std::flush;
#endif
#ifdef PROFILING
//std::cerr<<"total time: "<<sw.ms()/1000.0<<"s"<<std::endl;
cudaGetDeviceProperties(&deviceProp, 0);
cudaMemGetInfo(&freeMem_end,&totalMemory);
cudaDeviceReset();
//std::cerr<<"total GPU memory used: "<<(freeMem_start-freeMem_end)/1000.0/1000.0/1000.0<<"GB"<<std::endl;
#endif
}
|
ccd7b0bf085fee4aa8ac948b6b12f41c351ed73e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/core/core.hpp>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/data_transformer.hpp"
namespace caffe {
template <typename Dtype>
__global__
void transform_kernel(int N, int C,
int H, int W, // original size
int Hc, int Wc, // cropped size
bool param_mirror,
int datum_height, int datum_width, // offsets
int crop_size, Phase phase,
size_t sizeof_element,
const void *in,
Dtype *out, // buffers
float scale,
int has_mean_file,
int has_mean_values,
float *mean,
const unsigned int *random_numbers) {
const int c = blockIdx.y;
// loop over images
for (int n = blockIdx.x; n < N; n += gridDim.x) {
// get mirror and offsets
unsigned int rand1 = random_numbers[n*3 ];
unsigned int rand2 = random_numbers[n*3 + 1];
unsigned int rand3 = random_numbers[n*3 + 2];
bool mirror = param_mirror && (rand1 % 2);
int h_off = 0, w_off = 0;
if (crop_size) {
if (phase == TRAIN) {
h_off = rand2 % (datum_height - crop_size + 1);
w_off = rand3 % (datum_width - crop_size + 1);
} else {
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
}
}
const uint8_t *in_ptri;
const float *in_ptrf;
// offsets into start of (image, channel) = (n, c)
// channel is handled by blockIdx.y
// Initial offset per Dtype:
const Dtype *in_ptr;// = &in[n*C*H*W];
// Element-specific offset to a channel c
if (sizeof_element == sizeof(uint8_t)) {
in_ptri = &(reinterpret_cast<const uint8_t*>(in))[n*C*H*W];
in_ptri += c*H*W;
} else if (sizeof_element == sizeof(float)) {
in_ptrf = &(reinterpret_cast<const float*>(in))[n*C*H*W];
in_ptrf += c*H*W;
} else {
in_ptr = &(reinterpret_cast<const Dtype*>(in))[n*C*H*W];
in_ptr += c*H*W;
}
Dtype *out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc];
Dtype element;
// loop over pixels using threads
for (int h = threadIdx.y; h < Hc; h += blockDim.y) {
for (int w = threadIdx.x; w < Wc; w += blockDim.x) {
// get the indices for in, out buffers
int in_idx = (h_off + h) * W + w_off + w;
int out_idx = mirror ? h * Wc + (Wc - 1 - w) : h * Wc + w;
if (sizeof_element == sizeof(uint8_t)) {
element = in_ptri[in_idx];
} else if (sizeof_element == sizeof(float)) {
element = in_ptrf[in_idx];
} else {
element = in_ptr[in_idx];
}
// perform the transform
if (has_mean_file) {
out_ptr[out_idx] = (element - mean[c*H*W + in_idx]) * scale;
} else {
if (has_mean_values) {
out_ptr[out_idx] = (element - mean[c]) * scale;
} else {
out_ptr[out_idx] = element * scale;
}
}
}
}
}
}
template <>
__global__
void transform_kernel<__half>(int N, int C,
int H, int W, // original size
int Hc, int Wc, // cropped size
bool param_mirror,
int datum_height, int datum_width, // offsets
int crop_size, Phase phase,
size_t sizeof_element,
const void* in,
__half* out, // buffers
float scale,
int has_mean_file,
int has_mean_values,
float* mean,
const unsigned int *random_numbers) {
const int c = blockIdx.y;
// loop over images
for (int n = blockIdx.x; n < N; n += gridDim.x) {
// get mirror and offsets
unsigned int rand1 = random_numbers[n*3 ];
unsigned int rand2 = random_numbers[n*3 + 1];
unsigned int rand3 = random_numbers[n*3 + 2];
bool mirror = param_mirror && (rand1 % 2);
int h_off = 0, w_off = 0;
if (crop_size) {
if (phase == TRAIN) {
h_off = rand2 % (datum_height - crop_size + 1);
w_off = rand3 % (datum_width - crop_size + 1);
} else {
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
}
}
const uint8_t *in_ptri;
const float *in_ptrf;
// offsets into start of (image, channel) = (n, c)
// channel is handled by blockIdx.y
// Initial offset per Dtype:
const __half *in_ptr;// = &in[n*C*H*W];
// Element-specific offset to a channel c
if (sizeof_element == sizeof(uint8_t)) {
in_ptri = &(reinterpret_cast<const uint8_t*>(in))[n*C*H*W];
in_ptri += c*H*W;
} else if (sizeof_element == sizeof(float)) {
in_ptrf = &(reinterpret_cast<const float*>(in))[n*C*H*W];
in_ptrf += c*H*W;
} else {
in_ptr = &(reinterpret_cast<const __half*>(in))[n*C*H*W];
in_ptr += c*H*W;
}
__half* out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc];
float element;
// loop over pixels using threads
for (int h = threadIdx.y; h < Hc; h += blockDim.y) {
for (int w = threadIdx.x; w < Wc; w += blockDim.x) {
// get the indices for in, out buffers
int in_idx = (h_off + h) * W + w_off + w;
int out_idx = mirror ? h * Wc + (Wc - 1 - w) : h * Wc + w;
if (sizeof_element == sizeof(uint8_t)) {
element = in_ptri[in_idx];
} else if (sizeof_element == sizeof(float)) {
element = in_ptrf[in_idx];
} else {
element = __half2float(in_ptr[in_idx]);
}
// perform the transform
if (has_mean_file) {
out_ptr[out_idx] = float2half_clip((element - mean[c*H*W + in_idx]) * scale);
} else {
if (has_mean_values) {
out_ptr[out_idx] = float2half_clip((element - mean[c]) * scale);
} else {
out_ptr[out_idx] = float2half_clip(element * scale);
}
}
}
}
}
}
template <typename Dtype>
void DataTransformer::TransformGPU(int N, int C, int H, int W,
size_t sizeof_element,
const void *in, Dtype *out,
const unsigned int *random_numbers) {
const int datum_channels = C;
const int datum_height = H;
const int datum_width = W;
const int crop_size = param_.crop_size();
float scale = param_.scale();
const bool mirror = param_.mirror();
const bool has_mean_file = param_.has_mean_file();
const bool has_mean_values = mean_values_.size() > 0;
CHECK_GT(datum_channels, 0);
CHECK_GE(datum_height, crop_size);
CHECK_GE(datum_width, crop_size);
float* mean = nullptr;
if (has_mean_file) {
CHECK_EQ(datum_channels, data_mean_.channels());
// no need to check equality anymore
// datum_{height, width} are _output_ not input
mean = data_mean_.mutable_gpu_data();
}
if (has_mean_values) {
if (mean_values_gpu_.empty()) {
CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels)
<< "Specify either 1 mean_value or as many as channels: "
<< datum_channels;
if (datum_channels > 1 && mean_values_.size() == 1) {
// Replicate the mean_value for simplicity
for (int c = 1; c < datum_channels; ++c) {
mean_values_.push_back(mean_values_[0]);
}
}
mean_values_gpu_.reserve(sizeof(float) * mean_values_.size());
caffe_copy(static_cast<int>(mean_values_.size()), &mean_values_.front(),
reinterpret_cast<float*>(mean_values_gpu_.data()));
}
mean = reinterpret_cast<float*>(mean_values_gpu_.data());
}
int height = datum_height;
int width = datum_width;
if (crop_size) {
height = crop_size;
width = crop_size;
}
dim3 grid(N, C);
dim3 block(16, 16);
hipStream_t stream = Caffe::thread_stream();
// TODO clean
if (is_precise<Dtype>()) {
hipLaunchKernelGGL(( transform_kernel<Dtype>)
, dim3(grid), dim3(block), 0, stream , N, C, H, W,
height, width,
param_.mirror(),
datum_height, datum_width,
crop_size, phase_,
sizeof_element,
in, out,
scale,
static_cast<int>(has_mean_file),
static_cast<int>(has_mean_values),
mean, random_numbers);
} else {
hipLaunchKernelGGL(( transform_kernel<__half>)
, dim3(grid), dim3(block), 0, stream , N, C, H, W,
height, width,
param_.mirror(),
datum_height, datum_width,
crop_size, phase_,
sizeof_element,
in, reinterpret_cast<__half*>(out),
scale,
static_cast<int>(has_mean_file),
static_cast<int>(has_mean_values),
mean, random_numbers);
}
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template void DataTransformer::TransformGPU<float>(int, int, int, int,
size_t, const void*, float*, const unsigned int*);
template void DataTransformer::TransformGPU<double>(int, int, int, int,
size_t, const void*, double*, const unsigned int*);
template void DataTransformer::TransformGPU<float16>(int, int, int, int,
size_t, const void*, float16*, const unsigned int*);
} // namespace caffe
| ccd7b0bf085fee4aa8ac948b6b12f41c351ed73e.cu | #include <opencv2/core/core.hpp>
#include <device_launch_parameters.h>
#include "caffe/util/gpu_math_functions.cuh"
#include "caffe/data_transformer.hpp"
namespace caffe {
template <typename Dtype>
__global__
void transform_kernel(int N, int C,
int H, int W, // original size
int Hc, int Wc, // cropped size
bool param_mirror,
int datum_height, int datum_width, // offsets
int crop_size, Phase phase,
size_t sizeof_element,
const void *in,
Dtype *out, // buffers
float scale,
int has_mean_file,
int has_mean_values,
float *mean,
const unsigned int *random_numbers) {
const int c = blockIdx.y;
// loop over images
for (int n = blockIdx.x; n < N; n += gridDim.x) {
// get mirror and offsets
unsigned int rand1 = random_numbers[n*3 ];
unsigned int rand2 = random_numbers[n*3 + 1];
unsigned int rand3 = random_numbers[n*3 + 2];
bool mirror = param_mirror && (rand1 % 2);
int h_off = 0, w_off = 0;
if (crop_size) {
if (phase == TRAIN) {
h_off = rand2 % (datum_height - crop_size + 1);
w_off = rand3 % (datum_width - crop_size + 1);
} else {
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
}
}
const uint8_t *in_ptri;
const float *in_ptrf;
// offsets into start of (image, channel) = (n, c)
// channel is handled by blockIdx.y
// Initial offset per Dtype:
const Dtype *in_ptr;// = &in[n*C*H*W];
// Element-specific offset to a channel c
if (sizeof_element == sizeof(uint8_t)) {
in_ptri = &(reinterpret_cast<const uint8_t*>(in))[n*C*H*W];
in_ptri += c*H*W;
} else if (sizeof_element == sizeof(float)) {
in_ptrf = &(reinterpret_cast<const float*>(in))[n*C*H*W];
in_ptrf += c*H*W;
} else {
in_ptr = &(reinterpret_cast<const Dtype*>(in))[n*C*H*W];
in_ptr += c*H*W;
}
Dtype *out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc];
Dtype element;
// loop over pixels using threads
for (int h = threadIdx.y; h < Hc; h += blockDim.y) {
for (int w = threadIdx.x; w < Wc; w += blockDim.x) {
// get the indices for in, out buffers
int in_idx = (h_off + h) * W + w_off + w;
int out_idx = mirror ? h * Wc + (Wc - 1 - w) : h * Wc + w;
if (sizeof_element == sizeof(uint8_t)) {
element = in_ptri[in_idx];
} else if (sizeof_element == sizeof(float)) {
element = in_ptrf[in_idx];
} else {
element = in_ptr[in_idx];
}
// perform the transform
if (has_mean_file) {
out_ptr[out_idx] = (element - mean[c*H*W + in_idx]) * scale;
} else {
if (has_mean_values) {
out_ptr[out_idx] = (element - mean[c]) * scale;
} else {
out_ptr[out_idx] = element * scale;
}
}
}
}
}
}
template <>
__global__
void transform_kernel<__half>(int N, int C,
int H, int W, // original size
int Hc, int Wc, // cropped size
bool param_mirror,
int datum_height, int datum_width, // offsets
int crop_size, Phase phase,
size_t sizeof_element,
const void* in,
__half* out, // buffers
float scale,
int has_mean_file,
int has_mean_values,
float* mean,
const unsigned int *random_numbers) {
const int c = blockIdx.y;
// loop over images
for (int n = blockIdx.x; n < N; n += gridDim.x) {
// get mirror and offsets
unsigned int rand1 = random_numbers[n*3 ];
unsigned int rand2 = random_numbers[n*3 + 1];
unsigned int rand3 = random_numbers[n*3 + 2];
bool mirror = param_mirror && (rand1 % 2);
int h_off = 0, w_off = 0;
if (crop_size) {
if (phase == TRAIN) {
h_off = rand2 % (datum_height - crop_size + 1);
w_off = rand3 % (datum_width - crop_size + 1);
} else {
h_off = (datum_height - crop_size) / 2;
w_off = (datum_width - crop_size) / 2;
}
}
const uint8_t *in_ptri;
const float *in_ptrf;
// offsets into start of (image, channel) = (n, c)
// channel is handled by blockIdx.y
// Initial offset per Dtype:
const __half *in_ptr;// = &in[n*C*H*W];
// Element-specific offset to a channel c
if (sizeof_element == sizeof(uint8_t)) {
in_ptri = &(reinterpret_cast<const uint8_t*>(in))[n*C*H*W];
in_ptri += c*H*W;
} else if (sizeof_element == sizeof(float)) {
in_ptrf = &(reinterpret_cast<const float*>(in))[n*C*H*W];
in_ptrf += c*H*W;
} else {
in_ptr = &(reinterpret_cast<const __half*>(in))[n*C*H*W];
in_ptr += c*H*W;
}
__half* out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc];
float element;
// loop over pixels using threads
for (int h = threadIdx.y; h < Hc; h += blockDim.y) {
for (int w = threadIdx.x; w < Wc; w += blockDim.x) {
// get the indices for in, out buffers
int in_idx = (h_off + h) * W + w_off + w;
int out_idx = mirror ? h * Wc + (Wc - 1 - w) : h * Wc + w;
if (sizeof_element == sizeof(uint8_t)) {
element = in_ptri[in_idx];
} else if (sizeof_element == sizeof(float)) {
element = in_ptrf[in_idx];
} else {
element = __half2float(in_ptr[in_idx]);
}
// perform the transform
if (has_mean_file) {
out_ptr[out_idx] = float2half_clip((element - mean[c*H*W + in_idx]) * scale);
} else {
if (has_mean_values) {
out_ptr[out_idx] = float2half_clip((element - mean[c]) * scale);
} else {
out_ptr[out_idx] = float2half_clip(element * scale);
}
}
}
}
}
}
template <typename Dtype>
void DataTransformer::TransformGPU(int N, int C, int H, int W,
size_t sizeof_element,
const void *in, Dtype *out,
const unsigned int *random_numbers) {
const int datum_channels = C;
const int datum_height = H;
const int datum_width = W;
const int crop_size = param_.crop_size();
float scale = param_.scale();
const bool mirror = param_.mirror();
const bool has_mean_file = param_.has_mean_file();
const bool has_mean_values = mean_values_.size() > 0;
CHECK_GT(datum_channels, 0);
CHECK_GE(datum_height, crop_size);
CHECK_GE(datum_width, crop_size);
float* mean = nullptr;
if (has_mean_file) {
CHECK_EQ(datum_channels, data_mean_.channels());
// no need to check equality anymore
// datum_{height, width} are _output_ not input
mean = data_mean_.mutable_gpu_data();
}
if (has_mean_values) {
if (mean_values_gpu_.empty()) {
CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels)
<< "Specify either 1 mean_value or as many as channels: "
<< datum_channels;
if (datum_channels > 1 && mean_values_.size() == 1) {
// Replicate the mean_value for simplicity
for (int c = 1; c < datum_channels; ++c) {
mean_values_.push_back(mean_values_[0]);
}
}
mean_values_gpu_.reserve(sizeof(float) * mean_values_.size());
caffe_copy(static_cast<int>(mean_values_.size()), &mean_values_.front(),
reinterpret_cast<float*>(mean_values_gpu_.data()));
}
mean = reinterpret_cast<float*>(mean_values_gpu_.data());
}
int height = datum_height;
int width = datum_width;
if (crop_size) {
height = crop_size;
width = crop_size;
}
dim3 grid(N, C);
dim3 block(16, 16);
cudaStream_t stream = Caffe::thread_stream();
// TODO clean
if (is_precise<Dtype>()) {
transform_kernel<Dtype>
<<< grid, block, 0, stream >>> (N, C, H, W,
height, width,
param_.mirror(),
datum_height, datum_width,
crop_size, phase_,
sizeof_element,
in, out,
scale,
static_cast<int>(has_mean_file),
static_cast<int>(has_mean_values),
mean, random_numbers);
} else {
transform_kernel<__half>
<<< grid, block, 0, stream >>> (N, C, H, W,
height, width,
param_.mirror(),
datum_height, datum_width,
crop_size, phase_,
sizeof_element,
in, reinterpret_cast<__half*>(out),
scale,
static_cast<int>(has_mean_file),
static_cast<int>(has_mean_values),
mean, random_numbers);
}
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template void DataTransformer::TransformGPU<float>(int, int, int, int,
size_t, const void*, float*, const unsigned int*);
template void DataTransformer::TransformGPU<double>(int, int, int, int,
size_t, const void*, double*, const unsigned int*);
template void DataTransformer::TransformGPU<float16>(int, int, int, int,
size_t, const void*, float16*, const unsigned int*);
} // namespace caffe
|
d936287a242df4ba073536fd1b8bddd900f8be82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_bfs(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_graph_height, bool *g_pixel_mask, int vertex_num,int width,int height, int vertex_num1, int width1, int height1, bool *g_over, int *g_counter)
{
/*******************************
*threadId is calculated ******
*****************************/
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true)
{
int col = thid % width1 , row = thid / width1 ;
if(col < width - 1 && col > 0 && row < height - 1 && row > 0 )
{
int height_l = 0, height_d = 0, height_u = 0 , height_r = 0 ;
height_r = g_graph_height[thid+1] ;
height_l = g_graph_height[thid-1] ;
height_d = g_graph_height[thid+width1] ;
height_u = g_graph_height[thid-width1] ;
if(((height_l == (*g_counter) && g_right_weight[thid-1] > 0)) ||((height_d == (*g_counter) && g_up_weight[thid+width1] > 0) || ( height_r == (*g_counter) && g_left_weight[thid+1] > 0 ) || ( height_u == (*g_counter) && g_down_weight[thid-width1] > 0 ) ))
{
g_graph_height[thid] = (*g_counter) + 1 ;
g_pixel_mask[thid] = false ;
*g_over = true ;
}
}
}
} | d936287a242df4ba073536fd1b8bddd900f8be82.cu | #include "includes.h"
__global__ void kernel_bfs(int *g_left_weight, int *g_right_weight, int *g_down_weight, int *g_up_weight, int *g_graph_height, bool *g_pixel_mask, int vertex_num,int width,int height, int vertex_num1, int width1, int height1, bool *g_over, int *g_counter)
{
/*******************************
*threadId is calculated ******
*****************************/
int thid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x ;
if(thid < vertex_num && g_pixel_mask[thid] == true)
{
int col = thid % width1 , row = thid / width1 ;
if(col < width - 1 && col > 0 && row < height - 1 && row > 0 )
{
int height_l = 0, height_d = 0, height_u = 0 , height_r = 0 ;
height_r = g_graph_height[thid+1] ;
height_l = g_graph_height[thid-1] ;
height_d = g_graph_height[thid+width1] ;
height_u = g_graph_height[thid-width1] ;
if(((height_l == (*g_counter) && g_right_weight[thid-1] > 0)) ||((height_d == (*g_counter) && g_up_weight[thid+width1] > 0) || ( height_r == (*g_counter) && g_left_weight[thid+1] > 0 ) || ( height_u == (*g_counter) && g_down_weight[thid-width1] > 0 ) ))
{
g_graph_height[thid] = (*g_counter) + 1 ;
g_pixel_mask[thid] = false ;
*g_over = true ;
}
}
}
} |
9b448385a555e69966fa41b23beb31e832e95719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
// kernels borrowed from Caffe
template <typename scalar_t, typename accscalar_t>
__global__ void MaxPoolForward(const int nthreads, const scalar_t* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, scalar_t* top_data,
int64_t* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
while(hstart < 0)
hstart += dilation_h;
while(wstart < 0)
wstart += dilation_w;
accscalar_t maxval = THCNumerics<accscalar_t>::min();
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; h += dilation_h) {
for (int w = wstart; w < wend; w += dilation_w) {
scalar_t val = bottom_data[h * width + w];
if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) {
maxidx = h * width + w;
maxval = ScalarConvert<scalar_t, accscalar_t>::to(val);
}
}
}
top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval);
top_mask[index] = maxidx;
}
}
static const int BACKWARD_THREADS = 256;
template <typename scalar_t, typename accscalar_t>
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 4)
#else
C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 8)
#endif
__global__ void MaxPoolBackward(const int nthreads, const scalar_t* top_diff,
const int64_t* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
scalar_t* bottom_diff) {
CUDA_KERNEL_LOOP(index, height*width) {
int h = index/width;
int w = index - h * width;
//get some templating performance benefits without actually templating
int phstart, phend, pwstart, pwend;
if (stride_h == 1) {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) + 1;
phend = min((h + pad_h) + 1, pooled_height);
} else if (stride_h == 2) {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / 2 + 1;
phend = min((h + pad_h) / 2 + 1, pooled_height);
} else {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / stride_h + 1;
phend = min((h + pad_h) / stride_h + 1, pooled_height);
}
if (stride_w == 1) {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) + 1;
pwend = min((w + pad_w) + 1, pooled_width);
} else if (stride_w == 2) {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / 2 + 1;
pwend = min((w + pad_w) / 2 + 1, pooled_width);
} else {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / stride_w + 1;
pwend = min((w + pad_w) / stride_w + 1, pooled_width);
}
for (int n = blockIdx.y; n < num; n += gridDim.y)
for (int c = blockIdx.z; c < channels; c+= gridDim.z) {
accscalar_t gradient = accscalar_t(0);
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
top_mask += offset;
//get some templating performance benefits without actually templating
if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]);
}
}
}
} else {
if (top_mask[phstart * pooled_width + pwstart] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]);
}
}
bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
}
void max_pool2d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// XXX JIT: Pooling.cpp allows stride.empty().
// XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 2 &&
(stride.empty() || stride.size() == 2) &&
(padding.size() == 1 || padding.size() == 2) &&
(dilation.size() == 1 || dilation.size() == 2),
"max_pool2d_with_indices: internal error: all IntArrayRef sizes must be 2");
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous();
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
const int count = safe_downcast<int, int64_t>(output.numel());
const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
BACKWARD_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data<scalar_t>();
scalar_t *input_data = input.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
hipLaunchKernelGGL(( MaxPoolForward<scalar_t, scalar_t>)
, dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count, input_data,
nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); }
);
TORCH_CHECK(hipGetLastError() == hipSuccess,
"max_pool2d_with_indices_out_cuda_frame failed with error code ",
hipGetLastError());
if(input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
void max_pool2d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// XXX JIT: Pooling.cpp allows stride.empty().
// XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 2 &&
(stride.empty() || stride.size() == 2) &&
(padding.size() == 1 || padding.size() == 2) &&
(dilation.size() == 1 || dilation.size() == 2),
"max_pool2d_with_indices: internal error: all IntArrayRef sizes must be 2");
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const Tensor input = input_.contiguous();
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
max_pool2d_backward_shape_check(
input_,
gradOutput_,
indices,
nbatch,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
/*cuda=*/ true);
const Tensor gradOutput = gradOutput_.contiguous();
gradInput.resize_as_(input);
int64_t count = input.numel();
dim3 grid;
int imgcount = inputWidth * inputHeight;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = nbatch;
grid.z = nInputPlane;
uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
hipLaunchKernelGGL(( MaxPoolBackward<scalar_t, accscalar_t>)
, dim3(grid), dim3(BACKWARD_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
gradOutput_data,
indices_data,
nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
gradInput_data);
}
);
TORCH_CHECK(hipGetLastError() == hipSuccess,
"fractional_max_pool2d_backward_out_cuda failed with error code ",
hipGetLastError());
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool2d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool2d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool2d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
max_pool2d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool2d_with_indices_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
auto gradInput = at::zeros_like(input);
max_pool2d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
| 9b448385a555e69966fa41b23beb31e832e95719.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
// kernels borrowed from Caffe
template <typename scalar_t, typename accscalar_t>
__global__ void MaxPoolForward(const int nthreads, const scalar_t* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, scalar_t* top_data,
int64_t* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
while(hstart < 0)
hstart += dilation_h;
while(wstart < 0)
wstart += dilation_w;
accscalar_t maxval = THCNumerics<accscalar_t>::min();
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; h += dilation_h) {
for (int w = wstart; w < wend; w += dilation_w) {
scalar_t val = bottom_data[h * width + w];
if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) {
maxidx = h * width + w;
maxval = ScalarConvert<scalar_t, accscalar_t>::to(val);
}
}
}
top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval);
top_mask[index] = maxidx;
}
}
static const int BACKWARD_THREADS = 256;
template <typename scalar_t, typename accscalar_t>
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 4)
#else
C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 8)
#endif
__global__ void MaxPoolBackward(const int nthreads, const scalar_t* top_diff,
const int64_t* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
scalar_t* bottom_diff) {
CUDA_KERNEL_LOOP(index, height*width) {
int h = index/width;
int w = index - h * width;
//get some templating performance benefits without actually templating
int phstart, phend, pwstart, pwend;
if (stride_h == 1) {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) + 1;
phend = min((h + pad_h) + 1, pooled_height);
} else if (stride_h == 2) {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / 2 + 1;
phend = min((h + pad_h) / 2 + 1, pooled_height);
} else {
phstart =
(h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / stride_h + 1;
phend = min((h + pad_h) / stride_h + 1, pooled_height);
}
if (stride_w == 1) {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) + 1;
pwend = min((w + pad_w) + 1, pooled_width);
} else if (stride_w == 2) {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / 2 + 1;
pwend = min((w + pad_w) / 2 + 1, pooled_width);
} else {
pwstart =
(w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / stride_w + 1;
pwend = min((w + pad_w) / stride_w + 1, pooled_width);
}
for (int n = blockIdx.y; n < num; n += gridDim.y)
for (int c = blockIdx.z; c < channels; c+= gridDim.z) {
accscalar_t gradient = accscalar_t(0);
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
top_mask += offset;
//get some templating performance benefits without actually templating
if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]);
}
}
}
} else {
if (top_mask[phstart * pooled_width + pwstart] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]);
}
}
bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
}
void max_pool2d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// XXX JIT: Pooling.cpp allows stride.empty().
// XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 2 &&
(stride.empty() || stride.size() == 2) &&
(padding.size() == 1 || padding.size() == 2) &&
(dilation.size() == 1 || dilation.size() == 2),
"max_pool2d_with_indices: internal error: all IntArrayRef sizes must be 2");
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous();
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
const int count = safe_downcast<int, int64_t>(output.numel());
const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
BACKWARD_THREADS);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data<scalar_t>();
scalar_t *input_data = input.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
MaxPoolForward<scalar_t, scalar_t>
<<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count, input_data,
nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); }
);
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"max_pool2d_with_indices_out_cuda_frame failed with error code ",
cudaGetLastError());
if(input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
void max_pool2d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// XXX JIT: Pooling.cpp allows stride.empty().
// XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 2 &&
(stride.empty() || stride.size() == 2) &&
(padding.size() == 1 || padding.size() == 2) &&
(dilation.size() == 1 || dilation.size() == 2),
"max_pool2d_with_indices: internal error: all IntArrayRef sizes must be 2");
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const Tensor input = input_.contiguous();
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
max_pool2d_backward_shape_check(
input_,
gradOutput_,
indices,
nbatch,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
/*cuda=*/ true);
const Tensor gradOutput = gradOutput_.contiguous();
gradInput.resize_as_(input);
int64_t count = input.numel();
dim3 grid;
int imgcount = inputWidth * inputHeight;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = nbatch;
grid.z = nInputPlane;
uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
scalar_t *gradInput_data = gradInput.data<scalar_t>();
int64_t *indices_data = indices.data<int64_t>();
MaxPoolBackward<scalar_t, accscalar_t>
<<<grid, BACKWARD_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
indices_data,
nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
gradInput_data);
}
);
TORCH_CHECK(cudaGetLastError() == cudaSuccess,
"fractional_max_pool2d_backward_out_cuda failed with error code ",
cudaGetLastError());
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool2d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool2d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool2d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
max_pool2d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool2d_with_indices_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
auto gradInput = at::zeros_like(input);
max_pool2d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
|
1497b9bd2638c47d8c805bad51c4fd5f6bab2028.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <stdint.h>
__device__ __attribute__((noinline)) void store(int* where, int what)
{
*where = what;
printf("Storing at %p\n", where);
}
__device__ int zglobal[32];
__global__ void tstfun(int *src, int* dst, const int m)
{
__shared__ int zshared[32];
int p;
int* pp = &p;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = src[i];
printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i\n", i, &src[i], i, &dst[i], m);
// printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i zhared[i]=%p zglobal[i]=%p, pp=%p\n", i, &src[i], i, &dst[i], m, &zshared[i], &zglobal[i], pp);
store(&dst[i], val * m);
store(&zshared[i], val * m);
store(&zglobal[i], val * m);
store(pp, val * m);
dst[i] = val * m;
zshared[i] = val * m;
zglobal[i] = val * m;
}
/// host code
int main(int argc, char* argv[])
{
int *dst, *src;
int *dev_dst, *dev_src;
int num_blocks = 2;
int num_threads = 2;
if(argc > 1)
{
num_threads = atoi(argv[1]);
if(argc > 2)
num_blocks = atoi(argv[2]);
}
int num_total = num_threads * num_blocks;
printf("Tst1: threads=%i blocks:=%i total=%i\n", num_threads, num_blocks, num_total);
dst = new int[num_total];
src = new int[num_total];
for(int i = 0; i < num_total; ++ i)
{
dst[i] = 0;
src[i] = i + 10;
}
checkCudaErrors(hipMalloc(&dev_src, sizeof(int) * num_total));
checkCudaErrors(hipMemcpy(dev_src, src, sizeof(int) * num_total, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&dev_dst, sizeof(int) * num_total));
checkCudaErrors(hipMemset(dev_dst, 0, sizeof(int) * num_total));
const int m = 5;
hipLaunchKernelGGL(( tstfun), dim3(num_blocks), dim3(num_threads), 0, 0, dev_src, dev_dst, m);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(dst, dev_dst, sizeof(int) * num_total, hipMemcpyDeviceToHost));
for(int i = 0; i < num_total; ++ i)
{
if(dst[i] != src[i] * m)
{
fprintf(stderr, "Tst1: Error At index: %i: %i\n", i, dst[i]);
return -1;
}
}
printf("Tst1: Success (%i*%i=%i).\n", num_blocks, num_total, num_total);
printf("Tst1: no hazards expected.\n");
return 0;
}
| 1497b9bd2638c47d8c805bad51c4fd5f6bab2028.cu | #include <iostream>
#include <unistd.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <stdint.h>
__device__ __attribute__((noinline)) void store(int* where, int what)
{
*where = what;
printf("Storing at %p\n", where);
}
__device__ int zglobal[32];
__global__ void tstfun(int *src, int* dst, const int m)
{
__shared__ int zshared[32];
int p;
int* pp = &p;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int val = src[i];
printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i\n", i, &src[i], i, &dst[i], m);
// printf("Tst1: &src[%i]=%p &dst[%i]=%p m=%i zhared[i]=%p zglobal[i]=%p, pp=%p\n", i, &src[i], i, &dst[i], m, &zshared[i], &zglobal[i], pp);
store(&dst[i], val * m);
store(&zshared[i], val * m);
store(&zglobal[i], val * m);
store(pp, val * m);
dst[i] = val * m;
zshared[i] = val * m;
zglobal[i] = val * m;
}
/// host code
int main(int argc, char* argv[])
{
int *dst, *src;
int *dev_dst, *dev_src;
int num_blocks = 2;
int num_threads = 2;
if(argc > 1)
{
num_threads = atoi(argv[1]);
if(argc > 2)
num_blocks = atoi(argv[2]);
}
int num_total = num_threads * num_blocks;
printf("Tst1: threads=%i blocks:=%i total=%i\n", num_threads, num_blocks, num_total);
dst = new int[num_total];
src = new int[num_total];
for(int i = 0; i < num_total; ++ i)
{
dst[i] = 0;
src[i] = i + 10;
}
checkCudaErrors(cudaMalloc(&dev_src, sizeof(int) * num_total));
checkCudaErrors(cudaMemcpy(dev_src, src, sizeof(int) * num_total, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&dev_dst, sizeof(int) * num_total));
checkCudaErrors(cudaMemset(dev_dst, 0, sizeof(int) * num_total));
const int m = 5;
tstfun<<<num_blocks, num_threads>>>(dev_src, dev_dst, m);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(dst, dev_dst, sizeof(int) * num_total, cudaMemcpyDeviceToHost));
for(int i = 0; i < num_total; ++ i)
{
if(dst[i] != src[i] * m)
{
fprintf(stderr, "Tst1: Error At index: %i: %i\n", i, dst[i]);
return -1;
}
}
printf("Tst1: Success (%i*%i=%i).\n", num_blocks, num_total, num_total);
printf("Tst1: no hazards expected.\n");
return 0;
}
|
898c56ed88a2b86dc48b622162b7442509bb7dbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Math
* Class : none
* Language : CUDA
* Description : Implementation of Cuda wrappers for arithmetic functions
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#ifndef IUMATH_ARITHMETIC_CU
#define IUMATH_ARITHMETIC_CU
#include <iucore/iutextures.cuh>
#include <iucutil.h>
#include "arithmetic.cuh"
namespace iuprivate {
/* ****************************************************************************
* weighted add
* ****************************************************************************/
// kernel: weighted add; 32-bit;
__global__ void cuAddWeightedKernel_32f_C1(
const float weight1, const float weight2, float* dst, const size_t stride,
const int xoff, const int yoff, const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = weight1*tex2D(tex1_32f_C1__, xx, yy) +
weight2*tex2D(tex2_32f_C1__, xx, yy);
}
}
// wrapper: weighted add; 32-bit;
IuStatus cuAddWeighted(const iu::ImageGpu_32f_C1* src1, const float& weight1,
const iu::ImageGpu_32f_C1* src2, const float& weight2,
iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch());
hipBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddWeightedKernel_32f_C1) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
weight1, weight2, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C1__);
hipUnbindTexture(&tex2_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
/******************************************************************************
multiplication with factor
*******************************************************************************/
// kernel: multiplication with factor; 8-bit; 1-channel
__global__ void cuMulCKernel(const unsigned char factor, unsigned char* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
unsigned char val = tex2D(tex1_8u_C1__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 8-bit; 1-channel
IuStatus cuMulC(const iu::ImageGpu_8u_C1* src, const unsigned char& factor, iu::ImageGpu_8u_C1* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar1>();
hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_8u_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 8-bit; 4-channel
__global__ void cuMulCKernel(const uchar4 factor, uchar4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
uchar4 val = tex2D(tex1_8u_C4__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 8-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_8u_C4* src, const uchar4& factor, iu::ImageGpu_8u_C4* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar4>();
hipBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_8u_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 1-channel
__global__ void cuMulCKernel(const float factor, float* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float val = tex2D(tex1_32f_C1__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 1-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C1* src, const float& factor, iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: volume multiplication with factor; 32-bit; 1-channel
__global__ void cuVolMulCKernel(const float factor, float* dst, const float*src,
const size_t stride, const size_t slice_stride,
const int width, const int height, const int depth)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
if(x<width && y<height)
{
for (int z=0; z<depth; z++)
{
int vc = oc + z*slice_stride;
dst[vc] = src[vc] * factor;
}
}
}
// wrapper: volume multiplication with factor; 32-bit; 1-channel
IuStatus cuMulC(const iu::VolumeGpu_32f_C1* src, const float& factor,
iu::VolumeGpu_32f_C1* dst)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuVolMulCKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, factor, dst->data(), src->data(),
dst->stride(), dst->slice_stride(), dst->width(), dst->height(), dst->depth());
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 2-channel
__global__ void cuMulCKernel(const float2 factor, float2* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float2 val = tex2D(tex1_32f_C2__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C2* src, const float2& factor, iu::ImageGpu_32f_C2* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float2>();
hipBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C2__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 4-channel
__global__ void cuMulCKernel(const float4 factor, float4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float4 val = tex2D(tex1_32f_C4__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C4* src, const float4& factor, iu::ImageGpu_32f_C4* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>();
hipBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuMulCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
/******************************************************************************
add val
*******************************************************************************/
// kernel: add val; 8-bit; 1-channel
__global__ void cuAddCKernel(const unsigned char val, unsigned char* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_8u_C1__, xx, yy);
}
}
// wrapper: add val; 8-bit; 1-channel
IuStatus cuAddC(const iu::ImageGpu_8u_C1* src, const unsigned char& val, iu::ImageGpu_8u_C1* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar1>();
hipBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_8u_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 8-bit; 4-channel
__global__ void cuAddCKernel(const uchar4 val, uchar4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
uchar4 value = tex2D(tex1_8u_C4__, xx, yy);
value.x = value.x + val.x;
value.y = value.y + val.y;
value.z = value.z + val.z;
value.w = value.w + val.w;
dst[oc] = value;
}
}
// wrapper: add val; 8-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_8u_C4* src, const uchar4& val, iu::ImageGpu_8u_C4* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<uchar4>();
hipBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_8u_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 1-channel
__global__ void cuAddCKernel(const float val, float* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C1__, xx, yy);
}
}
// wrapper: add val; 32-bit; 1-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C1* src, const float& val, iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 2-channel
__global__ void cuAddCKernel(const float2 val, float2* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C2__, xx, yy);
}
}
// wrapper: add val; 32-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C2* src, const float2& val, iu::ImageGpu_32f_C2* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float2>();
hipBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C2__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 1-channel
__global__ void cuAddCKernel(const float4 val, float4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C4__, xx, yy);
}
}
// wrapper: add val; 32-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C4* src, const float4& val, iu::ImageGpu_32f_C4* dst, const IuRect& roi)
{
// bind textures
hipChannelFormatDesc channel_desc = hipCreateChannelDesc<float4>();
hipBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
hipLaunchKernelGGL(( cuAddCKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
hipUnbindTexture(&tex1_32f_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
} // namespace iuprivate
#endif // IUMATH_ARITHMETIC_CU
| 898c56ed88a2b86dc48b622162b7442509bb7dbc.cu | /*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Math
* Class : none
* Language : CUDA
* Description : Implementation of Cuda wrappers for arithmetic functions
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#ifndef IUMATH_ARITHMETIC_CU
#define IUMATH_ARITHMETIC_CU
#include <iucore/iutextures.cuh>
#include <iucutil.h>
#include "arithmetic.cuh"
namespace iuprivate {
/* ****************************************************************************
* weighted add
* ****************************************************************************/
// kernel: weighted add; 32-bit;
__global__ void cuAddWeightedKernel_32f_C1(
const float weight1, const float weight2, float* dst, const size_t stride,
const int xoff, const int yoff, const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = weight1*tex2D(tex1_32f_C1__, xx, yy) +
weight2*tex2D(tex2_32f_C1__, xx, yy);
}
}
// wrapper: weighted add; 32-bit;
IuStatus cuAddWeighted(const iu::ImageGpu_32f_C1* src1, const float& weight1,
const iu::ImageGpu_32f_C1* src2, const float& weight2,
iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, &tex1_32f_C1__, src1->data(), &channel_desc, src1->width(), src1->height(), src1->pitch());
cudaBindTexture2D(0, &tex2_32f_C1__, src2->data(), &channel_desc, src2->width(), src2->height(), src2->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddWeightedKernel_32f_C1 <<< dimGrid, dimBlock >>> (
weight1, weight2, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C1__);
cudaUnbindTexture(&tex2_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
/******************************************************************************
multiplication with factor
*******************************************************************************/
// kernel: multiplication with factor; 8-bit; 1-channel
__global__ void cuMulCKernel(const unsigned char factor, unsigned char* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
unsigned char val = tex2D(tex1_8u_C1__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 8-bit; 1-channel
IuStatus cuMulC(const iu::ImageGpu_8u_C1* src, const unsigned char& factor, iu::ImageGpu_8u_C1* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>();
cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuMulCKernel <<< dimGrid, dimBlock >>> (
factor, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_8u_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 8-bit; 4-channel
__global__ void cuMulCKernel(const uchar4 factor, uchar4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
uchar4 val = tex2D(tex1_8u_C4__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 8-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_8u_C4* src, const uchar4& factor, iu::ImageGpu_8u_C4* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar4>();
cudaBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuMulCKernel <<< dimGrid, dimBlock >>> (
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_8u_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 1-channel
__global__ void cuMulCKernel(const float factor, float* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float val = tex2D(tex1_32f_C1__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 1-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C1* src, const float& factor, iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuMulCKernel <<< dimGrid, dimBlock >>> (
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: volume multiplication with factor; 32-bit; 1-channel
__global__ void cuVolMulCKernel(const float factor, float* dst, const float*src,
const size_t stride, const size_t slice_stride,
const int width, const int height, const int depth)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
if(x<width && y<height)
{
for (int z=0; z<depth; z++)
{
int vc = oc + z*slice_stride;
dst[vc] = src[vc] * factor;
}
}
}
// wrapper: volume multiplication with factor; 32-bit; 1-channel
IuStatus cuMulC(const iu::VolumeGpu_32f_C1* src, const float& factor,
iu::VolumeGpu_32f_C1* dst)
{
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuVolMulCKernel<<<dimGrid, dimBlock>>>(factor, dst->data(), src->data(),
dst->stride(), dst->slice_stride(), dst->width(), dst->height(), dst->depth());
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 2-channel
__global__ void cuMulCKernel(const float2 factor, float2* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float2 val = tex2D(tex1_32f_C2__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C2* src, const float2& factor, iu::ImageGpu_32f_C2* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float2>();
cudaBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuMulCKernel <<< dimGrid, dimBlock >>> (
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C2__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: multiplication with factor; 32-bit; 4-channel
__global__ void cuMulCKernel(const float4 factor, float4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
float4 val = tex2D(tex1_32f_C4__, xx, yy);
dst[oc] = val * factor;
}
}
// wrapper: multiplication with factor; 32-bit; 4-channel
IuStatus cuMulC(const iu::ImageGpu_32f_C4* src, const float4& factor, iu::ImageGpu_32f_C4* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>();
cudaBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuMulCKernel <<< dimGrid, dimBlock >>> (
factor, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
/******************************************************************************
add val
*******************************************************************************/
// kernel: add val; 8-bit; 1-channel
__global__ void cuAddCKernel(const unsigned char val, unsigned char* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_8u_C1__, xx, yy);
}
}
// wrapper: add val; 8-bit; 1-channel
IuStatus cuAddC(const iu::ImageGpu_8u_C1* src, const unsigned char& val, iu::ImageGpu_8u_C1* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar1>();
cudaBindTexture2D(0, &tex1_8u_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddCKernel <<< dimGrid, dimBlock >>> (
val, dst->data(roi.x, roi.y), dst->stride(), roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_8u_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 8-bit; 4-channel
__global__ void cuAddCKernel(const uchar4 val, uchar4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
uchar4 value = tex2D(tex1_8u_C4__, xx, yy);
value.x = value.x + val.x;
value.y = value.y + val.y;
value.z = value.z + val.z;
value.w = value.w + val.w;
dst[oc] = value;
}
}
// wrapper: add val; 8-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_8u_C4* src, const uchar4& val, iu::ImageGpu_8u_C4* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<uchar4>();
cudaBindTexture2D(0, &tex1_8u_C4__, (uchar4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddCKernel <<< dimGrid, dimBlock >>> (
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_8u_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 1-channel
__global__ void cuAddCKernel(const float val, float* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C1__, xx, yy);
}
}
// wrapper: add val; 32-bit; 1-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C1* src, const float& val, iu::ImageGpu_32f_C1* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, &tex1_32f_C1__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddCKernel <<< dimGrid, dimBlock >>> (
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C1__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 2-channel
__global__ void cuAddCKernel(const float2 val, float2* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C2__, xx, yy);
}
}
// wrapper: add val; 32-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C2* src, const float2& val, iu::ImageGpu_32f_C2* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float2>();
cudaBindTexture2D(0, &tex1_32f_C2__, src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddCKernel <<< dimGrid, dimBlock >>> (
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C2__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
// kernel: add val; 32-bit; 1-channel
__global__ void cuAddCKernel(const float4 val, float4* dst, const size_t stride,
const int xoff, const int yoff,
const int width, const int height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
const unsigned int oc = y*stride+x;
x += xoff;
y += yoff;
float xx = x+0.5f;
float yy = y+0.5f;
if(x>=0 && y>= 0 && x<width && y<height)
{
dst[oc] = val + tex2D(tex1_32f_C4__, xx, yy);
}
}
// wrapper: add val; 32-bit; 4-channel
IuStatus cuAddC(const iu::ImageGpu_32f_C4* src, const float4& val, iu::ImageGpu_32f_C4* dst, const IuRect& roi)
{
// bind textures
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float4>();
cudaBindTexture2D(0, &tex1_32f_C4__, (float4*)src->data(), &channel_desc, src->width(), src->height(), src->pitch());
// fragmentation
unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst->width(), dimBlock.x),
iu::divUp(dst->height(), dimBlock.y));
cuAddCKernel <<< dimGrid, dimBlock >>> (
val, dst->data(roi.x, roi.y), dst->stride(),
roi.x, roi.y, roi.width, roi.height);
// unbind textures
cudaUnbindTexture(&tex1_32f_C4__);
// error check
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
} // namespace iuprivate
#endif // IUMATH_ARITHMETIC_CU
|
a2c3b510908448c54a53806ef9b5791c005bac5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//warm up kernel
__global__ void warmup_kernel(int * input,int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
if (tid % (2 * offset) == 0)
{
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = input[gid];
}
}
//reduction neighbored pairs kernel
__global__ void redunction_neighbored_pairs_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
if (tid % (2 * offset) == 0)
{
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = input[gid];
}
}
//reduction neighbored pairs imporved kernel
__global__ void reduction_neighbored_pairs_improved_1(
int * int_array, int * temp_array, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
//local data block pointer
int * i_data = int_array + blockDim.x * blockIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
int index = 2 * offset * tid;
if (index < blockDim.x)
{
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp_array[blockIdx.x] = int_array[gid];
}
}
//interleaved pairs kernel
__global__ void reduction_interleaved_pairs_1(int * int_array,
int * temp_array, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2)
{
if (tid < offset)
{
int_array[gid] += int_array[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp_array[blockIdx.x] = int_array[gid];
}
}
// reduction with unrolling 2 blocks
__global__ void reduction_interleaved_unrolling_blocks2_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
//start index for this thread
int index = blockDim.x * blockIdx.x * 2 + threadIdx.x;
//local index for this block
int * i_data = input + blockDim.x * blockIdx.x * 2;
//unrolling two blocks
if ((index + blockDim.x)< size)
{
input[index] += input[index + blockDim.x];
}
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0;
offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction with unrolling 8 blocks
__global__ void reduction_interleaved_unrolling_blocks8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
//local data pointer
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction warp unrolling with 8 blocks unrolling
__global__ void reduction_kernel_interleaved_warp_unrolling8_1(int * input,
int * temp_array, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
//local data pointer
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
for (int offset = blockDim.x / 2; offset >= 64;
offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp_array[blockIdx.x] = i_data[0];
}
}
// reduction complete unrolling
__global__ void reduction_kernel_complete_unrolling8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//complete unrolling manually
//if the block dim == 1024
if (blockDim.x == 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
// warp unrolling
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction with complete unrolling and template functions
template<unsigned int iblock_size>
__global__ void reduction_kernel_complete_template8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
//unrolling blocks
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//manual unrolling depending on block size
if (iblock_size >= 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (iblock_size >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (iblock_size >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (iblock_size >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
//unrolling warp
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
//int main(int argc, char ** argv)
//{
// //printf("Performance comparison of reduction kernels \n");
//
// int size = 1 << 25;
// int byte_size = size * sizeof(int);
// int block_size = 256;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size));
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(hipMalloc((void**)&d_input, byte_size));
// gpuErrchk(hipMalloc((void**)&d_temp, temp_array_byte_size));
//
// int gpu_result = 0;
// dim3 new_grid2(grid.x / 2);
// dim3 new_grid8(grid.x / 8);
//
// // 0 warm up kernel
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// warmup_kernel << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
//
// // 1 naive reduction implementation : neighbored pairs approach
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// redunction_neighbored_pairs_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
//
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// // 2. improved neighbored pairs approach
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_neighbored_pairs_improved_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 3. interleaved pairs approach
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_interleaved_pairs_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 4. 2 data blocks unrolled to one
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_interleaved_unrolling_blocks2_1 << <new_grid2, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 5. 8 data blocks unrolled to one
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_interleaved_unrolling_blocks8_1 << <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 6. warp unrolling for 8 blocks unrolloed
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_kernel_interleaved_warp_unrolling8_1<< <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 7. complete unrolling
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// reduction_kernel_complete_unrolling8_1 << <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 8. complete unrolling with templated functions
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// switch (block_size)
// {
// case 1024:
// reduction_kernel_complete_template8_1 <1024> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_kernel_complete_template8_1 <512> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_kernel_complete_template8_1 <256> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_kernel_complete_template8_1 <128> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// }
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(hipFree(d_input));
// gpuErrchk(hipFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(hipDeviceReset());
// return 0;
//} | a2c3b510908448c54a53806ef9b5791c005bac5e.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//warm up kernel
__global__ void warmup_kernel(int * input,int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
if (tid % (2 * offset) == 0)
{
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = input[gid];
}
}
//reduction neighbored pairs kernel
__global__ void redunction_neighbored_pairs_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
if (tid % (2 * offset) == 0)
{
input[gid] += input[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = input[gid];
}
}
//reduction neighbored pairs imporved kernel
__global__ void reduction_neighbored_pairs_improved_1(
int * int_array, int * temp_array, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
//local data block pointer
int * i_data = int_array + blockDim.x * blockIdx.x;
if (gid > size)
return;
for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
{
int index = 2 * offset * tid;
if (index < blockDim.x)
{
i_data[index] += i_data[index + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp_array[blockIdx.x] = int_array[gid];
}
}
//interleaved pairs kernel
__global__ void reduction_interleaved_pairs_1(int * int_array,
int * temp_array, int size)
{
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid > size)
return;
for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2)
{
if (tid < offset)
{
int_array[gid] += int_array[gid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp_array[blockIdx.x] = int_array[gid];
}
}
// reduction with unrolling 2 blocks
__global__ void reduction_interleaved_unrolling_blocks2_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
//start index for this thread
int index = blockDim.x * blockIdx.x * 2 + threadIdx.x;
//local index for this block
int * i_data = input + blockDim.x * blockIdx.x * 2;
//unrolling two blocks
if ((index + blockDim.x)< size)
{
input[index] += input[index + blockDim.x];
}
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0;
offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction with unrolling 8 blocks
__global__ void reduction_interleaved_unrolling_blocks8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
//local data pointer
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction warp unrolling with 8 blocks unrolling
__global__ void reduction_kernel_interleaved_warp_unrolling8_1(int * input,
int * temp_array, int size)
{
int tid = threadIdx.x;
//element index for this thread
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
//local data pointer
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
for (int offset = blockDim.x / 2; offset >= 64;
offset = offset / 2)
{
if (tid < offset)
{
i_data[tid] += i_data[tid + offset];
}
__syncthreads();
}
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp_array[blockIdx.x] = i_data[0];
}
}
// reduction complete unrolling
__global__ void reduction_kernel_complete_unrolling8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//complete unrolling manually
//if the block dim == 1024
if (blockDim.x == 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
// warp unrolling
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
// reduction with complete unrolling and template functions
template<unsigned int iblock_size>
__global__ void reduction_kernel_complete_template8_1(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
//unrolling blocks
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//manual unrolling depending on block size
if (iblock_size >= 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (iblock_size >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (iblock_size >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (iblock_size >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
//unrolling warp
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
//int main(int argc, char ** argv)
//{
// //printf("Performance comparison of reduction kernels \n");
//
// int size = 1 << 25;
// int byte_size = size * sizeof(int);
// int block_size = 256;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size));
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
// gpuErrchk(cudaMalloc((void**)&d_temp, temp_array_byte_size));
//
// int gpu_result = 0;
// dim3 new_grid2(grid.x / 2);
// dim3 new_grid8(grid.x / 8);
//
// // 0 warm up kernel
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// warmup_kernel << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
//
// // 1 naive reduction implementation : neighbored pairs approach
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// redunction_neighbored_pairs_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
//
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// // 2. improved neighbored pairs approach
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_neighbored_pairs_improved_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 3. interleaved pairs approach
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_interleaved_pairs_1 << <grid, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 4. 2 data blocks unrolled to one
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_interleaved_unrolling_blocks2_1 << <new_grid2, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 5. 8 data blocks unrolled to one
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_interleaved_unrolling_blocks8_1 << <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 6. warp unrolling for 8 blocks unrolloed
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_kernel_interleaved_warp_unrolling8_1<< <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 7. complete unrolling
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// reduction_kernel_complete_unrolling8_1 << <new_grid8, block >> > (d_input, d_temp, size);
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// // 8. complete unrolling with templated functions
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// switch (block_size)
// {
// case 1024:
// reduction_kernel_complete_template8_1 <1024> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_kernel_complete_template8_1 <512> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_kernel_complete_template8_1 <256> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_kernel_complete_template8_1 <128> << < new_grid8, block >> > (d_input, d_temp, size);
// break;
// }
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(cudaFree(d_input));
// gpuErrchk(cudaFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(cudaDeviceReset());
// return 0;
//} |
003ceed128d8542484a860810897113ae16e91a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudamat_kernels.cuh"
#include "float.h"
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob ? gData[i]:val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i];
}
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val;
}
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i];
}
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val;
}
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val ? val:mat[i];
}
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val ? val:mat[i];
}
}
__global__ void kSumAll(float* mat, unsigned int len) {
__shared__ float sum_vals[32];
float val = 0;
for (unsigned int i = threadIdx.x; i < len; i += 32)
val += mat[i];
sum_vals[threadIdx.x] = val;
__syncthreads();
if (threadIdx.x == 0) {
val = 0;
for (unsigned int i = 0; i < 32; i++)
val += sum_vals[i];
device_val = val;
}
}
__global__ void kCumsum(float *mat, float *target, float *temp, unsigned int height) {
// extern __shared__ float temp[];// allocated on invocation
const int thid = threadIdx.x;
if (2*thid < height) {
const int super_offset = blockIdx.x * height;
target += super_offset;
mat += super_offset;
temp += super_offset;
int offset = 1;
//float s = 0.0;
temp[2*thid] = mat[2*thid]; // load input into shared memory
temp[2*thid+1] = mat[2*thid+1];
for (int d = height>>1; d > 0; d >>= 1) {// build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
} else if (thid == d && thid % 2 == 1) {
//int bi = offset*(2*thid+2)-1;
//temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[height - 1] = 0; } // clear the last element
for (int d = 1; d < height; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to device memory
// if (thid == -1) {
// target[0] = temp[1];
// target[height-1] = s;
// } else {
target[2*thid] = temp[2*thid];
target[2*thid+1] = temp[2*thid+1];
// }
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* acc, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
acc[blockIdx.x * height + cur_argmax] += 1;
}
}
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
target[blockIdx.x * height + i] = 0;
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[blockIdx.x * height + cur_argmax] = 1;
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
float cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[blockIdx.x] = cur_argmax;
}
}
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float sum_vals[32];
float cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i];
}
sum_vals[threadIdx.x] = cur_sum;
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = 0;
for (unsigned int i = 0; i < 32; i++)
cur_sum += sum_vals[i];
target[blockIdx.x] = cur_sum;
}
}
__global__ void kNormLimitColumnwise(float* mat, float* target,
float norm, unsigned int width,
unsigned int height) {
__shared__ float sum_vals[33];
float cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i];
}
sum_vals[threadIdx.x] = cur_sum;
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = 0;
for (unsigned int i = 0; i < 32; i++)
cur_sum += sum_vals[i];
sum_vals[32] = norm > cur_sum ? 1 : norm / sqrt(cur_sum);
}
float scale = sum_vals[32];
for (unsigned int i = threadIdx.x; i < height; i += 32) {
target[blockIdx.x * height + i] = mat[blockIdx.x * height + i] * scale;
}
__syncthreads();
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] ? copysignf(1., mat[i]) : 0.;
}
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __sinf(mat[i]);
}
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __cosf(mat[i]);
}
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = 1 / (1 + __expf(-mat[i]));
}
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
}
__global__ void kApplyLog1PlusExpExact(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (log(1 + exp(-mat_i)) + mat_i);
else
target[i] = log(1 + exp(mat_i));
}
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __logf(mat[i] + tiny);
}
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = ceil(mat[i]);
}
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = floor(mat[i]);
}
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = sqrt(mat[i]);
}
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow);
}
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = -mat[i] * __logf(p[i] + tiny);
}
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
}
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = 1. / mat[i];
}
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]) * 0.5;
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapRows(float* source, float* target, float* indices1, float* indices2, int nRowIs, int nCols, int nRows){
__shared__ int sourceRowIndices[32], targetRowIndices[32];
const int startRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices1[startRowI + tid]);
targetRowIndices[tid] = int(indices2[startRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nRows)
sourceRowIndices[tid] = -1;
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = sourceRowIndices[i], targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32) {
const float temp1 = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
const float temp2 = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : target[targetRowI * nCols + colI];
if (sourceRowI != -1)
source[sourceRowI * nCols + colI] = temp2;
if (targetRowI != -1)
target[targetRowI * nCols + colI] = temp1;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
| 003ceed128d8542484a860810897113ae16e91a2.cu | #include "cudamat_kernels.cuh"
#include "float.h"
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob ? gData[i]:val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i];
}
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val;
}
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i];
}
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val;
}
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] > val ? val:mat[i];
}
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] < val ? val:mat[i];
}
}
__global__ void kSumAll(float* mat, unsigned int len) {
__shared__ float sum_vals[32];
float val = 0;
for (unsigned int i = threadIdx.x; i < len; i += 32)
val += mat[i];
sum_vals[threadIdx.x] = val;
__syncthreads();
if (threadIdx.x == 0) {
val = 0;
for (unsigned int i = 0; i < 32; i++)
val += sum_vals[i];
device_val = val;
}
}
__global__ void kCumsum(float *mat, float *target, float *temp, unsigned int height) {
// extern __shared__ float temp[];// allocated on invocation
const int thid = threadIdx.x;
if (2*thid < height) {
const int super_offset = blockIdx.x * height;
target += super_offset;
mat += super_offset;
temp += super_offset;
int offset = 1;
//float s = 0.0;
temp[2*thid] = mat[2*thid]; // load input into shared memory
temp[2*thid+1] = mat[2*thid+1];
for (int d = height>>1; d > 0; d >>= 1) {// build sum in place up the tree
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
} else if (thid == d && thid % 2 == 1) {
//int bi = offset*(2*thid+2)-1;
//temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[height - 1] = 0; } // clear the last element
for (int d = 1; d < height; d *= 2) { // traverse down tree & build scan
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to device memory
// if (thid == -1) {
// target[0] = temp[1];
// target[height-1] = s;
// } else {
target[2*thid] = temp[2*thid];
target[2*thid+1] = temp[2*thid+1];
// }
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
float cur_max = -FLT_MAX;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max)
cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max)
cur_max = max_vals[i];
target[blockIdx.x] = cur_max;
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* acc, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
acc[blockIdx.x * height + cur_argmax] += 1;
}
}
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
target[blockIdx.x * height + i] = 0;
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[blockIdx.x * height + cur_argmax] = 1;
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
float cur_argmax = 0;
float val = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < 32; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[blockIdx.x] = cur_argmax;
}
}
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float sum_vals[32];
float cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i];
}
sum_vals[threadIdx.x] = cur_sum;
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = 0;
for (unsigned int i = 0; i < 32; i++)
cur_sum += sum_vals[i];
target[blockIdx.x] = cur_sum;
}
}
__global__ void kNormLimitColumnwise(float* mat, float* target,
float norm, unsigned int width,
unsigned int height) {
__shared__ float sum_vals[33];
float cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += 32) {
cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i];
}
sum_vals[threadIdx.x] = cur_sum;
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = 0;
for (unsigned int i = 0; i < 32; i++)
cur_sum += sum_vals[i];
sum_vals[32] = norm > cur_sum ? 1 : norm / sqrt(cur_sum);
}
float scale = sum_vals[32];
for (unsigned int i = threadIdx.x; i < height; i += 32) {
target[blockIdx.x * height + i] = mat[blockIdx.x * height + i] * scale;
}
__syncthreads();
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] ? copysignf(1., mat[i]) : 0.;
}
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __sinf(mat[i]);
}
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __cosf(mat[i]);
}
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = 1 / (1 + __expf(-mat[i]));
}
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
}
__global__ void kApplyLog1PlusExpExact(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (log(1 + exp(-mat_i)) + mat_i);
else
target[i] = log(1 + exp(mat_i));
}
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __logf(mat[i] + tiny);
}
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = __expf(mat[i]);
}
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = ceil(mat[i]);
}
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = floor(mat[i]);
}
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = sqrt(mat[i]);
}
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow);
}
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = -mat[i] * __logf(p[i] + tiny);
}
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
}
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = 1. / mat[i];
}
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult,
unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]) * 0.5;
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapRows(float* source, float* target, float* indices1, float* indices2, int nRowIs, int nCols, int nRows){
__shared__ int sourceRowIndices[32], targetRowIndices[32];
const int startRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices1[startRowI + tid]);
targetRowIndices[tid] = int(indices2[startRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nRows)
sourceRowIndices[tid] = -1;
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = sourceRowIndices[i], targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32) {
const float temp1 = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
const float temp2 = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : target[targetRowI * nCols + colI];
if (sourceRowI != -1)
source[sourceRowI * nCols + colI] = temp2;
if (targetRowI != -1)
target[targetRowI * nCols + colI] = temp1;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
|
9fc7c38b94c5c52d0e6d51611b46269a6cba4ead.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void conj_kernel(float *data, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
result[threadId] = data[threadId];
result[threadId + 1] = -data[threadId + 1];
} | 9fc7c38b94c5c52d0e6d51611b46269a6cba4ead.cu | #include "includes.h"
__global__ void conj_kernel(float *data, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
result[threadId] = data[threadId];
result[threadId + 1] = -data[threadId + 1];
} |
58b8470fb51baa55fd9564e88c1f890d90b01576.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] = ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
int main () {
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f;//we are not expecting measurements greater 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
// auto err1 = hipHostMalloc(&out, N*4);
auto err1 = hipHostMalloc(&out, N*4, hipHostMallocWriteCombined);
auto err2 = hipMalloc(&in, N*4);
if (err1 != hipSuccess)
{
std::cout << "Allocation ERROR: " << hipGetErrorString(err1) << std::endl;
}
if (err2 != hipSuccess)
{
std::cout << "Allocation ERROR2: " << hipGetErrorString(err2) << std::endl;
}
//make a warmup
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
//Time Measururement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
hipLaunchKernelGGL(( copyKernel), dim3(M), dim3(m), 0, 0, static_cast<int*> (out), static_cast<int*> (in), N);
hipDeviceSynchronize();
auto lstErr = hipGetLastError();
if ( hipSuccess != lstErr )
{
std::cout << lstErr << ": " << hipGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
hipHostFree(out);
hipFree(in);
//it seems we cant use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 15) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
| 58b8470fb51baa55fd9564e88c1f890d90b01576.cu | #include <chrono>
#include <iostream>
//Kernel definition
template<typename T>
__global__
void copyKernel (T* out,
T* in,
const unsigned int N)
{
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x)
{
const unsigned el_id = i;
((T*) out)[el_id] = ((T*) in)[el_id];
// ((T*) out)[(1<<29) + 100] = ((T*) in)[0];
}
}
int main () {
using namespace std::chrono;
unsigned int N = 1<<29; //N is the Number of elements in the Array
double lastMeasurementTimeSpan = 100.0f;//we are not expecting measurements greater 100 s
bool stopMeasurement = false;
std::cout << "np.array("; //output the results so that they can be read easily by python
std::cout << "(";
for (int M = 1; M <= 4; M++)
{
std::cout << "(";
for(int i = 1; i <= 32; i++)
{
if(!stopMeasurement)
{
unsigned int m = 32 * i;
// int* carray;
void* out;
void* in;
// malloc(carray);
// auto err1 = cudaMallocHost(&out, N*4);
auto err1 = cudaHostAlloc(&out, N*4, cudaHostAllocWriteCombined);
auto err2 = cudaMalloc(&in, N*4);
if (err1 != cudaSuccess)
{
std::cout << "Allocation ERROR: " << cudaGetErrorString(err1) << std::endl;
}
if (err2 != cudaSuccess)
{
std::cout << "Allocation ERROR2: " << cudaGetErrorString(err2) << std::endl;
}
//make a warmup
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
//Time Measururement Point 1
high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
for(int x = 1; x <= 10; x++)//run 10 times for better measurement accuracy
{
//run kernel here
copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N);
cudaDeviceSynchronize();
auto lstErr = cudaGetLastError();
if ( cudaSuccess != lstErr )
{
std::cout << lstErr << ": " << cudaGetErrorString(lstErr) << std::endl;
}
}
//Time Measurement Point 2
high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
//Output Time Measurement Result
duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
cudaFreeHost(out);
cudaFree(in);
//it seems we cant use automatic measurement stops
if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1)
{
stopMeasurement = true;
}
else
{
lastMeasurementTimeSpan = time_span.count();
std::cout << time_span.count();
}
}
else
{
std::cout << 0.0;
}
if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
if( M != 15) {std::cout << ",";} //output a , if we aren't the last element of the for loop
}
std::cout << ")";
std::cout << ")" << std::endl;
return 0;
}
|
6e35b63e9c9e8ce6385a4676736a96ed41d66ae5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Dist_between_two_vec_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *v0 = NULL;
hipMalloc(&v0, XSIZE*YSIZE);
float *v1 = NULL;
hipMalloc(&v1, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Dist_between_two_vec_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, v0,v1,size,dst);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Dist_between_two_vec_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, v0,v1,size,dst);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Dist_between_two_vec_naive), dim3(gridBlock),dim3(threadBlock), 0, 0, v0,v1,size,dst);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6e35b63e9c9e8ce6385a4676736a96ed41d66ae5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Dist_between_two_vec_naive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *v0 = NULL;
cudaMalloc(&v0, XSIZE*YSIZE);
float *v1 = NULL;
cudaMalloc(&v1, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Dist_between_two_vec_naive<<<gridBlock,threadBlock>>>(v0,v1,size,dst);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Dist_between_two_vec_naive<<<gridBlock,threadBlock>>>(v0,v1,size,dst);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Dist_between_two_vec_naive<<<gridBlock,threadBlock>>>(v0,v1,size,dst);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
63fd4a0f3f1353b83af37c3bf1c4b58e2cbca2fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
__device__ void _calculate_centre_of_mass(
const int grid_size,
const int i,
const int j,
const bool *grid,
const int influence_rad,
float *massx,
float *massy,
int *neigh
){
const float one = 1.0f/(float)grid_size;
int count = 0;
float mx = 0.0f;
float my = 0.0f;
float nrm = 0.0f;
const float x = i*one;
const float y = j*one;
float dx = 0;
float dy = 0;
const float rad = pow(one*(float)influence_rad, 2.0f);
int k;
for (int a=max(i-influence_rad,0);a<min(i+influence_rad+1,grid_size);a++){
for (int b=max(j-influence_rad,0);b<min(j+influence_rad+1,grid_size);b++){
k = a*grid_size+b;
if (grid[k]){
dx = x-a*one;
dy = y-b*one;
nrm = dx*dx+dy*dy;
if (nrm>rad){
continue;
}
mx += a*one;
my += b*one;
count += 1;
}
}
}
k = i*grid_size+j;
neigh[k] = count;
if (count>0){
mx /= (float)count;
my /= (float)count;
mx = x-mx;
my = y-my;
nrm = mx*mx+my*my;
if (nrm>0.0f){
nrm = sqrt(nrm);
massx[k] = mx/nrm;
massy[k] = my/nrm;
}
else{
massx[k] = 0.0f;
massy[k] = 0.0f;
}
}
else{
massx[k] = 0.0f;
massy[k] = 0.0f;
}
return;
}
__device__ void _count_connected(
const int grid_size,
const int i,
const int j,
const bool *grid,
int *connected
){
int k = i*grid_size+j;
/*if (grid[k]){*/
/* connected[k] = 0;*/
/* return;*/
/*}*/
int count = 0;
for (int a=max(i-1,0);a<min(i+2,grid_size);a++){
for (int b=max(j-1,0);b<min(j+2,grid_size);b++){
k = a*grid_size+b;
if (grid[k]){
count += 1;
}
}
}
k = i*grid_size+j;
connected[k] = count;
return;
}
__global__ void mass(
const int n,
const int grid_size,
const bool *grid,
const int influence_rad,
float *massx,
float *massy,
int *neigh,
int *connected
){
const int ij = blockIdx.x*THREADS + threadIdx.x;
const int i = (int)floor(float(ij)/(float)grid_size);
const int j = (ij-grid_size*i);
if (ij>=n){
return;
}
_calculate_centre_of_mass(grid_size, i, j, grid, influence_rad, massx, massy, neigh);
_count_connected(grid_size, i, j, grid, connected);
}
| 63fd4a0f3f1353b83af37c3bf1c4b58e2cbca2fc.cu | #define THREADS _THREADS_
__device__ void _calculate_centre_of_mass(
const int grid_size,
const int i,
const int j,
const bool *grid,
const int influence_rad,
float *massx,
float *massy,
int *neigh
){
const float one = 1.0f/(float)grid_size;
int count = 0;
float mx = 0.0f;
float my = 0.0f;
float nrm = 0.0f;
const float x = i*one;
const float y = j*one;
float dx = 0;
float dy = 0;
const float rad = pow(one*(float)influence_rad, 2.0f);
int k;
for (int a=max(i-influence_rad,0);a<min(i+influence_rad+1,grid_size);a++){
for (int b=max(j-influence_rad,0);b<min(j+influence_rad+1,grid_size);b++){
k = a*grid_size+b;
if (grid[k]){
dx = x-a*one;
dy = y-b*one;
nrm = dx*dx+dy*dy;
if (nrm>rad){
continue;
}
mx += a*one;
my += b*one;
count += 1;
}
}
}
k = i*grid_size+j;
neigh[k] = count;
if (count>0){
mx /= (float)count;
my /= (float)count;
mx = x-mx;
my = y-my;
nrm = mx*mx+my*my;
if (nrm>0.0f){
nrm = sqrt(nrm);
massx[k] = mx/nrm;
massy[k] = my/nrm;
}
else{
massx[k] = 0.0f;
massy[k] = 0.0f;
}
}
else{
massx[k] = 0.0f;
massy[k] = 0.0f;
}
return;
}
__device__ void _count_connected(
const int grid_size,
const int i,
const int j,
const bool *grid,
int *connected
){
int k = i*grid_size+j;
/*if (grid[k]){*/
/* connected[k] = 0;*/
/* return;*/
/*}*/
int count = 0;
for (int a=max(i-1,0);a<min(i+2,grid_size);a++){
for (int b=max(j-1,0);b<min(j+2,grid_size);b++){
k = a*grid_size+b;
if (grid[k]){
count += 1;
}
}
}
k = i*grid_size+j;
connected[k] = count;
return;
}
__global__ void mass(
const int n,
const int grid_size,
const bool *grid,
const int influence_rad,
float *massx,
float *massy,
int *neigh,
int *connected
){
const int ij = blockIdx.x*THREADS + threadIdx.x;
const int i = (int)floor(float(ij)/(float)grid_size);
const int j = (ij-grid_size*i);
if (ij>=n){
return;
}
_calculate_centre_of_mass(grid_size, i, j, grid, influence_rad, massx, massy, neigh);
_count_connected(grid_size, i, j, grid, connected);
}
|
0f3f237dd27d04337edbfc4f358d810c3d7c7c7b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2019,2020,2021 Sony Corporation.
// Copyright 2021 Sony Group Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// log_sigmoid.cpp
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/log_sigmoid.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(LogSigmoid,
x > (T)0 ? -::log(::exp(-x) + (T)1)
: x - ::log(::exp(x) + (T)1),
dy / (::exp(x) + (T)1), false, true);
}
| 0f3f237dd27d04337edbfc4f358d810c3d7c7c7b.cu | // Copyright 2019,2020,2021 Sony Corporation.
// Copyright 2021 Sony Group Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// log_sigmoid.cpp
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/log_sigmoid.hpp>
#include <nbla/cuda/function/utils/base_transform_unary.cuh>
#include <cmath>
namespace nbla {
NBLA_DEFINE_TRANSFORM_UNARY_CUDA(LogSigmoid,
x > (T)0 ? -std::log(std::exp(-x) + (T)1)
: x - std::log(std::exp(x) + (T)1),
dy / (std::exp(x) + (T)1), false, true);
}
|
b155691747bad0fd294c5af1fade827ee1237094.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/format.h"
#include "dali/operators/debug/dump_image.h"
#include "dali/util/image.h"
namespace dali {
template<>
void DumpImage<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
DALI_ENFORCE(input.shape().sample_dim() == 3,
make_string("Input images must have three dimensions, got input with `",
input.shape().sample_dim(), "` dimensions."));
for (int i = 0; i < input.shape().num_samples(); i++) {
int channels = input.shape().tensor_shape_span(i)[2];
DALI_ENFORCE(channels == 1 || channels == 3,
make_string("Only 3-channel and gray images are supported, got input with `",
channels, "` channels."));
}
// sync before write
CUDA_CALL(hipStreamSynchronize(ws.stream()));
WriteHWCBatch(input, suffix_ + "-" + std::to_string(0));
// Forward the input
output.Copy(input, ws.stream());
}
DALI_REGISTER_OPERATOR(DumpImage, DumpImage<GPUBackend>, GPU);
} // namespace dali
| b155691747bad0fd294c5af1fade827ee1237094.cu | // Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/format.h"
#include "dali/operators/debug/dump_image.h"
#include "dali/util/image.h"
namespace dali {
template<>
void DumpImage<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
DALI_ENFORCE(input.shape().sample_dim() == 3,
make_string("Input images must have three dimensions, got input with `",
input.shape().sample_dim(), "` dimensions."));
for (int i = 0; i < input.shape().num_samples(); i++) {
int channels = input.shape().tensor_shape_span(i)[2];
DALI_ENFORCE(channels == 1 || channels == 3,
make_string("Only 3-channel and gray images are supported, got input with `",
channels, "` channels."));
}
// sync before write
CUDA_CALL(cudaStreamSynchronize(ws.stream()));
WriteHWCBatch(input, suffix_ + "-" + std::to_string(0));
// Forward the input
output.Copy(input, ws.stream());
}
DALI_REGISTER_OPERATOR(DumpImage, DumpImage<GPUBackend>, GPU);
} // namespace dali
|
51f69bde7c2aab1bb820d5e4bab46c7b58e0c689.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 16
// Matrixes Multiplcation (Global Memory)
__global__ void multiply_gm(float *C,float *A,float *B, int nrow,int ncol)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index=idy*ncol+idx;
if (idy<nrow && idx<ncol){
float sum=0.0f;
for(int k=0;k<ncol;k++){
sum+=A[idy*ncol+k]*B[k*ncol+idx];
}
C[index] = sum;
}
}
int div_up(int a,int b){
return(a/b + (a%b == 0 ? 0:1));
}
int main(int argc, char* argv[]){
float *A_h,*B_h,*C_h; // Host matrixes
float *A_d,*B_d,*C_d; //Device matrixes
int nrow = atoi(argv[1]); // rows
int ncol = nrow; // cols
float N=nrow*ncol; // number of elements
//GPU Time
hipEvent_t start, stop;
float time;
size_t size=N * sizeof(float);
A_h = (float *)malloc(size);
B_h = (float *)malloc(size);
C_h = (float *)malloc(size);
//Initializing Host matrixes
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
A_h[i*ncol+j] = 1.0f;
B_h[i*ncol+j] = 2.0f;
//A_h[i*ncol+j] = rand()/100.0f;
//B_h[i*ncol+j] = rand()/100.0f;
}
}
/*
printf("\nMatrix A:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", A_h[i*ncol+j]);
}
printf("\n");
}
printf("\n\nMatrix B:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", B_h[i*ncol+j]);
}
printf("\n");
}
*/
hipMalloc((void **) &A_d,size);
hipMalloc((void **) &B_d,size);
hipMalloc((void **) &C_d,size);
// Host to Device transfer
hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
//Realizamos el clculo en el Device
dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
dim3 n_blocks(div_up(ncol,block_size.x),div_up(nrow,block_size.y)) ;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( multiply_gm), dim3(n_blocks), dim3(block_size) , 0, 0, C_d,A_d,B_d,nrow,ncol);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("\t Time : %f ms\n",time);
// Result from Device to Host
hipMemcpy(C_h, C_d, size,hipMemcpyDeviceToHost);
/*
//Results
printf("\n\nMatrix C:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", C_h[i*ncol+j]);
}
printf("\n");
}
*/
system("sleep 1");
free(A_h);
free(B_h);
free(C_h);
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
} | 51f69bde7c2aab1bb820d5e4bab46c7b58e0c689.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 16
// Matrixes Multiplcation (Global Memory)
__global__ void multiply_gm(float *C,float *A,float *B, int nrow,int ncol)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int index=idy*ncol+idx;
if (idy<nrow && idx<ncol){
float sum=0.0f;
for(int k=0;k<ncol;k++){
sum+=A[idy*ncol+k]*B[k*ncol+idx];
}
C[index] = sum;
}
}
int div_up(int a,int b){
return(a/b + (a%b == 0 ? 0:1));
}
int main(int argc, char* argv[]){
float *A_h,*B_h,*C_h; // Host matrixes
float *A_d,*B_d,*C_d; //Device matrixes
int nrow = atoi(argv[1]); // rows
int ncol = nrow; // cols
float N=nrow*ncol; // number of elements
//GPU Time
cudaEvent_t start, stop;
float time;
size_t size=N * sizeof(float);
A_h = (float *)malloc(size);
B_h = (float *)malloc(size);
C_h = (float *)malloc(size);
//Initializing Host matrixes
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
A_h[i*ncol+j] = 1.0f;
B_h[i*ncol+j] = 2.0f;
//A_h[i*ncol+j] = rand()/100.0f;
//B_h[i*ncol+j] = rand()/100.0f;
}
}
/*
printf("\nMatrix A:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", A_h[i*ncol+j]);
}
printf("\n");
}
printf("\n\nMatrix B:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", B_h[i*ncol+j]);
}
printf("\n");
}
*/
cudaMalloc((void **) &A_d,size);
cudaMalloc((void **) &B_d,size);
cudaMalloc((void **) &C_d,size);
// Host to Device transfer
cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
//Realizamos el cálculo en el Device
dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
dim3 n_blocks(div_up(ncol,block_size.x),div_up(nrow,block_size.y)) ;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
multiply_gm<<< n_blocks, block_size >>> (C_d,A_d,B_d,nrow,ncol);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("\t Time : %f ms\n",time);
// Result from Device to Host
cudaMemcpy(C_h, C_d, size,cudaMemcpyDeviceToHost);
/*
//Results
printf("\n\nMatrix C:\n");
for (int i=0; i<nrow; i++){
for(int j=0;j<ncol;j++){
printf("%.2f ", C_h[i*ncol+j]);
}
printf("\n");
}
*/
system("sleep 1");
free(A_h);
free(B_h);
free(C_h);
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
} |
66bf04e3c6e21761849cf7c6b45d5fc741586b4a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/device/buffer.hpp>
#include <gtest/gtest.h>
#include <raft/sparse/cusparse_wrappers.h>
#include <raft/sparse/convert/dense.cuh>
#include "../test_utils.h"
namespace raft {
namespace sparse {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct CSRToDenseInputs {
value_idx nrows;
value_idx ncols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_ref_h;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(::std::ostream &os,
const CSRToDenseInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class CSRToDenseTest
: public ::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
allocate(indptr, indptr_h.size());
allocate(indices, indices_h.size());
allocate(data, data_h.size());
update_device(indptr, indptr_h.data(), indptr_h.size(), stream);
update_device(indices, indices_h.data(), indices_h.size(), stream);
update_device(data, data_h.data(), data_h.size(), stream);
std::vector<value_t> out_ref_h = params.out_ref_h;
allocate(out_ref, out_ref_h.size());
update_device(out_ref, out_ref_h.data(), out_ref_h.size(), stream);
allocate(out, out_ref_h.size());
}
void SetUp() override {
params = ::testing::TestWithParam<
CSRToDenseInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(hipStreamCreate(&stream));
CUSPARSE_CHECK(hipsparseCreate(&handle));
make_data();
convert::csr_to_dense(handle, params.nrows, params.ncols, indptr, indices,
data, params.nrows, out, stream, true);
CUDA_CHECK(hipStreamSynchronize(stream));
CUSPARSE_CHECK(hipsparseDestroy(handle));
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(indptr));
CUDA_CHECK(hipFree(indices));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_ref));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out, out_ref, params.out_ref_h.size(), Compare<value_t>()));
}
protected:
hipStream_t stream;
hipsparseHandle_t handle;
// input data
value_idx *indptr, *indices;
value_t *data;
// output data
value_t *out;
// expected output data
value_t *out_ref;
CSRToDenseInputs<value_idx, value_t> params;
};
const std::vector<CSRToDenseInputs<int, float>> inputs_i32_f = {
{4,
4,
{0, 2, 4, 6, 8},
{0, 1, 2, 3, 0, 1, 2, 3}, // indices
{1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f},
{1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 5.0f, 50.0f, 28.0f, 0.0f, 0.0f,
0.0f, 0.0f, 16.0f, 2.0f}},
};
typedef CSRToDenseTest<int, float> CSRToDenseTestF;
TEST_P(CSRToDenseTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(CSRToDenseTest, CSRToDenseTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace sparse
}; // end namespace raft
| 66bf04e3c6e21761849cf7c6b45d5fc741586b4a.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cusparse_v2.h>
#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/device/buffer.hpp>
#include <gtest/gtest.h>
#include <raft/sparse/cusparse_wrappers.h>
#include <raft/sparse/convert/dense.cuh>
#include "../test_utils.h"
namespace raft {
namespace sparse {
using namespace raft;
using namespace raft::sparse;
template <typename value_idx, typename value_t>
struct CSRToDenseInputs {
value_idx nrows;
value_idx ncols;
std::vector<value_idx> indptr_h;
std::vector<value_idx> indices_h;
std::vector<value_t> data_h;
std::vector<value_t> out_ref_h;
};
template <typename value_idx, typename value_t>
::std::ostream &operator<<(::std::ostream &os,
const CSRToDenseInputs<value_idx, value_t> &dims) {
return os;
}
template <typename value_idx, typename value_t>
class CSRToDenseTest
: public ::testing::TestWithParam<CSRToDenseInputs<value_idx, value_t>> {
protected:
void make_data() {
std::vector<value_idx> indptr_h = params.indptr_h;
std::vector<value_idx> indices_h = params.indices_h;
std::vector<value_t> data_h = params.data_h;
allocate(indptr, indptr_h.size());
allocate(indices, indices_h.size());
allocate(data, data_h.size());
update_device(indptr, indptr_h.data(), indptr_h.size(), stream);
update_device(indices, indices_h.data(), indices_h.size(), stream);
update_device(data, data_h.data(), data_h.size(), stream);
std::vector<value_t> out_ref_h = params.out_ref_h;
allocate(out_ref, out_ref_h.size());
update_device(out_ref, out_ref_h.data(), out_ref_h.size(), stream);
allocate(out, out_ref_h.size());
}
void SetUp() override {
params = ::testing::TestWithParam<
CSRToDenseInputs<value_idx, value_t>>::GetParam();
std::shared_ptr<raft::mr::device::allocator> alloc(
new raft::mr::device::default_allocator);
CUDA_CHECK(cudaStreamCreate(&stream));
CUSPARSE_CHECK(cusparseCreate(&handle));
make_data();
convert::csr_to_dense(handle, params.nrows, params.ncols, indptr, indices,
data, params.nrows, out, stream, true);
CUDA_CHECK(cudaStreamSynchronize(stream));
CUSPARSE_CHECK(cusparseDestroy(handle));
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(indptr));
CUDA_CHECK(cudaFree(indices));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_ref));
}
void compare() {
ASSERT_TRUE(
devArrMatch(out, out_ref, params.out_ref_h.size(), Compare<value_t>()));
}
protected:
cudaStream_t stream;
cusparseHandle_t handle;
// input data
value_idx *indptr, *indices;
value_t *data;
// output data
value_t *out;
// expected output data
value_t *out_ref;
CSRToDenseInputs<value_idx, value_t> params;
};
const std::vector<CSRToDenseInputs<int, float>> inputs_i32_f = {
{4,
4,
{0, 2, 4, 6, 8},
{0, 1, 2, 3, 0, 1, 2, 3}, // indices
{1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f},
{1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 5.0f, 50.0f, 28.0f, 0.0f, 0.0f,
0.0f, 0.0f, 16.0f, 2.0f}},
};
typedef CSRToDenseTest<int, float> CSRToDenseTestF;
TEST_P(CSRToDenseTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(CSRToDenseTest, CSRToDenseTestF,
::testing::ValuesIn(inputs_i32_f));
}; // end namespace sparse
}; // end namespace raft
|
c9231b73dd98ad6471988ec459a10058f19f1275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "util_cuda.h"
#include "../neural_network_exception.h"
#include "../layer_configuration_specific.h"
#include "neural_network_cuda_exception.h"
#include <boost/format.hpp>
#include <utility>
namespace nnforge
{
namespace cuda
{
__global__ void set_with_value_util_kernel(
float4 * __restrict buf,
float v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val;
val.x = v;
val.y = v;
val.z = v;
val.w = v;
buf[elem_id] = val;
}
}
__global__ void set_with_value_util_kernel(
int4 * __restrict buf,
int v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
int4 val;
val.x = v;
val.y = v;
val.z = v;
val.w = v;
buf[elem_id] = val;
}
}
__global__ void set_with_value_util_kernel(
double2 * __restrict buf,
double v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
double2 val;
val.x = v;
val.y = v;
buf[elem_id] = val;
}
}
__global__ void multiply_by_value_util_kernel(
float4 * __restrict buf,
float v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = buf[elem_id];
val.x *= v;
val.y *= v;
val.z *= v;
val.w *= v;
buf[elem_id] = val;
}
}
__global__ void apply_weight_decay_util_kernel(
const float4 * __restrict learning_rates,
float4 * __restrict weights,
float weight_decay,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = learning_rates[elem_id];
float4 current_weight = weights[elem_id];
val.x = 1.0F - val.x * weight_decay;
val.y = 1.0F - val.y * weight_decay;
val.z = 1.0F - val.z * weight_decay;
val.w = 1.0F - val.w * weight_decay;
current_weight.x *= val.x;
current_weight.y *= val.y;
current_weight.z *= val.z;
current_weight.w *= val.w;
weights[elem_id] = current_weight;
}
}
__global__ void apply_gradient_with_weight_decay_util_kernel(
const float2 * __restrict gradient,
const float2 * __restrict learning_rates,
float2 * __restrict weights,
float weight_decay,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float2 lr = learning_rates[elem_id];
float2 current_weight = weights[elem_id];
float2 grad = gradient[elem_id];
float2 new_weight;
new_weight.x = current_weight.x + lr.x * (grad.x - weight_decay * current_weight.x);
new_weight.y = current_weight.y + lr.y * (grad.y - weight_decay * current_weight.y);
weights[elem_id] = new_weight;
}
}
__global__ void multiply_by_itself_training_util_kernel(
const float4 * __restrict input_buf,
float4 * __restrict output_buf,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *= val.z;
val.w *= val.w;
output_buf[elem_id] = val;
}
}
__global__ void copy_buffer_util_kernel(
const float4 * __restrict input_buf,
float4 * __restrict output_buf,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
__global__ void copy_to_striped_kernel(
const float * __restrict source_buf,
float2 * __restrict dest_buf,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int strided_feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
int first_feature_map_id = strided_feature_map_id * 2;
if ((elem_id < elem_count_per_feature_map) && (first_feature_map_id < feature_map_count) && (entry_id < entry_count))
{
int tt = entry_id * elem_count_per_feature_map;
int base_src_offset = tt * feature_map_count + elem_id;
int base_dst_offset = tt * ((feature_map_count + 1) >> 1) + elem_id;
float2 pack;
pack.x = source_buf[first_feature_map_id * elem_count_per_feature_map + base_src_offset];
pack.y = 0.0F;
int second_feature_map_id = first_feature_map_id + 1;
if (second_feature_map_id < feature_map_count)
pack.y = source_buf[second_feature_map_id * elem_count_per_feature_map + base_src_offset];
dest_buf[strided_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack;
}
}
__global__ void copy_from_striped_kernel(
const float2 * __restrict source_buf,
float * __restrict dest_buf,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int strided_feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
int first_feature_map_id = strided_feature_map_id * 2;
if ((elem_id < elem_count_per_feature_map) && (first_feature_map_id < feature_map_count) && (entry_id < entry_count))
{
int tt = entry_id * elem_count_per_feature_map;
int base_dst_offset = tt * feature_map_count + elem_id;
int base_src_offset = tt * ((feature_map_count + 1) >> 1) + elem_id;
float2 pack = source_buf[strided_feature_map_id * elem_count_per_feature_map + base_src_offset];
dest_buf[first_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack.x;
int second_feature_map_id = first_feature_map_id + 1;
if (second_feature_map_id < feature_map_count)
dest_buf[second_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack.y;
}
}
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
__global__ void transpose_kernel(
const float * __restrict src,
float * __restrict dst,
int src_fast_dim,
int src_slow_dim,
int elem_count_per_entry,
int entry_count)
{
int x = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count);
__shared__ float tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];
for(int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
{
if (in_bounds && ((y + j) < src_slow_dim) && (x < src_fast_dim))
tile[threadIdx.y + j][threadIdx.x] = src[(int)(entry_id * elem_count_per_entry + (y + j) * src_fast_dim + x)];
}
__syncthreads();
x = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
y = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
for(int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
{
if (in_bounds && ((y + j) < src_fast_dim) && (x < src_slow_dim))
dst[(int)(entry_id * elem_count_per_entry + (y + j) * src_slow_dim + x)] = tile[threadIdx.x][threadIdx.y + j];
}
}
const unsigned int cuda_util::preferred_width_2d_access = 16;
const unsigned int cuda_util::preferred_height_2d_access = 16;
const unsigned int cuda_util::preferred_threadblocksize_sequential_access = 256;
const unsigned int cuda_util::preferred_width_2d_access_x_aligned = 32;
const unsigned int cuda_util::preferred_height_2d_access_x_aligned = 8;
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_2d_access(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z)
{
dim3 threadblock_size(1, 1, 1);
const unsigned int preferred_threadblock_size = preferred_width_2d_access * preferred_height_2d_access;
if (x < preferred_width_2d_access)
{
threadblock_size.x = x;
threadblock_size.y = std::min<unsigned int>(cuda_config.max_threads_dim[1], std::min<unsigned int>(y, preferred_threadblock_size / threadblock_size.x));
}
else
{
if (y < preferred_height_2d_access)
{
threadblock_size.y = y;
threadblock_size.x = std::min<unsigned int>(cuda_config.max_threads_dim[0], std::min<unsigned int>(x, preferred_threadblock_size / threadblock_size.y));
}
else
{
threadblock_size.x = preferred_width_2d_access;
threadblock_size.y = preferred_height_2d_access;
}
}
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
threadblock_size.z = std::min<unsigned int>(cuda_config.max_threads_dim[2], std::min<unsigned int>(z, preferred_threadblock_size / (threadblock_size.x * threadblock_size.y)));
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_2d_access_x_aligned(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z)
{
dim3 threadblock_size(1, 1, 1);
const unsigned int preferred_threadblock_size = preferred_width_2d_access_x_aligned * preferred_height_2d_access_x_aligned;
if (x < preferred_width_2d_access_x_aligned)
{
threadblock_size.x = x;
threadblock_size.y = std::min<unsigned int>(cuda_config.max_threads_dim[1], std::min<unsigned int>(y, preferred_threadblock_size / threadblock_size.x));
}
else
{
if (y < preferred_height_2d_access_x_aligned)
{
threadblock_size.y = y;
threadblock_size.x = std::min<unsigned int>(cuda_config.max_threads_dim[0], std::min<unsigned int>(x, preferred_threadblock_size / threadblock_size.y));
}
else
{
threadblock_size.x = preferred_width_2d_access_x_aligned;
threadblock_size.y = preferred_height_2d_access_x_aligned;
}
}
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
threadblock_size.z = std::min<unsigned int>(cuda_config.max_threads_dim[2], std::min<unsigned int>(z, preferred_threadblock_size / (threadblock_size.x * threadblock_size.y)));
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_sequential_access(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z,
unsigned int threadblock_size_x_evenly_divisible)
{
dim3 threadblock_size(1, 1, 1);
int max_threads_dim_x = cuda_config.max_threads_dim[0];
unsigned int preferred_threadblock_size_remained = preferred_threadblocksize_sequential_access;
preferred_threadblock_size_remained /= threadblock_size_x_evenly_divisible;
if (preferred_threadblock_size_remained == 0)
{
if (threadblock_size_x_evenly_divisible <= cuda_config.max_threads_dim[0])
preferred_threadblock_size_remained = 1;
else
throw neural_network_exception((boost::format("Too large threadblock_size_x_evenly_divisible %1%, unable to compose threabblock") % threadblock_size_x_evenly_divisible).str());
}
x = (x + threadblock_size_x_evenly_divisible - 1) / threadblock_size_x_evenly_divisible;
max_threads_dim_x = max_threads_dim_x / threadblock_size_x_evenly_divisible;
threadblock_size.x = std::min<unsigned int>(std::min<unsigned int>(x, preferred_threadblock_size_remained), max_threads_dim_x);
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
preferred_threadblock_size_remained = preferred_threadblock_size_remained / threadblock_size.x;
threadblock_size.y = std::min<unsigned int>(std::min<unsigned int>(y, preferred_threadblock_size_remained), cuda_config.max_threads_dim[1]);
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
preferred_threadblock_size_remained = preferred_threadblock_size_remained / threadblock_size.y;
threadblock_size.z = std::min<unsigned int>(std::min<unsigned int>(z, preferred_threadblock_size_remained), cuda_config.max_threads_dim[2]);
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
threadblock_size.x *= threadblock_size_x_evenly_divisible;
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_sequential_access(
const cuda_running_configuration& cuda_config,
int elem_count)
{
dim3 threadblock_size(1, 1, 1);
dim3 grid_size(1, 1, 1);
threadblock_size.x = std::min<unsigned int>(preferred_threadblocksize_sequential_access, elem_count);
unsigned int threadblocks = (elem_count + threadblock_size.x - 1) / threadblock_size.x;
if (threadblocks <= cuda_config.max_grid_size[0])
grid_size.x = threadblocks;
else
{
grid_size.y = (threadblocks + cuda_config.max_grid_size[0] - 1) / cuda_config.max_grid_size[0];
grid_size.x = (threadblocks + grid_size.y - 1) / grid_size.y;
}
return std::make_pair(grid_size, threadblock_size);
}
int cuda_util::get_power2_aligned_size(int original_size)
{
int res = 1;
while (res < original_size)
res <<= 1;
return res;
}
size_t cuda_util::get_float4_aligned_buffer_size(size_t original_size)
{
size_t sz = (original_size + 15) & ~15;
return sz;
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
float * buf_with_aligned_size,
float v,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (float4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
double * buf_with_aligned_size,
double v,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 1) / 2;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (double2 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
int * buf_with_aligned_size,
int v,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (int4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::multiply_by_value(
const cuda_running_configuration& cuda_config,
float * buf_with_aligned_size,
float v,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( multiply_by_value_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (float4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::multiply_by_itself(
const cuda_running_configuration& cuda_config,
const float * input_buf_with_aligned_size,
float * output_buf_with_aligned_size,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( multiply_by_itself_training_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (const float4 *)input_buf_with_aligned_size, (float4 *)output_buf_with_aligned_size, new_elem_count);
}
void cuda_util::apply_weight_decay(
const cuda_running_configuration& cuda_config,
const float * learning_rates_with_aligned_size,
float * weights_with_aligned_size,
float weight_decay,
int elem_count,
hipStream_t cuda_stream)
{
if (weight_decay != 0.0F)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( apply_weight_decay_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (const float4 *)learning_rates_with_aligned_size, (float4 *)weights_with_aligned_size, weight_decay, new_elem_count);
}
}
void cuda_util::apply_gradient_with_weight_decay(
const cuda_running_configuration& cuda_config,
const float * gradient_with_aligned_size,
const float * learning_rates_with_aligned_size,
float * weights_with_aligned_size,
float weight_decay,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 1) / 2;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( apply_gradient_with_weight_decay_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (const float2 *)gradient_with_aligned_size, (const float2 *)learning_rates_with_aligned_size, (float2 *)weights_with_aligned_size, weight_decay, new_elem_count);
}
void cuda_util::copy_buffer(
const cuda_running_configuration& cuda_config,
const float * input_buf_with_aligned_size,
float * output_buf_with_aligned_size,
int elem_count,
hipStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
hipLaunchKernelGGL(( copy_buffer_util_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, (const float4 *)input_buf_with_aligned_size, (float4 *)output_buf_with_aligned_size, new_elem_count);
}
void cuda_util::transpose(
const cuda_running_configuration& cuda_config,
const float * src,
float * dst,
int src_fast_dim,
int src_slow_dim,
int entry_count,
hipStream_t cuda_stream)
{
dim3 threadblock_size(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
dim3 grid_size((src_fast_dim + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (src_slow_dim + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, entry_count);
hipLaunchKernelGGL(( transpose_kernel), dim3(grid_size), dim3(threadblock_size), 0, cuda_stream,
src,
dst,
src_fast_dim,
src_slow_dim,
src_fast_dim * src_slow_dim,
entry_count);
}
int cuda_util::get_group_count(
const cuda_running_configuration& cuda_config,
int total_thread_count,
int divisible)
{
const int assume_threadblock_size = 256;
int threadblock_count = (total_thread_count + assume_threadblock_size - 1) / assume_threadblock_size;
float wave_count = static_cast<float>(threadblock_count) / static_cast<float>(cuda_config.multiprocessor_count * 4);
if (wave_count >= 4.0F)
return 1;
int current_div;
for(int wave_count = 1; wave_count <= 4; ++wave_count)
{
current_div = ::min((cuda_config.multiprocessor_count * 4 * wave_count * assume_threadblock_size) / total_thread_count, divisible);
if (current_div == 0)
continue;
int group_size = (divisible + current_div - 1) / current_div;
current_div = (divisible + group_size - 1) / group_size;
int current_threadblock_count = (total_thread_count * current_div + assume_threadblock_size - 1) / assume_threadblock_size;
float current_wave_count = static_cast<float>(current_threadblock_count) / static_cast<float>(cuda_config.multiprocessor_count * 4);
float remaining_part = wave_count - current_wave_count;
if (remaining_part < 0.2F)
return current_div;
}
return ::min(::max(current_div, 1), divisible);
}
int cuda_util::get_thread_count_per_wave(const cuda_running_configuration& cuda_config)
{
return cuda_config.multiprocessor_count * 4 * 256;
}
unsigned int cuda_util::get_feature_map_count_striped(unsigned int feature_map_count)
{
return ((feature_map_count + 1) >> 1);
}
layer_configuration_specific cuda_util::get_layer_configuration_specific_striped(const layer_configuration_specific& original_layer_config)
{
layer_configuration_specific res = original_layer_config;
res.feature_map_count = get_feature_map_count_striped(res.feature_map_count);
return res;
}
void cuda_util::copy_to_striped(
const cuda_running_configuration& cuda_config,
const float * source_buf,
float2 * dest_buf,
unsigned int elem_count_per_feature_map,
unsigned int feature_map_count,
unsigned int entry_count,
hipStream_t cuda_stream)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
elem_count_per_feature_map,
get_feature_map_count_striped(feature_map_count),
entry_count);
hipLaunchKernelGGL(( copy_to_striped_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, source_buf, dest_buf, elem_count_per_feature_map, feature_map_count, entry_count);
}
void cuda_util::copy_from_striped(
const cuda_running_configuration& cuda_config,
const float2 * source_buf,
float * dest_buf,
unsigned int elem_count_per_feature_map,
unsigned int feature_map_count,
unsigned int entry_count,
hipStream_t cuda_stream)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
elem_count_per_feature_map,
get_feature_map_count_striped(feature_map_count),
entry_count);
hipLaunchKernelGGL(( copy_from_striped_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, cuda_stream, source_buf, dest_buf, elem_count_per_feature_map, feature_map_count, entry_count);
}
}
}
| c9231b73dd98ad6471988ec459a10058f19f1275.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "util_cuda.h"
#include "../neural_network_exception.h"
#include "../layer_configuration_specific.h"
#include "neural_network_cuda_exception.h"
#include <boost/format.hpp>
#include <utility>
namespace nnforge
{
namespace cuda
{
__global__ void set_with_value_util_kernel(
float4 * __restrict buf,
float v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val;
val.x = v;
val.y = v;
val.z = v;
val.w = v;
buf[elem_id] = val;
}
}
__global__ void set_with_value_util_kernel(
int4 * __restrict buf,
int v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
int4 val;
val.x = v;
val.y = v;
val.z = v;
val.w = v;
buf[elem_id] = val;
}
}
__global__ void set_with_value_util_kernel(
double2 * __restrict buf,
double v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
double2 val;
val.x = v;
val.y = v;
buf[elem_id] = val;
}
}
__global__ void multiply_by_value_util_kernel(
float4 * __restrict buf,
float v,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = buf[elem_id];
val.x *= v;
val.y *= v;
val.z *= v;
val.w *= v;
buf[elem_id] = val;
}
}
__global__ void apply_weight_decay_util_kernel(
const float4 * __restrict learning_rates,
float4 * __restrict weights,
float weight_decay,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = learning_rates[elem_id];
float4 current_weight = weights[elem_id];
val.x = 1.0F - val.x * weight_decay;
val.y = 1.0F - val.y * weight_decay;
val.z = 1.0F - val.z * weight_decay;
val.w = 1.0F - val.w * weight_decay;
current_weight.x *= val.x;
current_weight.y *= val.y;
current_weight.z *= val.z;
current_weight.w *= val.w;
weights[elem_id] = current_weight;
}
}
__global__ void apply_gradient_with_weight_decay_util_kernel(
const float2 * __restrict gradient,
const float2 * __restrict learning_rates,
float2 * __restrict weights,
float weight_decay,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float2 lr = learning_rates[elem_id];
float2 current_weight = weights[elem_id];
float2 grad = gradient[elem_id];
float2 new_weight;
new_weight.x = current_weight.x + lr.x * (grad.x - weight_decay * current_weight.x);
new_weight.y = current_weight.y + lr.y * (grad.y - weight_decay * current_weight.y);
weights[elem_id] = new_weight;
}
}
__global__ void multiply_by_itself_training_util_kernel(
const float4 * __restrict input_buf,
float4 * __restrict output_buf,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *= val.z;
val.w *= val.w;
output_buf[elem_id] = val;
}
}
__global__ void copy_buffer_util_kernel(
const float4 * __restrict input_buf,
float4 * __restrict output_buf,
int elem_count)
{
int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
if (elem_id < elem_count)
output_buf[elem_id] = input_buf[elem_id];
}
__global__ void copy_to_striped_kernel(
const float * __restrict source_buf,
float2 * __restrict dest_buf,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int strided_feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
int first_feature_map_id = strided_feature_map_id * 2;
if ((elem_id < elem_count_per_feature_map) && (first_feature_map_id < feature_map_count) && (entry_id < entry_count))
{
int tt = entry_id * elem_count_per_feature_map;
int base_src_offset = tt * feature_map_count + elem_id;
int base_dst_offset = tt * ((feature_map_count + 1) >> 1) + elem_id;
float2 pack;
pack.x = source_buf[first_feature_map_id * elem_count_per_feature_map + base_src_offset];
pack.y = 0.0F;
int second_feature_map_id = first_feature_map_id + 1;
if (second_feature_map_id < feature_map_count)
pack.y = source_buf[second_feature_map_id * elem_count_per_feature_map + base_src_offset];
dest_buf[strided_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack;
}
}
__global__ void copy_from_striped_kernel(
const float2 * __restrict source_buf,
float * __restrict dest_buf,
int elem_count_per_feature_map,
int feature_map_count,
int entry_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
int strided_feature_map_id = blockDim.y * blockIdx.y + threadIdx.y;
int entry_id = blockDim.z * blockIdx.z + threadIdx.z;
int first_feature_map_id = strided_feature_map_id * 2;
if ((elem_id < elem_count_per_feature_map) && (first_feature_map_id < feature_map_count) && (entry_id < entry_count))
{
int tt = entry_id * elem_count_per_feature_map;
int base_dst_offset = tt * feature_map_count + elem_id;
int base_src_offset = tt * ((feature_map_count + 1) >> 1) + elem_id;
float2 pack = source_buf[strided_feature_map_id * elem_count_per_feature_map + base_src_offset];
dest_buf[first_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack.x;
int second_feature_map_id = first_feature_map_id + 1;
if (second_feature_map_id < feature_map_count)
dest_buf[second_feature_map_id * elem_count_per_feature_map + base_dst_offset] = pack.y;
}
}
#define TRANSPOSE_TILE_DIM 32
#define TRANSPOSE_BLOCK_ROWS 8
__global__ void transpose_kernel(
const float * __restrict src,
float * __restrict dst,
int src_fast_dim,
int src_slow_dim,
int elem_count_per_entry,
int entry_count)
{
int x = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.x;
int y = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count);
__shared__ float tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM + 1];
for(int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
{
if (in_bounds && ((y + j) < src_slow_dim) && (x < src_fast_dim))
tile[threadIdx.y + j][threadIdx.x] = src[(int)(entry_id * elem_count_per_entry + (y + j) * src_fast_dim + x)];
}
__syncthreads();
x = blockIdx.y * TRANSPOSE_TILE_DIM + threadIdx.x;
y = blockIdx.x * TRANSPOSE_TILE_DIM + threadIdx.y;
for(int j = 0; j < TRANSPOSE_TILE_DIM; j += TRANSPOSE_BLOCK_ROWS)
{
if (in_bounds && ((y + j) < src_fast_dim) && (x < src_slow_dim))
dst[(int)(entry_id * elem_count_per_entry + (y + j) * src_slow_dim + x)] = tile[threadIdx.x][threadIdx.y + j];
}
}
const unsigned int cuda_util::preferred_width_2d_access = 16;
const unsigned int cuda_util::preferred_height_2d_access = 16;
const unsigned int cuda_util::preferred_threadblocksize_sequential_access = 256;
const unsigned int cuda_util::preferred_width_2d_access_x_aligned = 32;
const unsigned int cuda_util::preferred_height_2d_access_x_aligned = 8;
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_2d_access(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z)
{
dim3 threadblock_size(1, 1, 1);
const unsigned int preferred_threadblock_size = preferred_width_2d_access * preferred_height_2d_access;
if (x < preferred_width_2d_access)
{
threadblock_size.x = x;
threadblock_size.y = std::min<unsigned int>(cuda_config.max_threads_dim[1], std::min<unsigned int>(y, preferred_threadblock_size / threadblock_size.x));
}
else
{
if (y < preferred_height_2d_access)
{
threadblock_size.y = y;
threadblock_size.x = std::min<unsigned int>(cuda_config.max_threads_dim[0], std::min<unsigned int>(x, preferred_threadblock_size / threadblock_size.y));
}
else
{
threadblock_size.x = preferred_width_2d_access;
threadblock_size.y = preferred_height_2d_access;
}
}
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
threadblock_size.z = std::min<unsigned int>(cuda_config.max_threads_dim[2], std::min<unsigned int>(z, preferred_threadblock_size / (threadblock_size.x * threadblock_size.y)));
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_2d_access_x_aligned(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z)
{
dim3 threadblock_size(1, 1, 1);
const unsigned int preferred_threadblock_size = preferred_width_2d_access_x_aligned * preferred_height_2d_access_x_aligned;
if (x < preferred_width_2d_access_x_aligned)
{
threadblock_size.x = x;
threadblock_size.y = std::min<unsigned int>(cuda_config.max_threads_dim[1], std::min<unsigned int>(y, preferred_threadblock_size / threadblock_size.x));
}
else
{
if (y < preferred_height_2d_access_x_aligned)
{
threadblock_size.y = y;
threadblock_size.x = std::min<unsigned int>(cuda_config.max_threads_dim[0], std::min<unsigned int>(x, preferred_threadblock_size / threadblock_size.y));
}
else
{
threadblock_size.x = preferred_width_2d_access_x_aligned;
threadblock_size.y = preferred_height_2d_access_x_aligned;
}
}
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
threadblock_size.z = std::min<unsigned int>(cuda_config.max_threads_dim[2], std::min<unsigned int>(z, preferred_threadblock_size / (threadblock_size.x * threadblock_size.y)));
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_sequential_access(
const cuda_running_configuration& cuda_config,
unsigned int x,
unsigned int y,
unsigned int z,
unsigned int threadblock_size_x_evenly_divisible)
{
dim3 threadblock_size(1, 1, 1);
int max_threads_dim_x = cuda_config.max_threads_dim[0];
unsigned int preferred_threadblock_size_remained = preferred_threadblocksize_sequential_access;
preferred_threadblock_size_remained /= threadblock_size_x_evenly_divisible;
if (preferred_threadblock_size_remained == 0)
{
if (threadblock_size_x_evenly_divisible <= cuda_config.max_threads_dim[0])
preferred_threadblock_size_remained = 1;
else
throw neural_network_exception((boost::format("Too large threadblock_size_x_evenly_divisible %1%, unable to compose threabblock") % threadblock_size_x_evenly_divisible).str());
}
x = (x + threadblock_size_x_evenly_divisible - 1) / threadblock_size_x_evenly_divisible;
max_threads_dim_x = max_threads_dim_x / threadblock_size_x_evenly_divisible;
threadblock_size.x = std::min<unsigned int>(std::min<unsigned int>(x, preferred_threadblock_size_remained), max_threads_dim_x);
unsigned int threadblocks_to_cover_x = (x + threadblock_size.x - 1) / threadblock_size.x;
threadblock_size.x = (x + threadblocks_to_cover_x - 1) / threadblocks_to_cover_x;
preferred_threadblock_size_remained = preferred_threadblock_size_remained / threadblock_size.x;
threadblock_size.y = std::min<unsigned int>(std::min<unsigned int>(y, preferred_threadblock_size_remained), cuda_config.max_threads_dim[1]);
unsigned int threadblocks_to_cover_y = (y + threadblock_size.y - 1) / threadblock_size.y;
threadblock_size.y = (y + threadblocks_to_cover_y - 1) / threadblocks_to_cover_y;
preferred_threadblock_size_remained = preferred_threadblock_size_remained / threadblock_size.y;
threadblock_size.z = std::min<unsigned int>(std::min<unsigned int>(z, preferred_threadblock_size_remained), cuda_config.max_threads_dim[2]);
unsigned int threadblocks_to_cover_z = (z + threadblock_size.z - 1) / threadblock_size.z;
threadblock_size.z = (z + threadblocks_to_cover_z - 1) / threadblocks_to_cover_z;
dim3 grid_size(
(x + threadblock_size.x - 1) / threadblock_size.x,
(y + threadblock_size.y - 1) / threadblock_size.y,
(z + threadblock_size.z - 1) / threadblock_size.z);
threadblock_size.x *= threadblock_size_x_evenly_divisible;
return std::make_pair(grid_size, threadblock_size);
}
std::pair<dim3, dim3> cuda_util::get_grid_and_threadblock_sizes_sequential_access(
const cuda_running_configuration& cuda_config,
int elem_count)
{
dim3 threadblock_size(1, 1, 1);
dim3 grid_size(1, 1, 1);
threadblock_size.x = std::min<unsigned int>(preferred_threadblocksize_sequential_access, elem_count);
unsigned int threadblocks = (elem_count + threadblock_size.x - 1) / threadblock_size.x;
if (threadblocks <= cuda_config.max_grid_size[0])
grid_size.x = threadblocks;
else
{
grid_size.y = (threadblocks + cuda_config.max_grid_size[0] - 1) / cuda_config.max_grid_size[0];
grid_size.x = (threadblocks + grid_size.y - 1) / grid_size.y;
}
return std::make_pair(grid_size, threadblock_size);
}
int cuda_util::get_power2_aligned_size(int original_size)
{
int res = 1;
while (res < original_size)
res <<= 1;
return res;
}
size_t cuda_util::get_float4_aligned_buffer_size(size_t original_size)
{
size_t sz = (original_size + 15) & ~15;
return sz;
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
float * buf_with_aligned_size,
float v,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
set_with_value_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((float4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
double * buf_with_aligned_size,
double v,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 1) / 2;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
set_with_value_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((double2 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::set_with_value(
const cuda_running_configuration& cuda_config,
int * buf_with_aligned_size,
int v,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
set_with_value_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((int4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::multiply_by_value(
const cuda_running_configuration& cuda_config,
float * buf_with_aligned_size,
float v,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
multiply_by_value_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((float4 *)buf_with_aligned_size, v, new_elem_count);
}
void cuda_util::multiply_by_itself(
const cuda_running_configuration& cuda_config,
const float * input_buf_with_aligned_size,
float * output_buf_with_aligned_size,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
multiply_by_itself_training_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((const float4 *)input_buf_with_aligned_size, (float4 *)output_buf_with_aligned_size, new_elem_count);
}
void cuda_util::apply_weight_decay(
const cuda_running_configuration& cuda_config,
const float * learning_rates_with_aligned_size,
float * weights_with_aligned_size,
float weight_decay,
int elem_count,
cudaStream_t cuda_stream)
{
if (weight_decay != 0.0F)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
apply_weight_decay_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((const float4 *)learning_rates_with_aligned_size, (float4 *)weights_with_aligned_size, weight_decay, new_elem_count);
}
}
void cuda_util::apply_gradient_with_weight_decay(
const cuda_running_configuration& cuda_config,
const float * gradient_with_aligned_size,
const float * learning_rates_with_aligned_size,
float * weights_with_aligned_size,
float weight_decay,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 1) / 2;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
apply_gradient_with_weight_decay_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((const float2 *)gradient_with_aligned_size, (const float2 *)learning_rates_with_aligned_size, (float2 *)weights_with_aligned_size, weight_decay, new_elem_count);
}
void cuda_util::copy_buffer(
const cuda_running_configuration& cuda_config,
const float * input_buf_with_aligned_size,
float * output_buf_with_aligned_size,
int elem_count,
cudaStream_t cuda_stream)
{
int new_elem_count = (elem_count + 3) / 4;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
new_elem_count);
copy_buffer_util_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>((const float4 *)input_buf_with_aligned_size, (float4 *)output_buf_with_aligned_size, new_elem_count);
}
void cuda_util::transpose(
const cuda_running_configuration& cuda_config,
const float * src,
float * dst,
int src_fast_dim,
int src_slow_dim,
int entry_count,
cudaStream_t cuda_stream)
{
dim3 threadblock_size(TRANSPOSE_TILE_DIM, TRANSPOSE_BLOCK_ROWS, 1);
dim3 grid_size((src_fast_dim + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (src_slow_dim + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, entry_count);
transpose_kernel<<<grid_size, threadblock_size, 0, cuda_stream>>>(
src,
dst,
src_fast_dim,
src_slow_dim,
src_fast_dim * src_slow_dim,
entry_count);
}
int cuda_util::get_group_count(
const cuda_running_configuration& cuda_config,
int total_thread_count,
int divisible)
{
const int assume_threadblock_size = 256;
int threadblock_count = (total_thread_count + assume_threadblock_size - 1) / assume_threadblock_size;
float wave_count = static_cast<float>(threadblock_count) / static_cast<float>(cuda_config.multiprocessor_count * 4);
if (wave_count >= 4.0F)
return 1;
int current_div;
for(int wave_count = 1; wave_count <= 4; ++wave_count)
{
current_div = std::min((cuda_config.multiprocessor_count * 4 * wave_count * assume_threadblock_size) / total_thread_count, divisible);
if (current_div == 0)
continue;
int group_size = (divisible + current_div - 1) / current_div;
current_div = (divisible + group_size - 1) / group_size;
int current_threadblock_count = (total_thread_count * current_div + assume_threadblock_size - 1) / assume_threadblock_size;
float current_wave_count = static_cast<float>(current_threadblock_count) / static_cast<float>(cuda_config.multiprocessor_count * 4);
float remaining_part = wave_count - current_wave_count;
if (remaining_part < 0.2F)
return current_div;
}
return std::min(std::max(current_div, 1), divisible);
}
int cuda_util::get_thread_count_per_wave(const cuda_running_configuration& cuda_config)
{
return cuda_config.multiprocessor_count * 4 * 256;
}
unsigned int cuda_util::get_feature_map_count_striped(unsigned int feature_map_count)
{
return ((feature_map_count + 1) >> 1);
}
layer_configuration_specific cuda_util::get_layer_configuration_specific_striped(const layer_configuration_specific& original_layer_config)
{
layer_configuration_specific res = original_layer_config;
res.feature_map_count = get_feature_map_count_striped(res.feature_map_count);
return res;
}
void cuda_util::copy_to_striped(
const cuda_running_configuration& cuda_config,
const float * source_buf,
float2 * dest_buf,
unsigned int elem_count_per_feature_map,
unsigned int feature_map_count,
unsigned int entry_count,
cudaStream_t cuda_stream)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
elem_count_per_feature_map,
get_feature_map_count_striped(feature_map_count),
entry_count);
copy_to_striped_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>(source_buf, dest_buf, elem_count_per_feature_map, feature_map_count, entry_count);
}
void cuda_util::copy_from_striped(
const cuda_running_configuration& cuda_config,
const float2 * source_buf,
float * dest_buf,
unsigned int elem_count_per_feature_map,
unsigned int feature_map_count,
unsigned int entry_count,
cudaStream_t cuda_stream)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
cuda_config,
elem_count_per_feature_map,
get_feature_map_count_striped(feature_map_count),
entry_count);
copy_from_striped_kernel<<<kernel_dims.first, kernel_dims.second, 0, cuda_stream>>>(source_buf, dest_buf, elem_count_per_feature_map, feature_map_count, entry_count);
}
}
}
|
9438464ae03d8614fe4d751ea8dfd67080cdc5a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define VERSION 1
#define SEED_NUM 123
/*
Common cpp libraries
*/
#include <stdio.h>
#include <stdlib.h>
#include <set>
#include <sstream>
#include <string>
#include <fstream>
#include <iostream>
#include <cstring>
#include <random>
#include <bits/stdc++.h>
/*
Vector operations
*/
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
/*
GPU functions
*/
using namespace std;
/*==============================================================================
CSR graph class
==============================================================================*/
// class definition
template <typename ValueT, typename SizeT>
class CSR
{
public:
CSR(const char *);
~CSR() {
hipFree(csr);
hipFree(offset);
hipFree(colors);
hipFree(rand);
};
// SizeT &operator[] (SizeT); DEPRICATED
// void print_adj();
void print_tup();
void print_arrays();
void check_conflict();
unsigned int edges;
unsigned int vertices;
SizeT* csr;
SizeT* offset;
SizeT* colors;
ValueT* rand;
private:
vector <tuple<SizeT,SizeT>> coo;
// SizeT* adj_matrix; DEPRICATED
};
// class outline (some functions taken from EEC289Q)
// constructor
template <typename ValueT, typename SizeT>
CSR<ValueT,SizeT>::CSR(const char filename[]) {
string line;
ifstream infile(filename);
if (infile.fail()) {
cout << "ERROR: failed to open file" << endl;
return;
}
cout << "Making adj matrix ..." << endl;
while (getline(infile, line)) {
istringstream iss(line);
if (line.find("%") == string::npos)
break;
}
istringstream iss(line);
SizeT num_rows, num_cols, num_edges;
iss >> num_rows >> num_cols >> num_edges;
this->vertices = num_rows;
this->edges = num_edges;
cout << "Number of vertices: " << num_rows << endl;
cout << "Number of directed edges: " << num_edges << endl;
/* vvvvvvvvv DEPRICATED CODE, ALLOC ERROR WHEN GRAPH IS LARGE vvvvvvvvvvvvvvvv
// this->adj_matrix = new SizeT[num_rows * num_rows];
// memset(this->adj_matrix, 0, num_rows * num_rows * sizeof(bool));
//
// while (getline(infile, line)) {
// istringstream iss(line);
// SizeT node1, node2, weight;
// iss >> node1 >> node2 >> weight;
//
// this->adj_matrix[(node1 - 1) * num_rows + (node2 - 1)] = true;
// this->adj_matrix[(node2 - 1) * num_rows + (node1 - 1)] = true;
// }
// infile.close();
//
// cout << "Making csr and offset ..." << endl;
//
// // declare csr and offset
// int csr_length = thrust::reduce(thrust::host,
// this->adj_matrix, this->adj_matrix + this->vertices * this->vertices);
hipMallocManaged(&(this->csr), this->edges * sizeof(SizeT));
hipMallocManaged(&(this->offset), this->vertices * sizeof(SizeT));
// this->csr = new SizeT[this->edges ];
// this->offset = new SizeT[this->vertices];
// populate csr and offset
int count = 0;
for (SizeT v = 0 ; v < this->vertices; v++) {
this->offset[v] = thrust::reduce(thrust::host,
this->adj_matrix + (v * this->vertices),
this->adj_matrix + ((v + 1) * this->vertices) );
for (SizeT adj = 0; adj < this->vertices; adj++) {
if (this->adj_matrix[v * this->vertices + adj]) {
this->csr[count] = adj;
count++;
}
}
}
thrust::exclusive_scan(thrust::host, this->offset,
this->offset + this->vertices, this->offset);
^^^^^^^^^^^^ DEPRICATED CODE, ALLOC ERROR WHEN GRAPH IS LARGE ^^^^^^^^^^^^^^^*/
cout << "Making coo ..." << endl;
while (getline(infile, line)) {
istringstream iss(line);
SizeT node1, node2, weight;
iss >> node1 >> node2 >> weight;
this->coo.push_back(make_tuple(node1 - 1, node2 - 1));
this->coo.push_back(make_tuple(node2 - 1, node1 - 1));
}
infile.close();
// erase redundant nodes
sort(this->coo.begin(), this->coo.end());
this->coo.erase(unique(this->coo.begin(), this->coo.end()), this->coo.end());
cout << "Making csr ..." << endl;
hipMallocManaged(&(this->csr), this->coo.size() * sizeof(SizeT));
hipMallocManaged(&(this->offset), this->vertices * sizeof(SizeT));
for (int i = 0; i < this->coo.size(); i++) {
this->csr[i] = get<1>(this->coo[i]);
}
for (SizeT v = 0; v < this->vertices; v++) {
int count = 0;
for (int i = 0; i < this->coo.size(); i++) {
if (get<0>(this->coo[i]) == v)
count ++;
}
this->offset[v] = count;
}
cout << "Making rand ..." << endl;
// create rand array for IS
hipMallocManaged(&(this->rand), this->vertices * sizeof(ValueT));
// this->rand = new ValueT[this->vertices];
random_device rd;
mt19937 e2(rd());
e2.seed(SEED_NUM);
uniform_real_distribution<> dist(0,100);
for (int v = 0; v < this->vertices; v++) {
this->rand[v] = dist(e2);
}
thrust::exclusive_scan(thrust::host, this->offset,
this->offset + this->vertices, this->offset);
cout << "Making colors ..." << endl;
// allocate memory for colors
hipMallocManaged(&(this->colors), this->vertices * sizeof(SizeT));
// this->colors = new SizeT[this->vertices];
memset(this->colors, -1, this->vertices * sizeof(SizeT));
};
// index overload DEPRICATED
// template <typename ValueT, typename SizeT>
// SizeT & CSR<ValueT,SizeT>::operator[](SizeT idx) {
// return this->adj_matrix[idx];
// };
// print first 20 x 20 entries for adj matrix DEPRICATED
// template <typename ValueT, typename SizeT>
// void CSR<ValueT, SizeT>::print_adj() {
// SizeT max_idx = 20;
// if(this->vertices < 20)
// max_idx = this->vertices;
// for (int i = 0; i < max_idx; i++) {
// cout << i << " : [";
// for (int j = 0; j < max_idx; j++) {
// cout << this->adj_matrix[i * this->vertices + j] << ", ";
// }
// cout << "]" << endl;
// }
// };
// print first 20 tuples
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::print_tup() {
SizeT max_idx = 20;
if(this->vertices < 20)
max_idx = this->vertices;
for (int i = 0; i < max_idx; i++) {
cout << "(" << get<0>(this->coo[i]) << ", " << get<1>(this->coo[i]) << ")" <<endl;
}
};
// print first 20 entries for offset and csr
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::print_arrays() {
SizeT max_idx = 20;
if(this->vertices < 20)
max_idx = this->vertices;
cout << "CSR: [";
for (int i = 0; i < max_idx; i++) {
cout << this->csr[i] << ", ";
}
cout << "]" << endl;
cout << "OFFSET: [";
for (int i = 0; i < max_idx; i++) {
cout << this->offset[i] << ", ";
}
cout << "]" << endl;
cout << "COLORS: [";
for (int i = 0; i < max_idx; i++) {
cout << this->colors[i] << ", ";
}
cout << "]" << endl;
cout << "RAND: [";
for (int i = 0; i < max_idx; i++) {
cout << this->rand[i] << ", ";
}
cout << "]" << endl;
};
/*==============================================================================
Check for color conflict
==============================================================================*/
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::check_conflict() {
for (SizeT v = 0; v < this->vertices; v++) {
SizeT start_edge = offset[v];
SizeT num_neighbors = offset[v + 1] - offset[v];
for (SizeT e = start_edge; e < start_edge + num_neighbors; e++) {
SizeT u = csr[e];
if ((this->colors[v] == this->colors[u]) && (u != v)) {
cout << "ERROR: Conflict at node " << v << "and node " << u
<< " at color" << colors[v] << endl;
}
}
}
}
/*==============================================================================
IS color operation - outline taken from Gunrock jpl_color_op
==============================================================================*/
template <typename ValueT, typename SizeT>
#if defined(VERSION) && VERSION == 1
__global__
#else
__device__
#endif
void color_op(SizeT* csr, SizeT* offset, ValueT* rand,
SizeT* colors, int num_vertices, int iteration) {
unsigned int v = blockIdx.x * blockDim.x + threadIdx.x;
if (v < num_vertices) {
if (colors[v] != -1) return;
SizeT start_edge = offset[v];
SizeT num_neighbors = offset[v + 1] - offset[v];
bool colormax = true;
bool colormin = true;
int color = iteration * 2;
for (SizeT e = start_edge; e < start_edge + num_neighbors; e++) {
SizeT u = csr[e];
if ((colors[u] != -1) && (colors[u] != color + 1) &&
(colors[u] != color + 2) ||
(v == u))
continue;
if (rand[v] <= rand[u]) colormax = false;
if (rand[v] >= rand[u]) colormin = false;
}
if (colormax) colors[v] = color + 1;
if (colormin) colors[v] = color + 2;
}
};
/*==============================================================================
IS color stop condition
==============================================================================*/
template <typename ValueT, typename SizeT>
__host__ __device__
bool stop_condition(SizeT* colors, unsigned int num_vertices) {
#if defined(VERSION) && VERSION == 1
for (int v = 0; v < num_vertices; v++) {
if (colors[v] == -1)
return true;
}
return false;
#else
return false;
#endif
}
/*==============================================================================
IS Kernel function
==============================================================================*/
// template <typename ValueT, typename SizeT>
// __global__
// void ISKernel(SizeT csr, SizeT offset, ValueT rand, SizeT colors, int num_vertices) {
// int iteration = 0;
// while (stop_condition(colors)) {
// color_op(csr, offset, rand, colors, num_vertices, iteration);
// // TODO: grid wise synchronization
// }
// };
/*==============================================================================
IS Kernel Driver
==============================================================================*/
// template <typename ValueT, typename SizeT>
// void ISKernelDriver(CSR<ValueT, SizeT> graph) {
// unsigned int num_threads = 32;
// unsigned int num_blocks = graph.vertices / num_threads + 1;
// ISKernel<ValueT, SizeT><<<num_blocks, num_threads>>>
// (graph.csr,
// graph.offset,
// graph.rand,
// graph.colors,
// graph.vertices);
// }
/*==============================================================================
Tester - version 1
==============================================================================*/
template <typename ValueT, typename SizeT>
void test_1(CSR <float, int> graph) {
int iteration = 0;
unsigned int num_threads = 32;
unsigned int num_blocks = graph.vertices / num_threads + 1;
cout << "Kernel loop start" << endl;
while (stop_condition<float, int>(graph.colors, graph.vertices)) {
hipLaunchKernelGGL(( color_op<float, int>), dim3(num_blocks), dim3(num_threads), 0, 0,
graph.csr,
graph.offset,
graph.rand,
graph.colors,
graph.vertices,
iteration);
hipDeviceSynchronize();
iteration ++;
}
// graph.print_adj(); DEPRICATED
cout << "==== Graph Samples: ====" <<endl;
graph.print_tup();
graph.print_arrays();
graph.check_conflict();
};
/*==============================================================================
Main function
==============================================================================*/
int main(int argc, char const *argv[]) {
#if defined(VERSION) && VERSION == 1
// cout << "Test small graph" << endl;
// CSR <float, int> * graph = new CSR<float, int>("../gunrock/dataset/small/test_cc.mtx");
// test_1 <float, int> (*graph);
cout << "Test large graph" << endl;
CSR <float, int> * graph = new CSR<float, int>("/data-2/topc-datasets/gc-data/offshore/offshore.mtx");
test_1 <float, int> (*graph);
#endif
return 0;
}
| 9438464ae03d8614fe4d751ea8dfd67080cdc5a7.cu | #define VERSION 1
#define SEED_NUM 123
/*
Common cpp libraries
*/
#include <stdio.h>
#include <stdlib.h>
#include <set>
#include <sstream>
#include <string>
#include <fstream>
#include <iostream>
#include <cstring>
#include <random>
#include <bits/stdc++.h>
/*
Vector operations
*/
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
/*
GPU functions
*/
using namespace std;
/*==============================================================================
CSR graph class
==============================================================================*/
// class definition
template <typename ValueT, typename SizeT>
class CSR
{
public:
CSR(const char *);
~CSR() {
cudaFree(csr);
cudaFree(offset);
cudaFree(colors);
cudaFree(rand);
};
// SizeT &operator[] (SizeT); DEPRICATED
// void print_adj();
void print_tup();
void print_arrays();
void check_conflict();
unsigned int edges;
unsigned int vertices;
SizeT* csr;
SizeT* offset;
SizeT* colors;
ValueT* rand;
private:
vector <tuple<SizeT,SizeT>> coo;
// SizeT* adj_matrix; DEPRICATED
};
// class outline (some functions taken from EEC289Q)
// constructor
template <typename ValueT, typename SizeT>
CSR<ValueT,SizeT>::CSR(const char filename[]) {
string line;
ifstream infile(filename);
if (infile.fail()) {
cout << "ERROR: failed to open file" << endl;
return;
}
cout << "Making adj matrix ..." << endl;
while (getline(infile, line)) {
istringstream iss(line);
if (line.find("%") == string::npos)
break;
}
istringstream iss(line);
SizeT num_rows, num_cols, num_edges;
iss >> num_rows >> num_cols >> num_edges;
this->vertices = num_rows;
this->edges = num_edges;
cout << "Number of vertices: " << num_rows << endl;
cout << "Number of directed edges: " << num_edges << endl;
/* vvvvvvvvv DEPRICATED CODE, ALLOC ERROR WHEN GRAPH IS LARGE vvvvvvvvvvvvvvvv
// this->adj_matrix = new SizeT[num_rows * num_rows];
// memset(this->adj_matrix, 0, num_rows * num_rows * sizeof(bool));
//
// while (getline(infile, line)) {
// istringstream iss(line);
// SizeT node1, node2, weight;
// iss >> node1 >> node2 >> weight;
//
// this->adj_matrix[(node1 - 1) * num_rows + (node2 - 1)] = true;
// this->adj_matrix[(node2 - 1) * num_rows + (node1 - 1)] = true;
// }
// infile.close();
//
// cout << "Making csr and offset ..." << endl;
//
// // declare csr and offset
// int csr_length = thrust::reduce(thrust::host,
// this->adj_matrix, this->adj_matrix + this->vertices * this->vertices);
cudaMallocManaged(&(this->csr), this->edges * sizeof(SizeT));
cudaMallocManaged(&(this->offset), this->vertices * sizeof(SizeT));
// this->csr = new SizeT[this->edges ];
// this->offset = new SizeT[this->vertices];
// populate csr and offset
int count = 0;
for (SizeT v = 0 ; v < this->vertices; v++) {
this->offset[v] = thrust::reduce(thrust::host,
this->adj_matrix + (v * this->vertices),
this->adj_matrix + ((v + 1) * this->vertices) );
for (SizeT adj = 0; adj < this->vertices; adj++) {
if (this->adj_matrix[v * this->vertices + adj]) {
this->csr[count] = adj;
count++;
}
}
}
thrust::exclusive_scan(thrust::host, this->offset,
this->offset + this->vertices, this->offset);
^^^^^^^^^^^^ DEPRICATED CODE, ALLOC ERROR WHEN GRAPH IS LARGE ^^^^^^^^^^^^^^^*/
cout << "Making coo ..." << endl;
while (getline(infile, line)) {
istringstream iss(line);
SizeT node1, node2, weight;
iss >> node1 >> node2 >> weight;
this->coo.push_back(make_tuple(node1 - 1, node2 - 1));
this->coo.push_back(make_tuple(node2 - 1, node1 - 1));
}
infile.close();
// erase redundant nodes
sort(this->coo.begin(), this->coo.end());
this->coo.erase(unique(this->coo.begin(), this->coo.end()), this->coo.end());
cout << "Making csr ..." << endl;
cudaMallocManaged(&(this->csr), this->coo.size() * sizeof(SizeT));
cudaMallocManaged(&(this->offset), this->vertices * sizeof(SizeT));
for (int i = 0; i < this->coo.size(); i++) {
this->csr[i] = get<1>(this->coo[i]);
}
for (SizeT v = 0; v < this->vertices; v++) {
int count = 0;
for (int i = 0; i < this->coo.size(); i++) {
if (get<0>(this->coo[i]) == v)
count ++;
}
this->offset[v] = count;
}
cout << "Making rand ..." << endl;
// create rand array for IS
cudaMallocManaged(&(this->rand), this->vertices * sizeof(ValueT));
// this->rand = new ValueT[this->vertices];
random_device rd;
mt19937 e2(rd());
e2.seed(SEED_NUM);
uniform_real_distribution<> dist(0,100);
for (int v = 0; v < this->vertices; v++) {
this->rand[v] = dist(e2);
}
thrust::exclusive_scan(thrust::host, this->offset,
this->offset + this->vertices, this->offset);
cout << "Making colors ..." << endl;
// allocate memory for colors
cudaMallocManaged(&(this->colors), this->vertices * sizeof(SizeT));
// this->colors = new SizeT[this->vertices];
memset(this->colors, -1, this->vertices * sizeof(SizeT));
};
// index overload DEPRICATED
// template <typename ValueT, typename SizeT>
// SizeT & CSR<ValueT,SizeT>::operator[](SizeT idx) {
// return this->adj_matrix[idx];
// };
// print first 20 x 20 entries for adj matrix DEPRICATED
// template <typename ValueT, typename SizeT>
// void CSR<ValueT, SizeT>::print_adj() {
// SizeT max_idx = 20;
// if(this->vertices < 20)
// max_idx = this->vertices;
// for (int i = 0; i < max_idx; i++) {
// cout << i << " : [";
// for (int j = 0; j < max_idx; j++) {
// cout << this->adj_matrix[i * this->vertices + j] << ", ";
// }
// cout << "]" << endl;
// }
// };
// print first 20 tuples
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::print_tup() {
SizeT max_idx = 20;
if(this->vertices < 20)
max_idx = this->vertices;
for (int i = 0; i < max_idx; i++) {
cout << "(" << get<0>(this->coo[i]) << ", " << get<1>(this->coo[i]) << ")" <<endl;
}
};
// print first 20 entries for offset and csr
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::print_arrays() {
SizeT max_idx = 20;
if(this->vertices < 20)
max_idx = this->vertices;
cout << "CSR: [";
for (int i = 0; i < max_idx; i++) {
cout << this->csr[i] << ", ";
}
cout << "]" << endl;
cout << "OFFSET: [";
for (int i = 0; i < max_idx; i++) {
cout << this->offset[i] << ", ";
}
cout << "]" << endl;
cout << "COLORS: [";
for (int i = 0; i < max_idx; i++) {
cout << this->colors[i] << ", ";
}
cout << "]" << endl;
cout << "RAND: [";
for (int i = 0; i < max_idx; i++) {
cout << this->rand[i] << ", ";
}
cout << "]" << endl;
};
/*==============================================================================
Check for color conflict
==============================================================================*/
template <typename ValueT, typename SizeT>
void CSR<ValueT, SizeT>::check_conflict() {
for (SizeT v = 0; v < this->vertices; v++) {
SizeT start_edge = offset[v];
SizeT num_neighbors = offset[v + 1] - offset[v];
for (SizeT e = start_edge; e < start_edge + num_neighbors; e++) {
SizeT u = csr[e];
if ((this->colors[v] == this->colors[u]) && (u != v)) {
cout << "ERROR: Conflict at node " << v << "and node " << u
<< " at color" << colors[v] << endl;
}
}
}
}
/*==============================================================================
IS color operation - outline taken from Gunrock jpl_color_op
==============================================================================*/
template <typename ValueT, typename SizeT>
#if defined(VERSION) && VERSION == 1
__global__
#else
__device__
#endif
void color_op(SizeT* csr, SizeT* offset, ValueT* rand,
SizeT* colors, int num_vertices, int iteration) {
unsigned int v = blockIdx.x * blockDim.x + threadIdx.x;
if (v < num_vertices) {
if (colors[v] != -1) return;
SizeT start_edge = offset[v];
SizeT num_neighbors = offset[v + 1] - offset[v];
bool colormax = true;
bool colormin = true;
int color = iteration * 2;
for (SizeT e = start_edge; e < start_edge + num_neighbors; e++) {
SizeT u = csr[e];
if ((colors[u] != -1) && (colors[u] != color + 1) &&
(colors[u] != color + 2) ||
(v == u))
continue;
if (rand[v] <= rand[u]) colormax = false;
if (rand[v] >= rand[u]) colormin = false;
}
if (colormax) colors[v] = color + 1;
if (colormin) colors[v] = color + 2;
}
};
/*==============================================================================
IS color stop condition
==============================================================================*/
template <typename ValueT, typename SizeT>
__host__ __device__
bool stop_condition(SizeT* colors, unsigned int num_vertices) {
#if defined(VERSION) && VERSION == 1
for (int v = 0; v < num_vertices; v++) {
if (colors[v] == -1)
return true;
}
return false;
#else
return false;
#endif
}
/*==============================================================================
IS Kernel function
==============================================================================*/
// template <typename ValueT, typename SizeT>
// __global__
// void ISKernel(SizeT csr, SizeT offset, ValueT rand, SizeT colors, int num_vertices) {
// int iteration = 0;
// while (stop_condition(colors)) {
// color_op(csr, offset, rand, colors, num_vertices, iteration);
// // TODO: grid wise synchronization
// }
// };
/*==============================================================================
IS Kernel Driver
==============================================================================*/
// template <typename ValueT, typename SizeT>
// void ISKernelDriver(CSR<ValueT, SizeT> graph) {
// unsigned int num_threads = 32;
// unsigned int num_blocks = graph.vertices / num_threads + 1;
// ISKernel<ValueT, SizeT><<<num_blocks, num_threads>>>
// (graph.csr,
// graph.offset,
// graph.rand,
// graph.colors,
// graph.vertices);
// }
/*==============================================================================
Tester - version 1
==============================================================================*/
template <typename ValueT, typename SizeT>
void test_1(CSR <float, int> graph) {
int iteration = 0;
unsigned int num_threads = 32;
unsigned int num_blocks = graph.vertices / num_threads + 1;
cout << "Kernel loop start" << endl;
while (stop_condition<float, int>(graph.colors, graph.vertices)) {
color_op<float, int><<<num_blocks, num_threads>>>
(graph.csr,
graph.offset,
graph.rand,
graph.colors,
graph.vertices,
iteration);
cudaDeviceSynchronize();
iteration ++;
}
// graph.print_adj(); DEPRICATED
cout << "==== Graph Samples: ====" <<endl;
graph.print_tup();
graph.print_arrays();
graph.check_conflict();
};
/*==============================================================================
Main function
==============================================================================*/
int main(int argc, char const *argv[]) {
#if defined(VERSION) && VERSION == 1
// cout << "Test small graph" << endl;
// CSR <float, int> * graph = new CSR<float, int>("../gunrock/dataset/small/test_cc.mtx");
// test_1 <float, int> (*graph);
cout << "Test large graph" << endl;
CSR <float, int> * graph = new CSR<float, int>("/data-2/topc-datasets/gc-data/offshore/offshore.mtx");
test_1 <float, int> (*graph);
#endif
return 0;
}
|
sgeqr2x_gpu-v4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Wed Aug 14 12:16:44 2013
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
__global__ void
magma_strmv_kernel2(const float *T, int ldt,
float *v, float *y, float *tau);
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv,
float *c, float *dwork,
float *tau);
//////////////////////////////////////////////////////////////////////////////
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork);
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c);
__global__ void
magma_strmv_tkernel(float *T, int ldt, float *v,
float *y);
__global__ void
magma_snrm2_adjust_kernel(float *xnorm, float *c);
extern "C" magma_int_t
magma_sgeqr2x4_gpu(magma_int_t *m, magma_int_t *n, float *dA,
magma_int_t *ldda, float *dtau,
float *dT, float *ddA,
float *dwork, magma_int_t *info, magma_queue_t stream)
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
SGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
sgeqr2, namely, dT and ddA, explained below. The storage for A is
also not as in the LAPACK's sgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
=========
M (input) INTEGER
The number of rows of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input/output) REAL array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the unitary matrix Q as a
product of elementary reflectors (see Further Details).
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the unitary matrix Q as a
product of elementary reflectors (see Further Details).
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
TAU (output) REAL array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
dT (output) REAL array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
ddA (output) REAL array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
RWORK (workspace) DOUBLE_PRECISION array, dimension (3 N)
INFO (output) INTEGER
= 0: successful exit
< 0: if INFO = -i, the i-th argument had an illegal value
Further Details
===============
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
===================================================================== */
#define da_ref(a_1,a_2) ( dA+(a_2)*(*ldda) + (a_1))
#define dt_ref(a_1,a_2) ( dT+(a_2)*(k) + (a_1))
#define BS 32
magma_int_t i, k;
float *dnorm = (float *)dwork;
float *work = (float *)(dwork+2*(*n));
magma_queue_t cstream;
magmablasGetKernelStream(&cstream);
magmablasSetKernelStream(stream);
*info = 0;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*ldda < max(1,*m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(*m,*n);
magmablas_snrm2_cols(*m, k, da_ref(0,0), *ldda, dnorm);
for (magma_int_t b=0; b < k; b += BS) {
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H' to A(:,i) from the left */
if ( i-b > 0){
hipLaunchKernelGGL(( magma_sgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , *m-i+1, da_ref(i-1,0), *ldda,
da_ref(i-1, i-1), work, dtau+i-1);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dt_ref(0,0), k, work,
dt_ref(0,i-1), dtau+i-1);
/* dwork = V' c */
hipLaunchKernelGGL(( magma_sgemv_kernel1), dim3(i-b), dim3(BLOCK_SIZE), 0, magma_stream , *m-b, da_ref(b, b),
*ldda, da_ref(b,i), work);
/* dwork = T' work */
hipLaunchKernelGGL(( magma_strmv_tkernel), dim3(i-b), dim3(i-b), 0, magma_stream , dt_ref(b,b), k, work, work+i-b);
/* c = c - V work */
dim3 blocks3( (*m-b + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_sgemv_kernel2), dim3(blocks3), dim3(threads3), 0, magma_stream , *m-b, i-b, da_ref(b,b), *ldda,
work+i-b, da_ref(b, i));
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 )
hipLaunchKernelGGL(( magma_snrm2_adjust_kernel), dim3(1), dim3(i), 0, magma_stream , dnorm+i, da_ref(0, i));
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_slarfgx_gpu(*m-i, da_ref(i, i), da_ref(min(i+1,*m),i), dtau+i,
dnorm+i, ddA + i + i*(*n), i);
if (i==0){
float tt = MAGMA_S_ONE;
magmablas_slacpy(MagmaUpperLower, 1, 1, dtau, 1, dt_ref(0,0), 1);
magma_ssetmatrix(1,1, &tt,1, da_ref(i, i),1);
}
/*
else
{
// Compute the i-th column of T.
// Set da_ref(i, i) = 1.
magma_sgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>( *m-i, da_ref(i,0), *ldda,
da_ref(i, i), work, dtau+i);
magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( dt_ref(0,0), k, work,
dt_ref(0,i), dtau+i);
}
*/
}
hipLaunchKernelGGL(( magma_sgemv_kernel3), dim3(i-1), dim3(BLOCK_SIZE), 0, magma_stream , *m-i+1, da_ref(i-1,0), *ldda,
da_ref(i-1, i-1), work, dtau+i-1);
hipLaunchKernelGGL(( magma_strmv_kernel2), dim3(i-1), dim3(i-1), 0, magma_stream , dt_ref(0,0), k, work,
dt_ref(0,i-1), dtau+i-1);
/* Apply the transformations to the trailing matrix. */
//magma_slarfb2_gpu( MagmaLeft, MagmaTrans, MagmaForward, MagmaColumnwise,
magma_slarfb2_gpu(
*m-b, k-i, BS,
da_ref(b, b), *ldda, dT+b+b*k, k,
da_ref(b, i), *ldda, work, k-i);
}
magmablasSetKernelStream(cstream);
return *info;
} /* magma_sgeqr2 */
| sgeqr2x_gpu-v4.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated s Wed Aug 14 12:16:44 2013
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 512
#else
#define BLOCK_SIZE 768
#endif
__global__ void
magma_strmv_kernel2(const float *T, int ldt,
float *v, float *y, float *tau);
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv,
float *c, float *dwork,
float *tau);
//////////////////////////////////////////////////////////////////////////////
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork);
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c);
__global__ void
magma_strmv_tkernel(float *T, int ldt, float *v,
float *y);
__global__ void
magma_snrm2_adjust_kernel(float *xnorm, float *c);
extern "C" magma_int_t
magma_sgeqr2x4_gpu(magma_int_t *m, magma_int_t *n, float *dA,
magma_int_t *ldda, float *dtau,
float *dT, float *ddA,
float *dwork, magma_int_t *info, magma_queue_t stream)
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
SGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This expert routine requires two more arguments than the standard
sgeqr2, namely, dT and ddA, explained below. The storage for A is
also not as in the LAPACK's sgeqr2 routine (see below).
The first is used to output the triangular
n x n factor T of the block reflector used in the factorization.
The second holds the diagonal nxn blocks of A, i.e., the diagonal
submatrices of R. This routine implements the left looking QR.
This version adds internal blocking.
Arguments
=========
M (input) INTEGER
The number of rows of the matrix A. M >= 0.
N (input) INTEGER
The number of columns of the matrix A. N >= 0.
A (input/output) REAL array, dimension (LDA,N)
On entry, the m by n matrix A.
On exit, the unitary matrix Q as a
product of elementary reflectors (see Further Details).
the elements on and above the diagonal of the array
contain the min(m,n) by n upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the unitary matrix Q as a
product of elementary reflectors (see Further Details).
LDA (input) INTEGER
The leading dimension of the array A. LDA >= max(1,M).
TAU (output) REAL array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
dT (output) REAL array, dimension N x N.
Stores the triangular N x N factor T of the block reflector
used in the factorization. The lower triangular part is 0.
ddA (output) REAL array, dimension N x N.
Stores the elements of the upper N x N diagonal block of A.
LAPACK stores this array in A. There are 0s below the diagonal.
RWORK (workspace) DOUBLE_PRECISION array, dimension (3 N)
INFO (output) INTEGER
= 0: successful exit
< 0: if INFO = -i, the i-th argument had an illegal value
Further Details
===============
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
===================================================================== */
#define da_ref(a_1,a_2) ( dA+(a_2)*(*ldda) + (a_1))
#define dt_ref(a_1,a_2) ( dT+(a_2)*(k) + (a_1))
#define BS 32
magma_int_t i, k;
float *dnorm = (float *)dwork;
float *work = (float *)(dwork+2*(*n));
magma_queue_t cstream;
magmablasGetKernelStream(&cstream);
magmablasSetKernelStream(stream);
*info = 0;
if (*m < 0) {
*info = -1;
} else if (*n < 0) {
*info = -2;
} else if (*ldda < max(1,*m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
/* Compute the norms of the trailing columns */
k = min(*m,*n);
magmablas_snrm2_cols(*m, k, da_ref(0,0), *ldda, dnorm);
for (magma_int_t b=0; b < k; b += BS) {
for (i = b; i < min(k, b+BS); ++i) {
/* Apply H' to A(:,i) from the left */
if ( i-b > 0){
magma_sgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>>( *m-i+1, da_ref(i-1,0), *ldda,
da_ref(i-1, i-1), work, dtau+i-1);
magma_strmv_kernel2<<< i-1, i-1, 0, magma_stream >>>( dt_ref(0,0), k, work,
dt_ref(0,i-1), dtau+i-1);
/* dwork = V' c */
magma_sgemv_kernel1<<< i-b, BLOCK_SIZE, 0, magma_stream >>>(*m-b, da_ref(b, b),
*ldda, da_ref(b,i), work);
/* dwork = T' work */
magma_strmv_tkernel<<< i-b, i-b, 0, magma_stream >>>(dt_ref(b,b), k, work, work+i-b);
/* c = c - V work */
dim3 blocks3( (*m-b + BLOCK_SIZE-1) / BLOCK_SIZE );
dim3 threads3( BLOCK_SIZE );
magma_sgemv_kernel2<<< blocks3, threads3, 0, magma_stream >>>(*m-b, i-b, da_ref(b,b), *ldda,
work+i-b, da_ref(b, i));
}
/* Adjust the dnorm[i] to hold the norm of A(i:m,i) */
if ( i > 0 )
magma_snrm2_adjust_kernel<<< 1, i, 0, magma_stream >>> (dnorm+i, da_ref(0, i));
/* Generate elementary reflector H(i) to annihilate A(i+1:m,i)
1. 1 is not yet put on the diagonal of A
2. Elements above the diagonal are copied in ddA and
the ones in A are set to zero
3. update T */
magma_slarfgx_gpu(*m-i, da_ref(i, i), da_ref(min(i+1,*m),i), dtau+i,
dnorm+i, ddA + i + i*(*n), i);
if (i==0){
float tt = MAGMA_S_ONE;
magmablas_slacpy(MagmaUpperLower, 1, 1, dtau, 1, dt_ref(0,0), 1);
magma_ssetmatrix(1,1, &tt,1, da_ref(i, i),1);
}
/*
else
{
// Compute the i-th column of T.
// Set da_ref(i, i) = 1.
magma_sgemv_kernel3<<< i, BLOCK_SIZE, 0, magma_stream >>>( *m-i, da_ref(i,0), *ldda,
da_ref(i, i), work, dtau+i);
magma_strmv_kernel2<<< i, i, 0, magma_stream >>>( dt_ref(0,0), k, work,
dt_ref(0,i), dtau+i);
}
*/
}
magma_sgemv_kernel3<<< i-1, BLOCK_SIZE, 0, magma_stream >>>( *m-i+1, da_ref(i-1,0), *ldda,
da_ref(i-1, i-1), work, dtau+i-1);
magma_strmv_kernel2<<< i-1, i-1, 0, magma_stream >>>( dt_ref(0,0), k, work,
dt_ref(0,i-1), dtau+i-1);
/* Apply the transformations to the trailing matrix. */
//magma_slarfb2_gpu( MagmaLeft, MagmaTrans, MagmaForward, MagmaColumnwise,
magma_slarfb2_gpu(
*m-b, k-i, BS,
da_ref(b, b), *ldda, dT+b+b*k, k,
da_ref(b, i), *ldda, work, k-i);
}
magmablasSetKernelStream(cstream);
return *info;
} /* magma_sgeqr2 */
|
e8de2ff5a3588e1c32b68f759504a7469a77b5ce.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| e8de2ff5a3588e1c32b68f759504a7469a77b5ce.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
eabb6b4f8de127d27cab8aa88b572ff7ff868703.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
#define CHECK_CUDA_ERR(cudaerr) \
{ \
auto err = cudaerr; \
if (err != hipSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
hipGetErrorString(err)); \
exit(1); \
} \
}
__device__ void color_pixel(
char *colors, char *pixels,
float c_re, float c_im,
int global_index, int max_iter) {
float i = 0, j = 0, ii = 0, jj = 0;
int iteration = 0;
while ( ii + jj < 4.0 && iteration < max_iter) {
j = 2 * i * j + c_im;
i = ii - jj + c_re;
ii = i * i;
jj = j * j;
iteration++;
}
int color_index = global_index * 3;
if (iteration < max_iter) {
int it_offset = 3 + iteration * 3;
pixels[color_index] = colors[it_offset];
pixels[color_index + 1] = colors[it_offset + 1];
pixels[color_index + 2] = colors[it_offset + 2];
} else {
pixels[color_index] = colors[0];
pixels[color_index + 1] = colors[1];
pixels[color_index + 2] = colors[2];
}
}
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
float x = (float)(global_index % width);
float y = (float)(global_index / width);
float f_width = (float)width, f_height = (float)height;
float c_re = (x - f_height / 2.0) * 4.0 / f_height;
float c_im = (y - f_height / 2.0) * 4.0 / f_height;
if (global_index < height * width)
color_pixel(colors, pixels, c_re, c_im, global_index, max_iter);
}
void fill_colors(char *colors, int max_iter) {
colors[0] = 200;
colors[1] = 200;
colors[2] = 200;
int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
for (int i = 0; i < max_iter; i+=3) {
if (j % 50 == 0)
shade <<= 1;
int red = colors[0] + i * speed1 - j;
int green = colors[1] + i * speed2;
int blue = colors[2] + i * speed3 - j;
if (red < 0) red = 0;
if (green < 0) green = 0;
if (blue < 0) blue = 0;
colors[3 + i] = (red) % (256 / shade);
colors[3 + i + 1] = (green) % (256 / shade);
colors[3 + i + 2] = (blue) % (256 / shade);
j += 1;
}
}
int main(int argc, char **argv) {
int write_to_file_flag = std::atoi(argv[1]);
int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
int n_pixels = x_pixels * y_pixels;
char *host_pixels, *device_pixels, *host_colors, *device_colors;
size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
// This allocates pinned memory to speed-up memory transfers
CHECK_CUDA_ERR(hipHostMalloc(&host_pixels, pixel_size));
CHECK_CUDA_ERR(hipMalloc(&device_pixels, pixel_size));
size_t color_size = sizeof(char) * (max_iter * 3 + 3);
CHECK_CUDA_ERR(hipHostMalloc(&host_colors, color_size));
CHECK_CUDA_ERR(hipMalloc(&device_colors, color_size));
fill_colors(host_colors, max_iter);
CHECK_CUDA_ERR(hipMemcpy(device_colors, host_colors, color_size, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipDeviceSynchronize());
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( mandelbrot), dim3((32 + n_pixels) / 32), dim3(32), 0, 0,
/*colors=*/device_colors,
/*pixels=*/device_pixels,
/*height=*/y_pixels,
/*width=*/x_pixels,
/*max_iter*/max_iter);
CHECK_CUDA_ERR(hipDeviceSynchronize());
auto end = std::chrono::steady_clock::now();
std::cout << "RUN "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
start = std::chrono::steady_clock::now();
CHECK_CUDA_ERR(hipMemcpy(host_pixels, device_pixels, pixel_size, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(hipDeviceSynchronize());
end = std::chrono::steady_clock::now();
std::cout << "READ "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
if (write_to_file_flag) {
long long current_time = time(nullptr);
std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
image <<
(uint8_t)0x42 <<
(uint8_t)0x4D <<
(uint8_t)0x7C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x1A <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x0C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 << // Image Width
(uint8_t)0x4E << // Image Width
(uint8_t)0xA2 << // Image Height
(uint8_t)0x45 << // Image height
(uint8_t)0x01 <<
(uint8_t)0x00 <<
(uint8_t)0x18 <<
(uint8_t)0x00;
for (int i = 0; i < n_pixels * 3; i++)
image << host_pixels[i];
image << 0x00 << 0x00;
}
CHECK_CUDA_ERR(hipHostFree(host_pixels));
CHECK_CUDA_ERR(hipHostFree(host_colors));
CHECK_CUDA_ERR(hipFree(device_pixels));
CHECK_CUDA_ERR(hipFree(device_colors));
return 0;
}
| eabb6b4f8de127d27cab8aa88b572ff7ff868703.cu | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
#define CHECK_CUDA_ERR(cudaerr) \
{ \
auto err = cudaerr; \
if (err != cudaSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
cudaGetErrorString(err)); \
exit(1); \
} \
}
__device__ void color_pixel(
char *colors, char *pixels,
float c_re, float c_im,
int global_index, int max_iter) {
float i = 0, j = 0, ii = 0, jj = 0;
int iteration = 0;
while ( ii + jj < 4.0 && iteration < max_iter) {
j = 2 * i * j + c_im;
i = ii - jj + c_re;
ii = i * i;
jj = j * j;
iteration++;
}
int color_index = global_index * 3;
if (iteration < max_iter) {
int it_offset = 3 + iteration * 3;
pixels[color_index] = colors[it_offset];
pixels[color_index + 1] = colors[it_offset + 1];
pixels[color_index + 2] = colors[it_offset + 2];
} else {
pixels[color_index] = colors[0];
pixels[color_index + 1] = colors[1];
pixels[color_index + 2] = colors[2];
}
}
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
float x = (float)(global_index % width);
float y = (float)(global_index / width);
float f_width = (float)width, f_height = (float)height;
float c_re = (x - f_height / 2.0) * 4.0 / f_height;
float c_im = (y - f_height / 2.0) * 4.0 / f_height;
if (global_index < height * width)
color_pixel(colors, pixels, c_re, c_im, global_index, max_iter);
}
void fill_colors(char *colors, int max_iter) {
colors[0] = 200;
colors[1] = 200;
colors[2] = 200;
int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
for (int i = 0; i < max_iter; i+=3) {
if (j % 50 == 0)
shade <<= 1;
int red = colors[0] + i * speed1 - j;
int green = colors[1] + i * speed2;
int blue = colors[2] + i * speed3 - j;
if (red < 0) red = 0;
if (green < 0) green = 0;
if (blue < 0) blue = 0;
colors[3 + i] = (red) % (256 / shade);
colors[3 + i + 1] = (green) % (256 / shade);
colors[3 + i + 2] = (blue) % (256 / shade);
j += 1;
}
}
int main(int argc, char **argv) {
int write_to_file_flag = std::atoi(argv[1]);
int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
int n_pixels = x_pixels * y_pixels;
char *host_pixels, *device_pixels, *host_colors, *device_colors;
size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
// This allocates pinned memory to speed-up memory transfers
CHECK_CUDA_ERR(cudaMallocHost(&host_pixels, pixel_size));
CHECK_CUDA_ERR(cudaMalloc(&device_pixels, pixel_size));
size_t color_size = sizeof(char) * (max_iter * 3 + 3);
CHECK_CUDA_ERR(cudaMallocHost(&host_colors, color_size));
CHECK_CUDA_ERR(cudaMalloc(&device_colors, color_size));
fill_colors(host_colors, max_iter);
CHECK_CUDA_ERR(cudaMemcpy(device_colors, host_colors, color_size, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaDeviceSynchronize());
auto start = std::chrono::steady_clock::now();
mandelbrot<<<(32 + n_pixels) / 32, 32>>>(
/*colors=*/device_colors,
/*pixels=*/device_pixels,
/*height=*/y_pixels,
/*width=*/x_pixels,
/*max_iter*/max_iter);
CHECK_CUDA_ERR(cudaDeviceSynchronize());
auto end = std::chrono::steady_clock::now();
std::cout << "RUN "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
start = std::chrono::steady_clock::now();
CHECK_CUDA_ERR(cudaMemcpy(host_pixels, device_pixels, pixel_size, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(cudaDeviceSynchronize());
end = std::chrono::steady_clock::now();
std::cout << "READ "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
if (write_to_file_flag) {
long long current_time = time(nullptr);
std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
image <<
(uint8_t)0x42 <<
(uint8_t)0x4D <<
(uint8_t)0x7C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x1A <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x0C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 << // Image Width
(uint8_t)0x4E << // Image Width
(uint8_t)0xA2 << // Image Height
(uint8_t)0x45 << // Image height
(uint8_t)0x01 <<
(uint8_t)0x00 <<
(uint8_t)0x18 <<
(uint8_t)0x00;
for (int i = 0; i < n_pixels * 3; i++)
image << host_pixels[i];
image << 0x00 << 0x00;
}
CHECK_CUDA_ERR(cudaFreeHost(host_pixels));
CHECK_CUDA_ERR(cudaFreeHost(host_colors));
CHECK_CUDA_ERR(cudaFree(device_pixels));
CHECK_CUDA_ERR(cudaFree(device_colors));
return 0;
}
|
739435f86a93122cb756087ae6b4c5cbf398941d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/swish_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SwishCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + exp(-X[i]));
#endif
}
}
template <typename T>
__global__ void SwishGradientCUDAKernel(
const int N,
const T* X,
const T* Y,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
#else
dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));
#endif
}
}
} // namespace
template <>
template <typename T>
bool SwishFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
hipLaunchKernelGGL(( SwishCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, X, Y);
return true;
}
template <>
template <typename T>
bool SwishGradientOp<CUDAContext>::DoRunWithType() {
auto& Xin = Input(X);
auto& Yin = Input(Y);
auto& DYin = Input(DY);
auto* DXout = Output(DX);
CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());
CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());
DXout->ResizeLike(Yin);
const int n = Xin.size();
const T* x = Xin.template data<T>();
const T* y = Yin.template data<T>();
const T* dy = DYin.template data<T>();
T* dx = DXout->template mutable_data<T>();
hipLaunchKernelGGL(( SwishGradientCUDAKernel<T>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), n, x, y, dy, dx);
return true;
}
template <>
bool SwishGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(X));
}
REGISTER_CUDA_OPERATOR(
Swish,
UnaryElementwiseOp<
TensorTypes<float, double>,
CUDAContext,
SwishFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(SwishGradient, SwishGradientOp<CUDAContext>);
} // namespace caffe2
| 739435f86a93122cb756087ae6b4c5cbf398941d.cu | #include "caffe2/operators/swish_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SwishCUDAKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
#else
Y[i] = X[i] / (T(1) + exp(-X[i]));
#endif
}
}
template <typename T>
__global__ void SwishGradientCUDAKernel(
const int N,
const T* X,
const T* Y,
const T* dY,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
#else
dX[i] = dY[i] * (Y[i] + (T(1) - Y[i]) / (T(1) + exp(-X[i])));
#endif
}
}
} // namespace
template <>
template <typename T>
bool SwishFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
SwishCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, Y);
return true;
}
template <>
template <typename T>
bool SwishGradientOp<CUDAContext>::DoRunWithType() {
auto& Xin = Input(X);
auto& Yin = Input(Y);
auto& DYin = Input(DY);
auto* DXout = Output(DX);
CAFFE_ENFORCE_EQ(Xin.size(), Yin.size());
CAFFE_ENFORCE_EQ(DYin.size(), Yin.size());
DXout->ResizeLike(Yin);
const int n = Xin.size();
const T* x = Xin.template data<T>();
const T* y = Yin.template data<T>();
const T* dy = DYin.template data<T>();
T* dx = DXout->template mutable_data<T>();
SwishGradientCUDAKernel<T>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(n, x, y, dy, dx);
return true;
}
template <>
bool SwishGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(X));
}
REGISTER_CUDA_OPERATOR(
Swish,
UnaryElementwiseOp<
TensorTypes<float, double>,
CUDAContext,
SwishFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(SwishGradient, SwishGradientOp<CUDAContext>);
} // namespace caffe2
|
0ad0382b008f49ef48873f04629b1930288f4d4a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "merge_sort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *datas = NULL;
hipMalloc(&datas, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
merge_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
merge_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
merge_sort), dim3(gridBlock),dim3(threadBlock), 0, 0, datas,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0ad0382b008f49ef48873f04629b1930288f4d4a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "merge_sort.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *datas = NULL;
cudaMalloc(&datas, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
merge_sort<<<gridBlock,threadBlock>>>(datas,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
merge_sort<<<gridBlock,threadBlock>>>(datas,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
merge_sort<<<gridBlock,threadBlock>>>(datas,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e869049450eb67ee8e35c115b1f86eaafae3316d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
__global__ void AccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracy) {
int count = 0;
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
if (maxid == labeldata[i]) {
++count;
}
}
atomicAdd(accuracy, static_cast<float>(count));
}
__global__ void AccuracyDivideKernel(const int N, float* accuracy) {
*accuracy /= N;
}
} // namespace
template <>
bool AccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
auto* Y = Output(0);
CAFFE_DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
CAFFE_DCHECK_EQ(label.ndim(), 1);
CAFFE_DCHECK_EQ(label.dim32(0), N);
Y->Reshape(vector<TIndex>(1, 1));
float* Ydata = Y->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0, Ydata, &context_);
hipLaunchKernelGGL(( AccuracyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, X.data<float>(), label.data<int>(), Ydata);
// This is going to be executed only in one single kernel. Not very beautiful,
// but probably we have to do this?
hipLaunchKernelGGL(( AccuracyDivideKernel), dim3(1), dim3(1), 0, context_.cuda_stream(),
N, Ydata);
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
| e869049450eb67ee8e35c115b1f86eaafae3316d.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/accuracy_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
__global__ void AccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracy) {
int count = 0;
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
if (maxid == labeldata[i]) {
++count;
}
}
atomicAdd(accuracy, static_cast<float>(count));
}
__global__ void AccuracyDivideKernel(const int N, float* accuracy) {
*accuracy /= N;
}
} // namespace
template <>
bool AccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
auto* Y = Output(0);
CAFFE_DCHECK_EQ(X.ndim(), 2);
int N = X.dim32(0);
int D = X.dim32(1);
CAFFE_DCHECK_EQ(label.ndim(), 1);
CAFFE_DCHECK_EQ(label.dim32(0), N);
Y->Reshape(vector<TIndex>(1, 1));
float* Ydata = Y->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0, Ydata, &context_);
AccuracyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, X.data<float>(), label.data<int>(), Ydata);
// This is going to be executed only in one single kernel. Not very beautiful,
// but probably we have to do this?
AccuracyDivideKernel<<<1, 1, 0, context_.cuda_stream()>>>(
N, Ydata);
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(Accuracy, AccuracyOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
d3c271bd21a22899c66282779b8bbee9a4aecf9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
for (int i = 8; i < 25; i++)
{
st[i] = 0;
}
st[8] = 0x8000000000000001;
for (int r = 0; r < 24; r++) {
keccak_f1600_round(st, r);
}
}
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
__device__ uint4 fnv4(uint4 a, uint4 b)
{
uint4 c;
c.x = a.x * FNV_PRIME ^ b.x;
c.y = a.y * FNV_PRIME ^ b.y;
c.z = a.z * FNV_PRIME ^ b.z;
c.w = a.w * FNV_PRIME ^ b.w;
return c;
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
}
}
L2WB;
MEM_FENCE;PCOMMIT; MEM_FENCE;
}
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
hipStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(blocks), dim3(threads), 0, stream , i * blocks * threads, dag, dag_bytes, light, light_words);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUDA_SAFE_CALL(hipGetLastError());
}
| d3c271bd21a22899c66282779b8bbee9a4aecf9b.cu | /*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "CUDAMiner_cuda.h"
#include "cuda_helper.h"
#include "../libethash/ethash.h"
#include "stdio.h"
#include "nvm_til.h"
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
// converted from 64->32 bit words
__device__ __constant__ const uint64_t keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
__device__ __forceinline__ void keccak_f1600_round(uint64_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint64_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL64(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
__device__ __forceinline__ void keccak_f1600(uint64_t st[25])
{
for (int i = 8; i < 25; i++)
{
st[i] = 0;
}
st[8] = 0x8000000000000001;
for (int r = 0; r < 24; r++) {
keccak_f1600_round(st, r);
}
}
#define FNV_PRIME 0x01000193U
#define fnv(x,y) ((uint32_t(x) * (FNV_PRIME)) ^uint32_t(y))
__device__ uint4 fnv4(uint4 a, uint4 b)
{
uint4 c;
c.x = a.x * FNV_PRIME ^ b.x;
c.y = a.y * FNV_PRIME ^ b.y;
c.z = a.z * FNV_PRIME ^ b.z;
c.w = a.w * FNV_PRIME ^ b.w;
return c;
}
#define NODE_WORDS (ETHASH_HASH_BYTES/sizeof(uint32_t))
__global__ void
ethash_calculate_dag_item(uint32_t start, hash64_t *g_dag, uint64_t dag_bytes, hash64_t* g_light, uint32_t light_words)
{
uint64_t const node_index = start + uint64_t(blockIdx.x) * blockDim.x + threadIdx.x;
uint64_t num_nodes = dag_bytes / sizeof(hash64_t);
uint64_t num_nodes_rounded = ((num_nodes + 3) / 4) * 4;
if (node_index >= num_nodes_rounded) return; // None of the threads from this quad have valid node_index
hash200_t dag_node;
for(int i=0; i<4; i++)
dag_node.uint4s[i] = g_light[node_index % light_words].uint4s[i];
dag_node.words[0] ^= node_index;
keccak_f1600(dag_node.uint64s);
const int thread_id = threadIdx.x & 3;
#pragma unroll
for (uint32_t i = 0; i < ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % light_words;
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,parent_index, t, 4);
uint4 p4 = g_light[shuffle_index].uint4s[thread_id];
#pragma unroll
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl_sync(0xFFFFFFFF,p4.x, w, 4),
__shfl_sync(0xFFFFFFFF,p4.y, w, 4),
__shfl_sync(0xFFFFFFFF,p4.z, w, 4),
__shfl_sync(0xFFFFFFFF,p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
}
keccak_f1600(dag_node.uint64s);
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl_sync(0xFFFFFFFF,node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].x, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].y, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].z, t, 4),
__shfl_sync(0xFFFFFFFF,dag_node.uint4s[w].w, t, 4));
}
if(shuffle_index*sizeof(hash64_t) < dag_bytes){
g_dag[shuffle_index].uint4s[thread_id] = s[thread_id];
}
}
L2WB;
MEM_FENCE;PCOMMIT; MEM_FENCE;
}
void ethash_generate_dag(
hash64_t* dag,
uint64_t dag_bytes,
hash64_t * light,
uint32_t light_words,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream,
int device
)
{
uint64_t const work = dag_bytes / sizeof(hash64_t);
uint32_t fullRuns = (uint32_t)(work / (blocks * threads));
uint32_t const restWork = (uint32_t)(work % (blocks * threads));
if (restWork > 0) fullRuns++;
printf("fullRuns=%d\n",fullRuns);
for (uint32_t i = 0; i < fullRuns; i++)
{
ethash_calculate_dag_item <<<blocks, threads, 0, stream >>>(i * blocks * threads, dag, dag_bytes, light, light_words);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
CUDA_SAFE_CALL(cudaGetLastError());
}
|
09578f2f079e44fba94bfe2ac8085df1a67a6de2.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
auto end2 = thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devide_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_iterator(sorted_points.begin()),
make_tuple_iterator(output->points_.begin()));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->colors_.resize(n_out);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
output->colors_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean =
thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0,
[] __device__(float const &x, float const &y) {
return (y > 0) ? x + y : x;
});
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
| 09578f2f079e44fba94bfe2ac8085df1a67a6de2.cu | #include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
auto end2 = thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devide_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_iterator(sorted_points.begin()),
make_tuple_iterator(output->points_.begin()));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->colors_.resize(n_out);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_iterator(sorted_points.begin(),
sorted_normals.begin(),
sorted_colors.begin()),
make_tuple_iterator(output->points_.begin(),
output->normals_.begin(),
output->colors_.begin()));
output->points_.resize(n_out);
output->normals_.resize(n_out);
output->colors_.resize(n_out);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean =
thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0,
[] __device__(float const &x, float const &y) {
return (y > 0) ? x + y : x;
});
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
|
01ae1c9f5be3568c0c94348e8c1c2964caeed051.hip | // !!! This is a file automatically generated by hipify!!!
// Demonstration of kernel execution configuration for a two-dimensional
// grid.
// Example for video 2.2.
#include <hip/hip_runtime_api.h>
#include <stdio.h>
// Error checking macro
#define cudaCheckError(code) \
{ \
if ((code) != hipSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__, \
hipGetErrorString(code)); \
} \
}
__global__ void kernel_2d()
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("block (%d, %d), thread (%d, %d), index (%d, %d)\n", blockIdx.x,
blockIdx.y, threadIdx.x, threadIdx.y, x, y);
}
int main()
{
dim3 block_dim(8, 2);
dim3 grid_dim(2, 1);
hipLaunchKernelGGL(( kernel_2d), dim3(grid_dim), dim3(block_dim), 0, 0, );
cudaCheckError(hipDeviceSynchronize());
}
| 01ae1c9f5be3568c0c94348e8c1c2964caeed051.cu | // Demonstration of kernel execution configuration for a two-dimensional
// grid.
// Example for video 2.2.
#include <cuda_runtime_api.h>
#include <stdio.h>
// Error checking macro
#define cudaCheckError(code) \
{ \
if ((code) != cudaSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n", __FILE__, __LINE__, \
cudaGetErrorString(code)); \
} \
}
__global__ void kernel_2d()
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("block (%d, %d), thread (%d, %d), index (%d, %d)\n", blockIdx.x,
blockIdx.y, threadIdx.x, threadIdx.y, x, y);
}
int main()
{
dim3 block_dim(8, 2);
dim3 grid_dim(2, 1);
kernel_2d<<<grid_dim, block_dim>>>();
cudaCheckError(cudaDeviceSynchronize());
}
|
135127f461ed7ba36a5935deb6f311c31685b14f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 16
#define ARRAY_SIZE 1280
int main(int argc, char const *argv[])
{
// allocate in host
int *h_a, *h_b, *h_c1, *h_c2;
hipHostMalloc((void **)&h_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
hipHostMalloc((void **)&h_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
hipHostMalloc((void **)&h_c1, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
hipHostMalloc((void **)&h_c2, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; ++i) {
for (int j = 0; j < ARRAY_SIZE; ++j) {
h_a[i * ARRAY_SIZE + j] = rand() % 100;
h_b[i * ARRAY_SIZE + j] = rand() % 100;
}
}
// Allocate in device
int *d_a, *d_b, *d_c;
hipMalloc((void **)&d_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
hipMalloc((void **)&d_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
hipMalloc((void **)&d_c, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
// copy from host to device memory
hipMemcpy(d_a, h_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, hipMemcpyHostToDevice);
int grid_rows = BLOCK_SIZE*BLOCK_SIZE;
int grid_cols = ARRAY_SIZE / grid_rows;
dim3 dimGrid(grid_cols, grid_cols,1);
dim3 dimBlock(grid_rows, grid_rows,1);
float elapsed_time_gpu;
//description to calculate time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start time of GPU
hipEventRecord(start, 0);
matrix_mult_gpu <<<dimGrid, dimBlock >> > (d_a, d_b, d_c, ARRAY_SIZE);
// copy from device to host
hipMemcpy(h_c1, d_c, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// stop time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute time elapse on GPU computing
hipEventElapsedTime(&elapsed_time_gpu, start, stop);
printf("Time elapsed on matrix multiplician %d on GPU: %.1f s.\n\n", ARRAY_SIZE, elapsed_time_gpu);
// start the CPU version
clock_t t;
t = clock();
matrix_mult_cpu(h_a, h_b, h_c2, ARRAY_SIZE);
t = clock() - t;
double elapsed_time = ((double)t) / CLOCKS_PER_SEC;
printf("Time elapsed on matrix multiplication %d on CPU: %.1f s.\n\n", ARRAY_SIZE, elapsed_time);
// free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c1);
hipHostFree(h_c2);
return 0;
}
__global__ void matrix_mult_gpu(int *a, int *b, int *c, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (col < n && row < n)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * n + col];
}
c[row * n + col] = sum;
}
}
void matrix_mult_cpu(int *h_a, int *h_b, int *h_result, int n)
{
int i, j, k;
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
h_result[i*n + j] = 0;
for (k = 0; k < n; k++)
{
h_result[i*n + j] += h_a[k + i * n] * h_b[k*n + j];
}
}
}
} | 135127f461ed7ba36a5935deb6f311c31685b14f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 16
#define ARRAY_SIZE 1280
int main(int argc, char const *argv[])
{
// allocate in host
int *h_a, *h_b, *h_c1, *h_c2;
cudaMallocHost((void **)&h_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
cudaMallocHost((void **)&h_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
cudaMallocHost((void **)&h_c1, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
cudaMallocHost((void **)&h_c2, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; ++i) {
for (int j = 0; j < ARRAY_SIZE; ++j) {
h_a[i * ARRAY_SIZE + j] = rand() % 100;
h_b[i * ARRAY_SIZE + j] = rand() % 100;
}
}
// Allocate in device
int *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
cudaMalloc((void **)&d_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
cudaMalloc((void **)&d_c, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE);
// copy from host to device memory
cudaMemcpy(d_a, h_a, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, cudaMemcpyHostToDevice);
int grid_rows = BLOCK_SIZE*BLOCK_SIZE;
int grid_cols = ARRAY_SIZE / grid_rows;
dim3 dimGrid(grid_cols, grid_cols,1);
dim3 dimBlock(grid_rows, grid_rows,1);
float elapsed_time_gpu;
//description to calculate time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// start time of GPU
cudaEventRecord(start, 0);
matrix_mult_gpu <<<dimGrid, dimBlock >> > (d_a, d_b, d_c, ARRAY_SIZE);
// copy from device to host
cudaMemcpy(h_c1, d_c, sizeof(int)*ARRAY_SIZE*ARRAY_SIZE, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
// stop time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// compute time elapse on GPU computing
cudaEventElapsedTime(&elapsed_time_gpu, start, stop);
printf("Time elapsed on matrix multiplician %d on GPU: %.1f s.\n\n", ARRAY_SIZE, elapsed_time_gpu);
// start the CPU version
clock_t t;
t = clock();
matrix_mult_cpu(h_a, h_b, h_c2, ARRAY_SIZE);
t = clock() - t;
double elapsed_time = ((double)t) / CLOCKS_PER_SEC;
printf("Time elapsed on matrix multiplication %d on CPU: %.1f s.\n\n", ARRAY_SIZE, elapsed_time);
// free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c1);
cudaFreeHost(h_c2);
return 0;
}
__global__ void matrix_mult_gpu(int *a, int *b, int *c, int n)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (col < n && row < n)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * n + col];
}
c[row * n + col] = sum;
}
}
void matrix_mult_cpu(int *h_a, int *h_b, int *h_result, int n)
{
int i, j, k;
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
h_result[i*n + j] = 0;
for (k = 0; k < n; k++)
{
h_result[i*n + j] += h_a[k + i * n] * h_b[k*n + j];
}
}
}
} |
8177760846ffc017b53d1036383ab741eff27baf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
} | 8177760846ffc017b53d1036383ab741eff27baf.cu | #include "includes.h"
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
target[i] = powf(mat[i], pow[i]);
}
} |
d0abcaac944b8291d15eb7e01f7ebd79981c6edb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| d0abcaac944b8291d15eb7e01f7ebd79981c6edb.cu |
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
|
08d4e501e9c414c1acce2d09adcb26e69b1cd900.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/iterator.cuh>
#include <rmm/rmm.h>
#include <cudf/utilities/error.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <hipcub/hipcub.hpp>
#include <cudf/reduction.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/null_mask.hpp>
namespace cudf {
namespace experimental {
namespace detail {
/**
* @brief Dispatcher for running Scan operation on input column
* Dispatches scan operartion on `Op` and creates output column
*
* @tparam Op device binary operator
*/
template <typename Op>
struct ScanDispatcher {
private:
// return true if T is arithmetic type (including cudf::experimental::bool8)
template <typename T>
static constexpr bool is_supported() {
return std::is_arithmetic<T>::value;
}
template <typename T>
auto exclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
const size_t size = input_view.size();
auto output_column = experimental::detail::allocate_like(
input_view, size, experimental::mask_allocation_policy::NEVER, mr,
stream);
output_column->set_null_mask(copy_bitmask(input_view, stream, mr),
input_view.null_count());
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
rmm::device_buffer temp_storage;
size_t temp_storage_bytes = 0;
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
// Prepare temp storage
hipcub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
hipcub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
} else {
auto input = d_input->begin<T>();
// Prepare temp storage
hipcub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
hipcub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
}
CHECK_CUDA(stream);
return output_column;
}
template <typename T>
auto inclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
const size_t size = input_view.size();
auto output_column = experimental::detail::allocate_like(
input_view, size, experimental::mask_allocation_policy::NEVER, mr,
stream);
output_column->set_null_mask(copy_bitmask(input_view, stream, mr),
input_view.null_count());
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
rmm::device_buffer temp_storage;
size_t temp_storage_bytes = 0;
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
// Prepare temp storage
hipcub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
hipcub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
} else {
auto input = d_input->begin<T>();
// Prepare temp storage
hipcub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
hipcub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
}
CHECK_CUDA(stream);
return output_column;
}
public:
/**
* @brief creates new column from input column by applying scan operation
*
* @param input input column view
* @param inclusive inclusive or exclusive scan
* @param mr The resource to use for all allocations
* @param stream The stream on which to execute all allocations and copies
* @return
*
* @tparam T type of input column
*/
template <typename T,
typename std::enable_if_t<is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
bool inclusive,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
std::unique_ptr<column> output;
if (inclusive)
output = inclusive_scan<T>(input, mr, stream);
else
output = exclusive_scan<T>(input, mr, stream);
CUDF_EXPECTS(input.null_count() == output->null_count(),
"Input / output column null count mismatch");
return output;
}
template <typename T,
typename std::enable_if_t<!is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
bool inclusive,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
CUDF_FAIL("Non-arithmetic types not supported for `cudf::scan`");
}
};
std::unique_ptr<column> scan(const column_view& input,
scan_op op, bool inclusive,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream=0)
{
CUDF_EXPECTS(is_numeric(input.type()), "Unexpected non-numeric type.");
switch (op) {
case scan_op::SUM:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceSum>(), input, inclusive, mr, stream);
case scan_op::MIN:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMin>(), input, inclusive, mr, stream);
case scan_op::MAX:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMax>(), input, inclusive, mr, stream);
case scan_op::PRODUCT:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceProduct>(), input, inclusive, mr, stream);
default:
CUDF_FAIL("The input enum `scan::operators` is out of the range");
}
}
} // namespace detail
std::unique_ptr<column> scan(const column_view& input,
scan_op op, bool inclusive,
rmm::mr::device_memory_resource* mr)
{
return detail::scan(input, op, inclusive, mr);
}
} // namespace experimental
} // namespace cudf
| 08d4e501e9c414c1acce2d09adcb26e69b1cd900.cu | #include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/iterator.cuh>
#include <rmm/rmm.h>
#include <cudf/utilities/error.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/detail/utilities/device_atomics.cuh>
#include <cub/device/device_scan.cuh>
#include <cudf/reduction.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/null_mask.hpp>
namespace cudf {
namespace experimental {
namespace detail {
/**
* @brief Dispatcher for running Scan operation on input column
* Dispatches scan operartion on `Op` and creates output column
*
* @tparam Op device binary operator
*/
template <typename Op>
struct ScanDispatcher {
private:
// return true if T is arithmetic type (including cudf::experimental::bool8)
template <typename T>
static constexpr bool is_supported() {
return std::is_arithmetic<T>::value;
}
template <typename T>
auto exclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
const size_t size = input_view.size();
auto output_column = experimental::detail::allocate_like(
input_view, size, experimental::mask_allocation_policy::NEVER, mr,
stream);
output_column->set_null_mask(copy_bitmask(input_view, stream, mr),
input_view.null_count());
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
rmm::device_buffer temp_storage;
size_t temp_storage_bytes = 0;
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
// Prepare temp storage
cub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
cub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
} else {
auto input = d_input->begin<T>();
// Prepare temp storage
cub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
cub::DeviceScan::ExclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
Op::template identity<T>(), size, stream);
}
CHECK_CUDA(stream);
return output_column;
}
template <typename T>
auto inclusive_scan(const column_view& input_view,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
const size_t size = input_view.size();
auto output_column = experimental::detail::allocate_like(
input_view, size, experimental::mask_allocation_policy::NEVER, mr,
stream);
output_column->set_null_mask(copy_bitmask(input_view, stream, mr),
input_view.null_count());
mutable_column_view output = output_column->mutable_view();
auto d_input = column_device_view::create(input_view, stream);
rmm::device_buffer temp_storage;
size_t temp_storage_bytes = 0;
if (input_view.has_nulls()) {
auto input = make_null_replacement_iterator(*d_input, Op::template identity<T>());
// Prepare temp storage
cub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
cub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
} else {
auto input = d_input->begin<T>();
// Prepare temp storage
cub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{},
size, stream);
temp_storage = rmm::device_buffer{temp_storage_bytes, stream, mr};
cub::DeviceScan::InclusiveScan(temp_storage.data(), temp_storage_bytes,
input, output.data<T>(), Op{}, size, stream);
}
CHECK_CUDA(stream);
return output_column;
}
public:
/**
* @brief creates new column from input column by applying scan operation
*
* @param input input column view
* @param inclusive inclusive or exclusive scan
* @param mr The resource to use for all allocations
* @param stream The stream on which to execute all allocations and copies
* @return
*
* @tparam T type of input column
*/
template <typename T,
typename std::enable_if_t<is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
bool inclusive,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
std::unique_ptr<column> output;
if (inclusive)
output = inclusive_scan<T>(input, mr, stream);
else
output = exclusive_scan<T>(input, mr, stream);
CUDF_EXPECTS(input.null_count() == output->null_count(),
"Input / output column null count mismatch");
return output;
}
template <typename T,
typename std::enable_if_t<!is_supported<T>(), T>* = nullptr>
std::unique_ptr<column> operator()(const column_view& input,
bool inclusive,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
CUDF_FAIL("Non-arithmetic types not supported for `cudf::scan`");
}
};
std::unique_ptr<column> scan(const column_view& input,
scan_op op, bool inclusive,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream=0)
{
CUDF_EXPECTS(is_numeric(input.type()), "Unexpected non-numeric type.");
switch (op) {
case scan_op::SUM:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceSum>(), input, inclusive, mr, stream);
case scan_op::MIN:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMin>(), input, inclusive, mr, stream);
case scan_op::MAX:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceMax>(), input, inclusive, mr, stream);
case scan_op::PRODUCT:
return cudf::experimental::type_dispatcher(input.type(),
ScanDispatcher<cudf::DeviceProduct>(), input, inclusive, mr, stream);
default:
CUDF_FAIL("The input enum `scan::operators` is out of the range");
}
}
} // namespace detail
std::unique_ptr<column> scan(const column_view& input,
scan_op op, bool inclusive,
rmm::mr::device_memory_resource* mr)
{
return detail::scan(input, op, inclusive, mr);
}
} // namespace experimental
} // namespace cudf
|
1ea6584324c197d8582f461ad12766aa3335bb3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* To run the debugger!!
* CUDA_VISIBLE_DEVICES="0" cuda-gdb -tui ns
*
* To run the profiler on Reynolds!!
* nvvp -vmargs -Dosgi.locking=none
*
* */
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_globals.h"
__constant__ myprec dcoeffF[stencilSize];
__constant__ myprec dcoeffS[stencilSize+1];
__constant__ myprec dcoeffVF[stencilVisc];
__constant__ myprec dcoeffVS[stencilVisc+1];
__constant__ myprec dcoeffSx[mx*(2*stencilSize+1)];
__constant__ myprec dcoeffVSx[mx*(2*stencilVisc+1)];
__constant__ myprec d_dt, d_dx, d_dy, d_dz, d_d2x, d_d2y, d_d2z, d_x[mx], d_xp[mx], d_dxv[mx];
#if (capability>capabilityMin)
__constant__ dim3 d_block[5], grid0;
__constant__ dim3 d_grid[5], block0;
#else
__constant__ int d_block[5*2], grid0[2];
__constant__ int d_grid[5*2], block0[2];
#endif
dim3 hgrid, hblock;
void copyThreadGridsToDevice(dim3 &grid, dim3 &block);
// host routine to set constant data
void setDerivativeParameters(dim3 &grid, dim3 &block)
{
// check to make sure dimensions are integral multiples of sPencils
if ((mx % sPencils != 0) || (my %sPencils != 0) || (mz % sPencils != 0)) {
printf("'mx', 'my', and 'mz' must be integral multiples of sPencils\n");
exit(1);
}
myprec h_dt = (myprec) dt;
myprec h_dx = (myprec) 1.0/(dx);
myprec h_dy = (myprec) 1.0/(y[1] - y[0]);
myprec h_dz = (myprec) 1.0/(z[1] - z[0]);
myprec *h_x = new myprec[mx];
myprec *h_xp = new myprec[mx];
myprec *h_dxv = new myprec[mx];
for (int i=0; i<mx; i++) {
h_x[i] = x[i];
h_xp[i] = xp[i];
}
h_dxv[0] = (x[1]+x[0])/2.0;
for (int i=1; i<mx-1; i++) {
h_dxv[i] = (x[i+1]-x[i-1])/2.0;
}
h_dxv[mx-1] = Lx - (x[mx-1]+x[mx-2])/2.0;
myprec h_d2x = h_dx*h_dx;
myprec h_d2y = h_dy*h_dy;
myprec h_d2z = h_dz*h_dz;
myprec *h_coeffF = new myprec[stencilSize];
myprec *h_coeffS = new myprec[stencilSize+1];
myprec *h_coeffVF = new myprec[stencilSize];
myprec *h_coeffVS = new myprec[stencilSize+1];
for (int it=0; it<stencilSize; it++) {
h_coeffF[it] = (myprec) coeffF[it]; }
for (int it=0; it<stencilVisc; it++) {
h_coeffVF[it] = (myprec) coeffVF[it]; }
for (int it=0; it<stencilSize+1; it++) {
h_coeffS[it] = (myprec) coeffS[it]; }
for (int it=0; it<stencilVisc+1; it++) {
h_coeffVS[it] = (myprec) coeffVS[it]; }
//constructing the second order derivative coefficients in the x-direction
myprec *h_coeffVSx = new myprec[mx*(2*stencilVisc+1)];
for (int it=0; it<stencilVisc; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffVSx[idx] = (coeffVS[it]*(xp[i]*xp[i])*h_d2x - coeffVF[it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
for (int i=0; i<mx; i++) {
int idx = i + stencilVisc*mx;
h_coeffVSx[idx] = coeffVS[stencilVisc]*(xp[i]*xp[i])*h_d2x;
}
for (int it=stencilVisc+1; it<2*stencilVisc+1; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffVSx[idx] = ( coeffVS[2*stencilVisc - it]*(xp[i]*xp[i])*h_d2x +
coeffVF[2*stencilVisc - it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
myprec *h_coeffSx = new myprec[mx*(2*stencilSize+1)];
for (int it=0; it<stencilSize; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffSx[idx] = (coeffS[it]*(xp[i]*xp[i])*h_d2x - coeffF[it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
for (int i=0; i<mx; i++) {
int idx = i + stencilSize*mx;
h_coeffSx[idx] = coeffS[stencilSize]*(xp[i]*xp[i])*h_d2x;
}
for (int it=stencilSize+1; it<2*stencilSize+1; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffSx[idx] = ( coeffVS[2*stencilSize - it]*(xp[i]*xp[i])*h_d2x +
coeffVF[2*stencilSize - it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
checkCuda( hipMemcpyToSymbol(dcoeffF , h_coeffF , stencilSize *sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(dcoeffS , h_coeffS , (stencilSize+1)*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(dcoeffVF, h_coeffVF, stencilVisc *sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(dcoeffVS, h_coeffVS, (stencilVisc+1)*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(dcoeffSx , h_coeffSx , mx*(2*stencilSize+1)*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(dcoeffVSx, h_coeffVSx, mx*(2*stencilVisc+1)*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_dt , &h_dt , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_dx , &h_dx , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_dy , &h_dy , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_dz , &h_dz , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_d2x , &h_d2x , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_d2y , &h_d2y , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_d2z , &h_d2z , sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_x , h_x ,mx*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_xp , h_xp ,mx*sizeof(myprec), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_dxv , h_dxv ,mx*sizeof(myprec), 0, hipMemcpyHostToDevice) );
copyThreadGridsToDevice(grid,block);
delete [] h_coeffF;
delete [] h_coeffS;
delete [] h_coeffVF;
delete [] h_coeffVS;
delete [] h_coeffSx;
delete [] h_coeffVSx;
delete [] h_x;
delete [] h_xp;
delete [] h_dxv;
}
#if (capability>capabilityMin)
void copyThreadGridsToDevice(dim3 &grid, dim3 &block) {
dim3 *h_grid, *h_block;
h_grid = new dim3[5];
h_block = new dim3[5];
// X-grid
h_grid[0] = dim3(my / sPencils, mz, 1);
h_block[0] = dim3(mx, sPencils, 1);
// Y-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[3] = dim3(mx / lPencils, mz, 1);
h_block[3] = dim3(lPencils, (my * sPencils) / lPencils, 1);
h_grid[1] = dim3(mx / sPencils, mz, 1);
h_block[1] = dim3(my , sPencils, 1); //if not using shared change!!
// Z-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[4] = dim3(mx / lPencils, my, 1);
h_block[4] = dim3(lPencils, (mz * sPencils) / lPencils, 1);
h_grid[2] = dim3(mx / sPencils, my, 1);
h_block[2] = dim3(mz , sPencils, 1); //if not using shared change!!
checkCuda( hipMemcpyToSymbol(d_grid , h_grid , 5*sizeof(dim3), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_block , h_block , 5*sizeof(dim3), 0, hipMemcpyHostToDevice) );
printf("Grid configuration:\n");
printf("Grid 0: {%d, %d, %d} blocks. Blocks 0: {%d, %d, %d} threads.\n",h_grid[0].x, h_grid[0].y, h_grid[0].z, h_block[0].x, h_block[0].y, h_block[0].z);
printf("Grid 1: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[1].x, h_grid[1].y, h_grid[1].z, h_block[1].x, h_block[1].y, h_block[1].z);
printf("Grid 2: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[2].x, h_grid[2].y, h_grid[2].z, h_block[2].x, h_block[2].y, h_block[2].z);
printf("Grid 3: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[3].x, h_grid[3].y, h_grid[3].z, h_block[3].x, h_block[3].y, h_block[3].z);
printf("Grid 4: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[4].x, h_grid[4].y, h_grid[4].z, h_block[4].x, h_block[4].y, h_block[4].z);
printf("\n");
hgrid = dim3(my / sPencils, mz, 1);
hblock = dim3(mx, sPencils, 1);
checkCuda( hipMemcpyToSymbol(grid0 , &hgrid , sizeof(dim3), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(block0 , &hblock , sizeof(dim3), 0, hipMemcpyHostToDevice) );
grid = 1;
block = 1;
delete [] h_grid;
delete [] h_block;
}
#else
void copyThreadGridsToDevice(dim3 &grid, dim3 &block) {
int *h_grid, *h_block;
h_grid = new int[2*5];
h_block = new int[2*5];
// X-grid
h_grid[0] = my / sPencils; h_grid[1] = mz;
h_block[0] = mx; h_block[1] = sPencils;
// Y-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[2] = mx / lPencils; h_grid[3] = mz;
h_block[2] = lPencils; h_block[3] = (my * sPencils) / lPencils;
h_grid[4] = mx / sPencils; h_grid[5] = mz;
h_block[4] = my; h_block[5] = sPencils;
// Z-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[6] = mx / lPencils; h_grid[7] = my;
h_block[6] = lPencils; h_block[7] = (mz * sPencils) / lPencils;
h_grid[8] = mx / sPencils; h_grid[9] = my;
h_block[8] = mz; h_block[9] = sPencils;
checkCuda( hipMemcpyToSymbol(d_grid , h_grid , 2*5*sizeof(int), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(d_block , h_block , 2*5*sizeof(int), 0, hipMemcpyHostToDevice) );
printf("Grid configuration:\n");
printf("Grid 0: {%d, %d, %d} blocks. Blocks 0: {%d, %d, %d} threads.\n",h_grid[0], h_grid[1], 1, h_block[0], h_block[1], 1);
printf("Grid 1: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[2], h_grid[3], 1, h_block[2], h_block[3], 1);
printf("Grid 2: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[6], h_grid[7], 1, h_block[6], h_block[7], 1);
printf("Grid 3: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[4], h_grid[5], 1, h_block[4], h_block[5], 1);
printf("Grid 4: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[8], h_grid[9], 1, h_block[8], h_block[9], 1);
printf("\n");
hgrid = dim3(my / sPencils, mz, 1);
hblock = dim3(mx, sPencils, 1);
int *h_grid0,*h_block0;
h_grid0 = new int[2];
h_block0 = new int[2];
h_grid0[0] = h_grid[0] ; h_grid0[1] = h_grid[1];
h_block0[0] = h_block[0]; h_block0[1] = h_block[1];
checkCuda( hipMemcpyToSymbol(grid0 , h_grid0 , 2*sizeof(int), 0, hipMemcpyHostToDevice) );
checkCuda( hipMemcpyToSymbol(block0 , h_block0 , 2*sizeof(int), 0, hipMemcpyHostToDevice) );
grid = 1;
block = 1;
delete [] h_grid;
delete [] h_block;
delete [] h_grid0;
delete [] h_block0;
}
#endif
void copyField(int direction) {
myprec *fr = new myprec[mx*my*mz];
myprec *fu = new myprec[mx*my*mz];
myprec *fv = new myprec[mx*my*mz];
myprec *fw = new myprec[mx*my*mz];
myprec *fe = new myprec[mx*my*mz];
myprec *d_fr, *d_fu, *d_fv, *d_fw, *d_fe;
int bytes = mx*my*mz * sizeof(myprec);
checkCuda( hipMalloc((void**)&d_fr, bytes) );
checkCuda( hipMalloc((void**)&d_fu, bytes) );
checkCuda( hipMalloc((void**)&d_fv, bytes) );
checkCuda( hipMalloc((void**)&d_fw, bytes) );
checkCuda( hipMalloc((void**)&d_fe, bytes) );
if(direction == 0) {
for (int it=0; it<mx*my*mz; it++) {
fr[it] = (myprec) r[it];
fu[it] = (myprec) u[it];
fv[it] = (myprec) v[it];
fw[it] = (myprec) w[it];
fe[it] = (myprec) e[it];
}
// device arrays
checkCuda( hipMemcpy(d_fr, fr, bytes, hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(d_fu, fu, bytes, hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(d_fv, fv, bytes, hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(d_fw, fw, bytes, hipMemcpyHostToDevice) );
checkCuda( hipMemcpy(d_fe, fe, bytes, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( initDevice), dim3(hgrid), dim3(hblock), 0, 0, d_fr,d_fu,d_fv,d_fw,d_fe,h_dpdz);
} else if (direction == 1) {
checkCuda( hipMemset(d_fr, 0, bytes) );
checkCuda( hipMemset(d_fu, 0, bytes) );
checkCuda( hipMemset(d_fv, 0, bytes) );
checkCuda( hipMemset(d_fw, 0, bytes) );
checkCuda( hipMemset(d_fe, 0, bytes) );
hipLaunchKernelGGL(( getResults), dim3(hgrid), dim3(hblock), 0, 0, d_fr,d_fu,d_fv,d_fw,d_fe);
checkCuda( hipMemcpy(fr, d_fr, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(fu, d_fu, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(fv, d_fv, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(fw, d_fw, bytes, hipMemcpyDeviceToHost) );
checkCuda( hipMemcpy(fe, d_fe, bytes, hipMemcpyDeviceToHost) );
for (int it=0; it<mx*my*mz; it++) {
r[it] = (double) fr[it];
u[it] = (double) fu[it];
v[it] = (double) fv[it];
w[it] = (double) fw[it];
e[it] = (double) fe[it];
}
}
checkCuda( hipFree(d_fr) );
checkCuda( hipFree(d_fu) );
checkCuda( hipFree(d_fv) );
checkCuda( hipFree(d_fw) );
checkCuda( hipFree(d_fe) );
delete [] fr;
delete [] fu;
delete [] fv;
delete [] fw;
delete [] fe;
}
__global__ void initDevice(myprec *d_fr, myprec *d_fu, myprec *d_fv, myprec *d_fw, myprec *d_fe, double h_dpdz) {
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
if(forcing) {
dpdz = h_dpdz;
} else {
dpdz = 0;
}
d_r[globalThreadNum] = d_fr[globalThreadNum];
d_u[globalThreadNum] = d_fu[globalThreadNum];
d_v[globalThreadNum] = d_fv[globalThreadNum];
d_w[globalThreadNum] = d_fw[globalThreadNum];
d_e[globalThreadNum] = d_fe[globalThreadNum];
}
__global__ void getResults(myprec *d_fr, myprec *d_fu, myprec *d_fv, myprec *d_fw, myprec *d_fe) {
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
d_fr[globalThreadNum] = d_r[globalThreadNum];
d_fu[globalThreadNum] = d_u[globalThreadNum];
d_fv[globalThreadNum] = d_v[globalThreadNum];
d_fw[globalThreadNum] = d_w[globalThreadNum];
d_fe[globalThreadNum] = d_e[globalThreadNum];
}
void checkGpuMem() {
float free_m;
size_t free_t,total_t;
hipMemGetInfo(&free_t,&total_t);
free_m =(uint)free_t/1048576.0 ;
printf ( "mem free %zu\t (%f MB mem)\n",free_t,free_m);
}
__device__ void threadBlockDeviceSynchronize(void) {
__syncthreads();
if(threadIdx.x == 0)
hipDeviceSynchronize();
__syncthreads();
}
| 1ea6584324c197d8582f461ad12766aa3335bb3b.cu | /* To run the debugger!!
* CUDA_VISIBLE_DEVICES="0" cuda-gdb -tui ns
*
* To run the profiler on Reynolds!!
* nvvp -vmargs -Dosgi.locking=none
*
* */
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_globals.h"
__constant__ myprec dcoeffF[stencilSize];
__constant__ myprec dcoeffS[stencilSize+1];
__constant__ myprec dcoeffVF[stencilVisc];
__constant__ myprec dcoeffVS[stencilVisc+1];
__constant__ myprec dcoeffSx[mx*(2*stencilSize+1)];
__constant__ myprec dcoeffVSx[mx*(2*stencilVisc+1)];
__constant__ myprec d_dt, d_dx, d_dy, d_dz, d_d2x, d_d2y, d_d2z, d_x[mx], d_xp[mx], d_dxv[mx];
#if (capability>capabilityMin)
__constant__ dim3 d_block[5], grid0;
__constant__ dim3 d_grid[5], block0;
#else
__constant__ int d_block[5*2], grid0[2];
__constant__ int d_grid[5*2], block0[2];
#endif
dim3 hgrid, hblock;
void copyThreadGridsToDevice(dim3 &grid, dim3 &block);
// host routine to set constant data
void setDerivativeParameters(dim3 &grid, dim3 &block)
{
// check to make sure dimensions are integral multiples of sPencils
if ((mx % sPencils != 0) || (my %sPencils != 0) || (mz % sPencils != 0)) {
printf("'mx', 'my', and 'mz' must be integral multiples of sPencils\n");
exit(1);
}
myprec h_dt = (myprec) dt;
myprec h_dx = (myprec) 1.0/(dx);
myprec h_dy = (myprec) 1.0/(y[1] - y[0]);
myprec h_dz = (myprec) 1.0/(z[1] - z[0]);
myprec *h_x = new myprec[mx];
myprec *h_xp = new myprec[mx];
myprec *h_dxv = new myprec[mx];
for (int i=0; i<mx; i++) {
h_x[i] = x[i];
h_xp[i] = xp[i];
}
h_dxv[0] = (x[1]+x[0])/2.0;
for (int i=1; i<mx-1; i++) {
h_dxv[i] = (x[i+1]-x[i-1])/2.0;
}
h_dxv[mx-1] = Lx - (x[mx-1]+x[mx-2])/2.0;
myprec h_d2x = h_dx*h_dx;
myprec h_d2y = h_dy*h_dy;
myprec h_d2z = h_dz*h_dz;
myprec *h_coeffF = new myprec[stencilSize];
myprec *h_coeffS = new myprec[stencilSize+1];
myprec *h_coeffVF = new myprec[stencilSize];
myprec *h_coeffVS = new myprec[stencilSize+1];
for (int it=0; it<stencilSize; it++) {
h_coeffF[it] = (myprec) coeffF[it]; }
for (int it=0; it<stencilVisc; it++) {
h_coeffVF[it] = (myprec) coeffVF[it]; }
for (int it=0; it<stencilSize+1; it++) {
h_coeffS[it] = (myprec) coeffS[it]; }
for (int it=0; it<stencilVisc+1; it++) {
h_coeffVS[it] = (myprec) coeffVS[it]; }
//constructing the second order derivative coefficients in the x-direction
myprec *h_coeffVSx = new myprec[mx*(2*stencilVisc+1)];
for (int it=0; it<stencilVisc; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffVSx[idx] = (coeffVS[it]*(xp[i]*xp[i])*h_d2x - coeffVF[it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
for (int i=0; i<mx; i++) {
int idx = i + stencilVisc*mx;
h_coeffVSx[idx] = coeffVS[stencilVisc]*(xp[i]*xp[i])*h_d2x;
}
for (int it=stencilVisc+1; it<2*stencilVisc+1; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffVSx[idx] = ( coeffVS[2*stencilVisc - it]*(xp[i]*xp[i])*h_d2x +
coeffVF[2*stencilVisc - it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
myprec *h_coeffSx = new myprec[mx*(2*stencilSize+1)];
for (int it=0; it<stencilSize; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffSx[idx] = (coeffS[it]*(xp[i]*xp[i])*h_d2x - coeffF[it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
for (int i=0; i<mx; i++) {
int idx = i + stencilSize*mx;
h_coeffSx[idx] = coeffS[stencilSize]*(xp[i]*xp[i])*h_d2x;
}
for (int it=stencilSize+1; it<2*stencilSize+1; it++)
for (int i=0; i<mx; i++) {
int idx = i + it*mx;
h_coeffSx[idx] = ( coeffVS[2*stencilSize - it]*(xp[i]*xp[i])*h_d2x +
coeffVF[2*stencilSize - it]*xpp[i]*(xp[i]*xp[i]*xp[i])*h_dx);
}
checkCuda( cudaMemcpyToSymbol(dcoeffF , h_coeffF , stencilSize *sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(dcoeffS , h_coeffS , (stencilSize+1)*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(dcoeffVF, h_coeffVF, stencilVisc *sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(dcoeffVS, h_coeffVS, (stencilVisc+1)*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(dcoeffSx , h_coeffSx , mx*(2*stencilSize+1)*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(dcoeffVSx, h_coeffVSx, mx*(2*stencilVisc+1)*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_dt , &h_dt , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_dx , &h_dx , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_dy , &h_dy , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_dz , &h_dz , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_d2x , &h_d2x , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_d2y , &h_d2y , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_d2z , &h_d2z , sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_x , h_x ,mx*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_xp , h_xp ,mx*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_dxv , h_dxv ,mx*sizeof(myprec), 0, cudaMemcpyHostToDevice) );
copyThreadGridsToDevice(grid,block);
delete [] h_coeffF;
delete [] h_coeffS;
delete [] h_coeffVF;
delete [] h_coeffVS;
delete [] h_coeffSx;
delete [] h_coeffVSx;
delete [] h_x;
delete [] h_xp;
delete [] h_dxv;
}
#if (capability>capabilityMin)
void copyThreadGridsToDevice(dim3 &grid, dim3 &block) {
dim3 *h_grid, *h_block;
h_grid = new dim3[5];
h_block = new dim3[5];
// X-grid
h_grid[0] = dim3(my / sPencils, mz, 1);
h_block[0] = dim3(mx, sPencils, 1);
// Y-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[3] = dim3(mx / lPencils, mz, 1);
h_block[3] = dim3(lPencils, (my * sPencils) / lPencils, 1);
h_grid[1] = dim3(mx / sPencils, mz, 1);
h_block[1] = dim3(my , sPencils, 1); //if not using shared change!!
// Z-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[4] = dim3(mx / lPencils, my, 1);
h_block[4] = dim3(lPencils, (mz * sPencils) / lPencils, 1);
h_grid[2] = dim3(mx / sPencils, my, 1);
h_block[2] = dim3(mz , sPencils, 1); //if not using shared change!!
checkCuda( cudaMemcpyToSymbol(d_grid , h_grid , 5*sizeof(dim3), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_block , h_block , 5*sizeof(dim3), 0, cudaMemcpyHostToDevice) );
printf("Grid configuration:\n");
printf("Grid 0: {%d, %d, %d} blocks. Blocks 0: {%d, %d, %d} threads.\n",h_grid[0].x, h_grid[0].y, h_grid[0].z, h_block[0].x, h_block[0].y, h_block[0].z);
printf("Grid 1: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[1].x, h_grid[1].y, h_grid[1].z, h_block[1].x, h_block[1].y, h_block[1].z);
printf("Grid 2: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[2].x, h_grid[2].y, h_grid[2].z, h_block[2].x, h_block[2].y, h_block[2].z);
printf("Grid 3: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[3].x, h_grid[3].y, h_grid[3].z, h_block[3].x, h_block[3].y, h_block[3].z);
printf("Grid 4: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[4].x, h_grid[4].y, h_grid[4].z, h_block[4].x, h_block[4].y, h_block[4].z);
printf("\n");
hgrid = dim3(my / sPencils, mz, 1);
hblock = dim3(mx, sPencils, 1);
checkCuda( cudaMemcpyToSymbol(grid0 , &hgrid , sizeof(dim3), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(block0 , &hblock , sizeof(dim3), 0, cudaMemcpyHostToDevice) );
grid = 1;
block = 1;
delete [] h_grid;
delete [] h_block;
}
#else
void copyThreadGridsToDevice(dim3 &grid, dim3 &block) {
int *h_grid, *h_block;
h_grid = new int[2*5];
h_block = new int[2*5];
// X-grid
h_grid[0] = my / sPencils; h_grid[1] = mz;
h_block[0] = mx; h_block[1] = sPencils;
// Y-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[2] = mx / lPencils; h_grid[3] = mz;
h_block[2] = lPencils; h_block[3] = (my * sPencils) / lPencils;
h_grid[4] = mx / sPencils; h_grid[5] = mz;
h_block[4] = my; h_block[5] = sPencils;
// Z-grid (2) for viscous fluxes and (4) for advective fluxes
h_grid[6] = mx / lPencils; h_grid[7] = my;
h_block[6] = lPencils; h_block[7] = (mz * sPencils) / lPencils;
h_grid[8] = mx / sPencils; h_grid[9] = my;
h_block[8] = mz; h_block[9] = sPencils;
checkCuda( cudaMemcpyToSymbol(d_grid , h_grid , 2*5*sizeof(int), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(d_block , h_block , 2*5*sizeof(int), 0, cudaMemcpyHostToDevice) );
printf("Grid configuration:\n");
printf("Grid 0: {%d, %d, %d} blocks. Blocks 0: {%d, %d, %d} threads.\n",h_grid[0], h_grid[1], 1, h_block[0], h_block[1], 1);
printf("Grid 1: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[2], h_grid[3], 1, h_block[2], h_block[3], 1);
printf("Grid 2: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[6], h_grid[7], 1, h_block[6], h_block[7], 1);
printf("Grid 3: {%d, %d, %d} blocks. Blocks 1: {%d, %d, %d} threads.\n",h_grid[4], h_grid[5], 1, h_block[4], h_block[5], 1);
printf("Grid 4: {%d, %d, %d} blocks. Blocks 2: {%d, %d, %d} threads.\n",h_grid[8], h_grid[9], 1, h_block[8], h_block[9], 1);
printf("\n");
hgrid = dim3(my / sPencils, mz, 1);
hblock = dim3(mx, sPencils, 1);
int *h_grid0,*h_block0;
h_grid0 = new int[2];
h_block0 = new int[2];
h_grid0[0] = h_grid[0] ; h_grid0[1] = h_grid[1];
h_block0[0] = h_block[0]; h_block0[1] = h_block[1];
checkCuda( cudaMemcpyToSymbol(grid0 , h_grid0 , 2*sizeof(int), 0, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpyToSymbol(block0 , h_block0 , 2*sizeof(int), 0, cudaMemcpyHostToDevice) );
grid = 1;
block = 1;
delete [] h_grid;
delete [] h_block;
delete [] h_grid0;
delete [] h_block0;
}
#endif
void copyField(int direction) {
myprec *fr = new myprec[mx*my*mz];
myprec *fu = new myprec[mx*my*mz];
myprec *fv = new myprec[mx*my*mz];
myprec *fw = new myprec[mx*my*mz];
myprec *fe = new myprec[mx*my*mz];
myprec *d_fr, *d_fu, *d_fv, *d_fw, *d_fe;
int bytes = mx*my*mz * sizeof(myprec);
checkCuda( cudaMalloc((void**)&d_fr, bytes) );
checkCuda( cudaMalloc((void**)&d_fu, bytes) );
checkCuda( cudaMalloc((void**)&d_fv, bytes) );
checkCuda( cudaMalloc((void**)&d_fw, bytes) );
checkCuda( cudaMalloc((void**)&d_fe, bytes) );
if(direction == 0) {
for (int it=0; it<mx*my*mz; it++) {
fr[it] = (myprec) r[it];
fu[it] = (myprec) u[it];
fv[it] = (myprec) v[it];
fw[it] = (myprec) w[it];
fe[it] = (myprec) e[it];
}
// device arrays
checkCuda( cudaMemcpy(d_fr, fr, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(d_fu, fu, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(d_fv, fv, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(d_fw, fw, bytes, cudaMemcpyHostToDevice) );
checkCuda( cudaMemcpy(d_fe, fe, bytes, cudaMemcpyHostToDevice) );
initDevice<<<hgrid, hblock>>>(d_fr,d_fu,d_fv,d_fw,d_fe,h_dpdz);
} else if (direction == 1) {
checkCuda( cudaMemset(d_fr, 0, bytes) );
checkCuda( cudaMemset(d_fu, 0, bytes) );
checkCuda( cudaMemset(d_fv, 0, bytes) );
checkCuda( cudaMemset(d_fw, 0, bytes) );
checkCuda( cudaMemset(d_fe, 0, bytes) );
getResults<<<hgrid, hblock>>>(d_fr,d_fu,d_fv,d_fw,d_fe);
checkCuda( cudaMemcpy(fr, d_fr, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(fu, d_fu, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(fv, d_fv, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(fw, d_fw, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaMemcpy(fe, d_fe, bytes, cudaMemcpyDeviceToHost) );
for (int it=0; it<mx*my*mz; it++) {
r[it] = (double) fr[it];
u[it] = (double) fu[it];
v[it] = (double) fv[it];
w[it] = (double) fw[it];
e[it] = (double) fe[it];
}
}
checkCuda( cudaFree(d_fr) );
checkCuda( cudaFree(d_fu) );
checkCuda( cudaFree(d_fv) );
checkCuda( cudaFree(d_fw) );
checkCuda( cudaFree(d_fe) );
delete [] fr;
delete [] fu;
delete [] fv;
delete [] fw;
delete [] fe;
}
__global__ void initDevice(myprec *d_fr, myprec *d_fu, myprec *d_fv, myprec *d_fw, myprec *d_fe, double h_dpdz) {
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
if(forcing) {
dpdz = h_dpdz;
} else {
dpdz = 0;
}
d_r[globalThreadNum] = d_fr[globalThreadNum];
d_u[globalThreadNum] = d_fu[globalThreadNum];
d_v[globalThreadNum] = d_fv[globalThreadNum];
d_w[globalThreadNum] = d_fw[globalThreadNum];
d_e[globalThreadNum] = d_fe[globalThreadNum];
}
__global__ void getResults(myprec *d_fr, myprec *d_fu, myprec *d_fv, myprec *d_fw, myprec *d_fe) {
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
d_fr[globalThreadNum] = d_r[globalThreadNum];
d_fu[globalThreadNum] = d_u[globalThreadNum];
d_fv[globalThreadNum] = d_v[globalThreadNum];
d_fw[globalThreadNum] = d_w[globalThreadNum];
d_fe[globalThreadNum] = d_e[globalThreadNum];
}
void checkGpuMem() {
float free_m;
size_t free_t,total_t;
cudaMemGetInfo(&free_t,&total_t);
free_m =(uint)free_t/1048576.0 ;
printf ( "mem free %zu\t (%f MB mem)\n",free_t,free_m);
}
__device__ void threadBlockDeviceSynchronize(void) {
__syncthreads();
if(threadIdx.x == 0)
cudaDeviceSynchronize();
__syncthreads();
}
|
7ddd43b7506abdb48a27503ae88816d4aeebff68.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define all_num 1000 //
#define test_num 50 //
#define train_num 950 //
#define D 10 //
#define K 14//K
#define THREAD_NUM 2
#define BLOCK_NUM D / 2
typedef struct {
double data;//
int trainlabel;//
}Distance;
typedef struct {
double data[D];//
int label; //
}data;
data test[test_num];//
data train[train_num];//
data temp[all_num]; //
Distance distance[train_num];//
void loaddata(char filename[]) { // test_num train_num
FILE* fp = NULL;
fp = fopen(filename, "r");
int i, j;
int n = 0, m = 0;
for (i = 0; i < all_num; ++i) {
for (j = 0; j < D; ++j) {
fscanf(fp, "%lf ", &temp[i].data[j]);
}
fscanf(fp, "%d", &temp[i].label);
}
srand((unsigned int)time(NULL));
for (i = 0; i < all_num; ++i) {
int n1 = (rand() % all_num);//n n
int n2 = (rand() % all_num);
if (n1 != n2) { //
data t = temp[n1];
temp[n1] = temp[n2];
temp[n2] = t;
}
}
for (i = 0; i < all_num; i++) {
if (i < test_num) { //
for (j = 0; j < D; j++) {
test[n].data[j] = temp[i].data[j]; //
}
test[n].label = temp[i].label;
n++;
}
else { //
for (j = 0; j < D; ++j) {
train[m].data[j] = temp[i].data[j];//
}
train[m].label = temp[i].label;
m++;
}
}
fclose(fp);
fp = NULL;
printf("test:\n"); //
for (i = 0; i < test_num; ++i) {
for (j = 0; j < D; ++j) {
printf("%lf ", test[i].data[j]);
}
printf("%d\n",test[i].label);
}
printf("\n\ntrain:\n");
for (i = 0; i < train_num; ++i) {
for (j = 0; j < D; ++j) {
printf("%lf ", train[i].data[j]);
}
printf("%d\n",train[i].label);
}
}
__global__ static void computedistance(double *gpu_train, double *gpu_test, double *gpu_dist) { //
double sum = 0.0;
for (int i = blockIdx.x * THREAD_NUM + threadIdx.x; i < D; i += BLOCK_NUM * THREAD_NUM) {
sum += (gpu_test[i] - gpu_train[i]) * (gpu_test[i] - gpu_train[i]);
}
gpu_dist[blockIdx.x * THREAD_NUM + threadIdx.x] = sum;
}
int maxn(int a, int b) { //
if (a > b) return 0;
else if(b > a) return 1;
return 0;
}
void countlabel(int* sum ,int k, int n) { //k
int i;
int sumA = 0, sumB = 0;
for (i = 0; i < k; ++i) {
switch (distance[i].trainlabel) { //DistancepK
case 0: sumA++; break;
case 1: sumB++; break;
}
}
printf("%d %d ", sumA, sumB);
printf("%d %d \n", maxn(sumA, sumB), test[n].label);
if (maxn(sumA, sumB) == test[n].label) { //k
(*sum)++; //
}
}
int cmp(const void* a, const void* b) { //qsortcmp()
Distance A = *(Distance*)a;
Distance B = *(Distance*)b;
return A.data > B.data ? 1 : -1;
}
int main() {
loaddata("targetclass.txt");
int sum = 0;//k
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
printf("\n\nCheck data:\n\n");
double *gpu_test, *gpu_train, *gpu_dist;
double dist[D], result;
hipMalloc((void**)&gpu_test, sizeof(double) * D);
hipMalloc((void**)&gpu_train, sizeof(double) * D);
hipMalloc((void**)&gpu_dist, sizeof(double) * D);
for (int i = 0; i < test_num; i++) {
for (int j = 0; j < train_num; j++) {
result = 0;
hipMemcpy(gpu_test, test[i].data, sizeof(double) * D, hipMemcpyHostToDevice);
hipMemcpy(gpu_train, train[j].data, sizeof(double) * D, hipMemcpyHostToDevice);
computedistance << <BLOCK_NUM, THREAD_NUM, 0>> >(gpu_train, gpu_test, gpu_dist);
hipMemcpy(dist, gpu_dist, sizeof(double) * D, hipMemcpyDeviceToHost);
for (int k = 0; k < D; ++k) {
result += dist[k];
}
distance[j].data = sqrt(result);
distance[j].trainlabel = train[j].label; //distance
}
qsort(distance, train_num, sizeof(distance[0]), cmp); //qsort
countlabel(&sum, K, i); //
}
printf("K = %d P = %.1lf%%\n", K, 100.0 * (sum) / test_num); //K
hipFree(gpu_test);
hipFree(gpu_train);
hipFree(gpu_dist);
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU_time: %f ms\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 7ddd43b7506abdb48a27503ae88816d4aeebff68.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#define all_num 1000 //总数据的数量
#define test_num 50 //测试数据的数量
#define train_num 950 //训练数据的数量
#define D 10 //特征数据的数量(维数)
#define K 14//K的最大取值
#define THREAD_NUM 2
#define BLOCK_NUM D / 2
typedef struct {
double data;//距离
int trainlabel;//用于链接训练标签
}Distance;
typedef struct {
double data[D];//特征数据
int label; //标签
}data;
data test[test_num];//测试数据结构体数组
data train[train_num];//训练数据结构体数组
data temp[all_num]; //临时存放数据结构体数组
Distance distance[train_num];//存放距离结构体数组
void loaddata(char filename[]) { //加载数据 分割:测试test_num组 训练train_num组
FILE* fp = NULL;
fp = fopen(filename, "r");
int i, j;
int n = 0, m = 0;
for (i = 0; i < all_num; ++i) {
for (j = 0; j < D; ++j) {
fscanf(fp, "%lf ", &temp[i].data[j]);
}
fscanf(fp, "%d", &temp[i].label);
}
srand((unsigned int)time(NULL));
for (i = 0; i < all_num; ++i) {
int n1 = (rand() % all_num);//产生n以内的随机数 n是数组元素个数
int n2 = (rand() % all_num);
if (n1 != n2) { //若两随机数不相等 则下标为这两随机数的数组进行交换
data t = temp[n1];
temp[n1] = temp[n2];
temp[n2] = t;
}
}
for (i = 0; i < all_num; i++) {
if (i < test_num) { //存入测试集
for (j = 0; j < D; j++) {
test[n].data[j] = temp[i].data[j]; //存入花的四个特征数据
}
test[n].label = temp[i].label;
n++;
}
else { //剩下的行数存入训练集
for (j = 0; j < D; ++j) {
train[m].data[j] = temp[i].data[j];//存入花的四个特征数据
}
train[m].label = temp[i].label;
m++;
}
}
fclose(fp);
fp = NULL;
printf("test:\n"); //把分割后的数据都打印出来 便于观察是否已经打乱
for (i = 0; i < test_num; ++i) {
for (j = 0; j < D; ++j) {
printf("%lf ", test[i].data[j]);
}
printf("%d\n",test[i].label);
}
printf("\n\ntrain:\n");
for (i = 0; i < train_num; ++i) {
for (j = 0; j < D; ++j) {
printf("%lf ", train[i].data[j]);
}
printf("%d\n",train[i].label);
}
}
__global__ static void computedistance(double *gpu_train, double *gpu_test, double *gpu_dist) { //计算距离
double sum = 0.0;
for (int i = blockIdx.x * THREAD_NUM + threadIdx.x; i < D; i += BLOCK_NUM * THREAD_NUM) {
sum += (gpu_test[i] - gpu_train[i]) * (gpu_test[i] - gpu_train[i]);
}
gpu_dist[blockIdx.x * THREAD_NUM + threadIdx.x] = sum;
}
int maxn(int a, int b) { //找出频数最高的 测试数据就属于出现次数最高的
if (a > b) return 0;
else if(b > a) return 1;
return 0;
}
void countlabel(int* sum ,int k, int n) { //统计距离最邻近的k个标签出现的频数
int i;
int sumA = 0, sumB = 0;
for (i = 0; i < k; ++i) {
switch (distance[i].trainlabel) { //用Distance结构体指针p来取K个距离最近的标签来进行判断
case 0: sumA++; break;
case 1: sumB++; break;
}
}
printf("%d %d ", sumA, sumB);
printf("%d %d \n", maxn(sumA, sumB), test[n].label);
if (maxn(sumA, sumB) == test[n].label) { //检测距离最近的k个标签与原测试标签是否符合 并统计
(*sum)++; //统计符合的数量
}
}
int cmp(const void* a, const void* b) { //快速排序qsort函数的cmp函数(判断函数)
Distance A = *(Distance*)a;
Distance B = *(Distance*)b;
return A.data > B.data ? 1 : -1;
}
int main() {
loaddata("targetclass.txt");
int sum = 0;//用于统计距离最近的k个标签与原测试标签符合的数量
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
printf("\n\nCheck data:\n\n");
double *gpu_test, *gpu_train, *gpu_dist;
double dist[D], result;
cudaMalloc((void**)&gpu_test, sizeof(double) * D);
cudaMalloc((void**)&gpu_train, sizeof(double) * D);
cudaMalloc((void**)&gpu_dist, sizeof(double) * D);
for (int i = 0; i < test_num; i++) {
for (int j = 0; j < train_num; j++) {
result = 0;
cudaMemcpy(gpu_test, test[i].data, sizeof(double) * D, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_train, train[j].data, sizeof(double) * D, cudaMemcpyHostToDevice);
computedistance << <BLOCK_NUM, THREAD_NUM, 0>> >(gpu_train, gpu_test, gpu_dist);
cudaMemcpy(dist, gpu_dist, sizeof(double) * D, cudaMemcpyDeviceToHost);
for (int k = 0; k < D; ++k) {
result += dist[k];
}
distance[j].data = sqrt(result);
distance[j].trainlabel = train[j].label; //以上距离存入的同时也把训练集标签一起存入distance结构体数组中
}
qsort(distance, train_num, sizeof(distance[0]), cmp); //用qsort函数从小到大排序测试数据与每组训练数据的距离
countlabel(&sum, K, i); //统计距离测试集标签最近的标签出现频数
}
printf("K = %d P = %.1lf%%\n", K, 100.0 * (sum) / test_num); //打印K值对应的概率
cudaFree(gpu_test);
cudaFree(gpu_train);
cudaFree(gpu_dist);
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU_time: %f ms\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
18e899adcaa180efec2ec91692365d23262d0cbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
zdiinertia_kernel(int n, magmaDoubleComplex_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
// Each thread computes its part of the intertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_Z_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
// Attomic sum the contributions from all theread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
/***************************************************************************//**
Purpose
-------
magmablas_zdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_zdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zdiinertia(
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
// Set itertia to zero
hipMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
hipLaunchKernelGGL(( zdiinertia_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA, ldda, dneig);
return info;
}
// end magmablas_zdiinertia
| 18e899adcaa180efec2ec91692365d23262d0cbf.cu | /*
-- MAGMA (version 2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date
@precisions normal z -> s d c
@author Stan Tomov
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
#define NTHREADS 64
#define NBLOCKS 40
__global__ void
zdiinertia_kernel(int n, magmaDoubleComplex_const_ptr dA, int ldda, int *dneig)
{
const int tx = threadIdx.x;
const int blk = blockIdx.x;
int peig = 0, neig = 0, zeig = 0;
__shared__ int pe[NTHREADS], ne[NTHREADS], ze[NTHREADS];
// Each thread computes its part of the intertia
for(int i=tx + blk*NTHREADS; i<n; i+= NTHREADS*NBLOCKS) {
double diag = MAGMA_Z_REAL(dA[i+i*ldda]);
if (diag > 0.0)
peig++;
else if (diag < 0.0)
neig++;
else
zeig++;
}
pe[tx] = peig;
ne[tx] = neig;
ze[tx] = zeig;
// The threads within a thread block sum their contributions to the inertia
magma_sum_reduce< NTHREADS >( tx, pe );
magma_sum_reduce< NTHREADS >( tx, ne );
magma_sum_reduce< NTHREADS >( tx, ze );
__syncthreads();
// Attomic sum the contributions from all theread blocks (by thread 0)
if (tx == 0){
atomicAdd(&dneig[0], pe[0]);
atomicAdd(&dneig[1], ne[0]);
atomicAdd(&dneig[2], ze[0]);
}
}
/***************************************************************************//**
Purpose
-------
magmablas_zdiinertia computes the inertia of a real diagonal matrix.
If matrix entries are complex, magmablas_zdiinertia considers the real
part of the diagonal.
Arguments
----------
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
dA COMPLEX_16 array of DIMENSION ( LDDA, n ).
The input matrix A with diagonal entries for which the inertia
is computed. If dA is complex, the computation is done on the
real part of the diagonal.
@param[in]
ldda INTEGER.
On entry, LDDA specifies the leading dimension of A.
LDDA must be at least max( 1, n ).
@param[out]
dneig INTEGER array of DIMENSION 3 on the GPU memory.
The number of positive, negative, and zero eigenvalues
in this order.
@param[in]
queue magma_queue_t.
Queue to execute in.
@ingroup magma_hetrf
*******************************************************************************/
extern "C"
magma_int_t
magmablas_zdiinertia(
magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
int *dneig,
magma_queue_t queue )
{
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ( n < 0 ) {
info = -1;
} else if ( ldda < max(1, n) ) {
info = -3;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if (n == 0)
return info;
dim3 grid( NBLOCKS, 1, 1 );
dim3 threads( NTHREADS, 1, 1 );
// Set itertia to zero
cudaMemsetAsync(dneig, 0, 3*sizeof(int), queue->cuda_stream() );
zdiinertia_kernel<<<grid, threads, 0, queue->cuda_stream() >>>
(n, dA, ldda, dneig);
return info;
}
// end magmablas_zdiinertia
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.