hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
1ad9964981e2396e734801a426c757a3bcb54b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace domainwall4d {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_DOMAIN_WALL_DIRAC
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
}
// declare the dslash events
#include <dslash_events.cuh>
using namespace domainwall4d;
#ifdef GPU_DOMAIN_WALL_DIRAC
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > (unsigned int)deviceProp.maxGridSize[0] || param.grid.y > (unsigned int)deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > (unsigned int)deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > (unsigned)in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.x
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const double b, const int parity, const int dagger, const int *commOverride, const int DS_type)
: DslashCuda(out, in, x, gauge, parity, dagger, commOverride), DS_type(DS_type)
{
dslashParam.a = a;
dslashParam.a_f = a;
dslashParam.b = b;
dslashParam.b_f = b;
dslashParam.mferm = mferm;
dslashParam.mferm_f = mferm;
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(DS_type){
case 0:
strcat(key.aux,",Dslash4");
break;
case 1:
strcat(key.aux,",Dslash5");
break;
case 2:
strcat(key.aux,",Dslash5inv");
break;
}
return key;
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const hipStream_t &stream)
{
#ifndef USE_TEXTURE_OBJECTS
if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
setParam();
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const {
long long Ls = in->X(4);
long long vol4d = in->VolumeCB() / Ls;
long long bulk = (Ls-2)*vol4d;
long long wall = 2*vol4d;
long long flops = 0;
switch(DS_type){
case 0:
flops = DslashCuda::flops();
break;
case 1:
flops = (x ? 48ll : 0 ) * in->VolumeCB() + 96ll*bulk + 120ll*wall;
break;
case 2:
flops = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
long long Ls = in->X(4);
long long bytes = 0;
switch(DS_type){
case 0:
bytes = DslashCuda::bytes();
break;
case 1:
bytes = (x ? 5ll : 4ll ) * spinor_bytes * in->VolumeCB();
break;
case 2:
bytes = (x ? Ls + 2 : Ls + 1) * spinor_bytes * in->VolumeCB();
break;
default:
errorQuda("invalid Dslash type");
}
return bytes;
}
};
#endif // GPU_DOMAIN_WALL_DIRAC
#include <dslash_policy.cuh>
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// Additional Arg. is added to give a function name.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &a, const double &b,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
const_cast<cudaColorSpinorField*>(in)->createComms(1);
DslashCuda *dslash = nullptr;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
DslashPolicyImp* dslashImp = nullptr;
if (DS_type != 0) {
dslashImp = DslashFactory::create(QudaDslashPolicy::QUDA_DSLASH_NC);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
} else {
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
dslash_policy.apply(0);
}
delete dslash;
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
}
| 1ad9964981e2396e734801a426c757a3bcb54b2d.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace domainwall4d {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_DOMAIN_WALL_DIRAC
#include <dw_dslash4_def.h> // Dslash4 Domain Wall kernels
#include <dw_dslash5_def.h> // Dslash5 Domain Wall kernels
#include <dw_dslash5inv_def.h> // Dslash5inv Domain Wall kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
}
// declare the dslash events
#include <dslash_events.cuh>
using namespace domainwall4d;
#ifdef GPU_DOMAIN_WALL_DIRAC
template <typename sFloat, typename gFloat>
class DomainWallDslash4DPCCuda : public DslashCuda {
private:
const int DS_type;
bool checkGrid(TuneParam ¶m) const {
if (param.grid.x > (unsigned int)deviceProp.maxGridSize[0] || param.grid.y > (unsigned int)deviceProp.maxGridSize[1]) {
warningQuda("Autotuner is skipping blockDim=(%u,%u,%u), gridDim=(%u,%u,%u) because lattice volume is too large",
param.block.x, param.block.y, param.block.z, param.grid.x, param.grid.y, param.grid.z);
return false;
} else {
return true;
}
}
protected:
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step[2] = { deviceProp.warpSize, 1 };
bool advance[2] = { false, false };
// first try to advance block.x
param.block.x += step[0];
if (param.block.x > (unsigned int)deviceProp.maxThreadsDim[0] ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[0] = false;
param.block.x = step[0]; // reset block.x
} else {
advance[0] = true; // successfully advanced block.x
}
if (!advance[0]) { // if failed to advance block.x, now try block.y
param.block.y += step[1];
if (param.block.y > (unsigned)in->X(4) ||
sharedBytesPerThread()*param.block.x*param.block.y > max_shared) {
advance[1] = false;
param.block.y = step[1]; // reset block.x
} else {
advance[1] = true; // successfully advanced block.y
}
}
if (advance[0] || advance[1]) {
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool advance = true;
if (!checkGrid(param)) advance = advanceBlockDim(param);
return advance;
} else {
return false;
}
}
unsigned int sharedBytesPerThread() const { return 0; }
public:
DomainWallDslash4DPCCuda(cudaColorSpinorField *out, const GaugeField &gauge, const cudaColorSpinorField *in,
const cudaColorSpinorField *x, const double mferm,
const double a, const double b, const int parity, const int dagger, const int *commOverride, const int DS_type)
: DslashCuda(out, in, x, gauge, parity, dagger, commOverride), DS_type(DS_type)
{
dslashParam.a = a;
dslashParam.a_f = a;
dslashParam.b = b;
dslashParam.b_f = b;
dslashParam.mferm = mferm;
dslashParam.mferm_f = mferm;
}
virtual ~DomainWallDslash4DPCCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(DS_type){
case 0:
strcat(key.aux,",Dslash4");
break;
case 1:
strcat(key.aux,",Dslash5");
break;
case 2:
strcat(key.aux,",Dslash5inv");
break;
}
return key;
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
/** sets default values for when tuning is disabled */
virtual void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3( (dslashParam.threads+param.block.x-1) / param.block.x,
(in->X(4)+param.block.y-1) / param.block.y, 1);
bool ok = true;
if (!checkGrid(param)) ok = advanceBlockDim(param);
if (!ok) errorQuda("Lattice volume is too large for even the largest blockDim");
}
void apply(const cudaStream_t &stream)
{
#ifndef USE_TEXTURE_OBJECTS
if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x);
#endif // USE_TEXTURE_OBJECTS
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
setParam();
switch(DS_type){
case 0:
DSLASH(domainWallDslash4, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case 1:
DSLASH(domainWallDslash5, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
case 2:
DSLASH(domainWallDslash5inv, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
break;
default:
errorQuda("invalid Dslash type");
}
}
long long flops() const {
long long Ls = in->X(4);
long long vol4d = in->VolumeCB() / Ls;
long long bulk = (Ls-2)*vol4d;
long long wall = 2*vol4d;
long long flops = 0;
switch(DS_type){
case 0:
flops = DslashCuda::flops();
break;
case 1:
flops = (x ? 48ll : 0 ) * in->VolumeCB() + 96ll*bulk + 120ll*wall;
break;
case 2:
flops = 144ll*in->VolumeCB()*Ls + 3ll*Ls*(Ls-1ll);
break;
default:
errorQuda("invalid Dslash type");
}
return flops;
}
long long bytes() const {
bool isHalf = in->Precision() == sizeof(short) ? true : false;
int spinor_bytes = 2 * in->Ncolor() * in->Nspin() * in->Precision() + (isHalf ? sizeof(float) : 0);
long long Ls = in->X(4);
long long bytes = 0;
switch(DS_type){
case 0:
bytes = DslashCuda::bytes();
break;
case 1:
bytes = (x ? 5ll : 4ll ) * spinor_bytes * in->VolumeCB();
break;
case 2:
bytes = (x ? Ls + 2 : Ls + 1) * spinor_bytes * in->VolumeCB();
break;
default:
errorQuda("invalid Dslash type");
}
return bytes;
}
};
#endif // GPU_DOMAIN_WALL_DIRAC
#include <dslash_policy.cuh>
//-----------------------------------------------------
// Modification for 4D preconditioned DWF operator
// Additional Arg. is added to give a function name.
//
// pre-defined DS_type list
// 0 = dslash4
// 1 = dslash5
// 2 = dslash5inv
//-----------------------------------------------------
void domainWallDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const double &m_f, const double &a, const double &b,
const int *commOverride, const int DS_type, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
const_cast<cudaColorSpinorField*>(in)->createComms(1);
DslashCuda *dslash = nullptr;
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<double2,double2>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<float4,float4>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new DomainWallDslash4DPCCuda<short4,short4>(out, gauge, in, x, m_f, a, b, parity, dagger, commOverride, DS_type);
}
// the parameters passed to dslashCuda must be 4-d volume and 3-d
// faces because Ls is added as the y-dimension in thread space
int ghostFace[QUDA_MAX_DIM];
for (int i=0; i<4; i++) ghostFace[i] = in->GhostFace()[i] / in->X(4);
DslashPolicyImp* dslashImp = nullptr;
if (DS_type != 0) {
dslashImp = DslashFactory::create(QudaDslashPolicy::QUDA_DSLASH_NC);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
delete dslashImp;
} else {
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), in->Volume()/in->X(4), ghostFace, profile);
dslash_policy.apply(0);
}
delete dslash;
#else
errorQuda("4D preconditioned Domain wall dslash has not been built");
#endif
}
}
|
2d86469124811ef1ac5e9a26176b914328435adf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "yuv422_to_yuv444_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const void *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
void *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int pix_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
yuv422_to_yuv444_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,out,pix_count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
yuv422_to_yuv444_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,out,pix_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
yuv422_to_yuv444_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, src,out,pix_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2d86469124811ef1ac5e9a26176b914328435adf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "yuv422_to_yuv444_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const void *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
void *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int pix_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
yuv422_to_yuv444_kernel<<<gridBlock,threadBlock>>>(src,out,pix_count);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
yuv422_to_yuv444_kernel<<<gridBlock,threadBlock>>>(src,out,pix_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
yuv422_to_yuv444_kernel<<<gridBlock,threadBlock>>>(src,out,pix_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0bdd5c4944f430cc06f27272ae5095d030a3b304.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// STL
#include <algorithm>
// C-Standard
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <project6/cuda_util.hpp>
__global__ void vecAdd(const float *a, const float *b, float *c, int n) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
int n = 100000;
auto *h_a = new float[n];
auto *h_b = new float[n];
auto *h_c = new float[n];
float *d_a = nullptr;
float *d_b = nullptr;
float *d_c = nullptr;
cuda_eexit(hipMalloc(&d_a, sizeof(float)*n));
cuda_eexit(hipMalloc(&d_b, sizeof(float)*n));
cuda_eexit(hipMalloc(&d_c, sizeof(float)*n));
for (int i = 0; i < n; i++) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
cuda_eexit(hipMemcpy(d_a, h_a, sizeof(float)*n, hipMemcpyHostToDevice));
cuda_eexit(hipMemcpy(d_b, h_b, sizeof(float)*n, hipMemcpyHostToDevice));
int threadsPerBlock = 256;
int blocksPerGrid =(n + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c, n);
cuda_eexit(hipGetLastError());
cuda_eexit(hipMemcpy(h_c, d_c, sizeof(float)*n, hipMemcpyDeviceToHost));
float sum = std::accumulate(h_c, h_c + n, 0.f);
printf("final result: %f\n", sum / n);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
delete h_a;
delete h_b;
delete h_c;
return 0;
}
| 0bdd5c4944f430cc06f27272ae5095d030a3b304.cu | // STL
#include <algorithm>
// C-Standard
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <project6/cuda_util.hpp>
__global__ void vecAdd(const float *a, const float *b, float *c, int n) {
int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
int n = 100000;
auto *h_a = new float[n];
auto *h_b = new float[n];
auto *h_c = new float[n];
float *d_a = nullptr;
float *d_b = nullptr;
float *d_c = nullptr;
cuda_eexit(cudaMalloc(&d_a, sizeof(float)*n));
cuda_eexit(cudaMalloc(&d_b, sizeof(float)*n));
cuda_eexit(cudaMalloc(&d_c, sizeof(float)*n));
for (int i = 0; i < n; i++) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
cuda_eexit(cudaMemcpy(d_a, h_a, sizeof(float)*n, cudaMemcpyHostToDevice));
cuda_eexit(cudaMemcpy(d_b, h_b, sizeof(float)*n, cudaMemcpyHostToDevice));
int threadsPerBlock = 256;
int blocksPerGrid =(n + threadsPerBlock - 1) / threadsPerBlock;
vecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, n);
cuda_eexit(cudaGetLastError());
cuda_eexit(cudaMemcpy(h_c, d_c, sizeof(float)*n, cudaMemcpyDeviceToHost));
float sum = std::accumulate(h_c, h_c + n, 0.f);
printf("final result: %f\n", sum / n);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
delete h_a;
delete h_b;
delete h_c;
return 0;
}
|
c4673a3ade02b9284f38eeeedd9cd0ad881367c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
__global__ void
cudaProcess(unsigned int *g_odata, int imgw)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
uchar4 c4 = make_uchar4((x & 0x20)?100:0,0,(y & 0x20)?100:0,0);
g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x);
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
unsigned int *g_odata,
int imgw)
{
hipLaunchKernelGGL(( cudaProcess), dim3(grid), dim3(block), sbytes , 0, g_odata, imgw);
}
| c4673a3ade02b9284f38eeeedd9cd0ad881367c3.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
__global__ void
cudaProcess(unsigned int *g_odata, int imgw)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
uchar4 c4 = make_uchar4((x & 0x20)?100:0,0,(y & 0x20)?100:0,0);
g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x);
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
unsigned int *g_odata,
int imgw)
{
cudaProcess<<< grid, block, sbytes >>>(g_odata, imgw);
}
|
5c45cd63b63009953ed10e9adbaf41ff867b6475.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> z
WARNING: this version has really poor performance
and cublas is prefered to this implementation.
*/
#include "common_magma.h"
#define PRECISION_z
/*The version for fermi can be found in zsymv_fermi.cu */
#if defined(PRECISION_z) && (GPUSHMEM < 200)
#define magmablas_zsymv_130 magmablas_zsymv
#define thread_seg 128 // used in zsymv_130_kernel1
#define threadSize 128 // used in zsymv_130_kernel2
__global__ void
magmablas_zsymv_130_kernel1( magma_int_t m, hipDoubleComplex alpha,
const hipDoubleComplex *A, magma_int_t lda,
const hipDoubleComplex *x, magma_int_t incx,
hipDoubleComplex beta,
hipDoubleComplex *y, magma_int_t incy )
{
hipDoubleComplex res = MAGMA_Z_ZERO;
magma_int_t tid = blockIdx.x * thread_seg + threadIdx.x;
magma_int_t i;
if(tid < m)
{
#pragma unroll
for (i=0; i<tid; i++)
{
res += A[tid + i*lda] * x[i];
}
y[tid] = beta * y[tid] + alpha * res;
}
}
__global__ void
magmablas_zsymv_130_kernel2( magma_int_t m, hipDoubleComplex alpha,
const hipDoubleComplex *A, magma_int_t lda,
const hipDoubleComplex *x, magma_int_t incx,
hipDoubleComplex beta,
hipDoubleComplex *y, magma_int_t incy )
{
__shared__ hipDoubleComplex sdata[threadSize];
magma_int_t tx = threadIdx.x;
magma_int_t i;
hipDoubleComplex c_zero = MAGMA_Z_ZERO;
hipDoubleComplex res = MAGMA_Z_ZERO;
magma_int_t m1 = ((m - blockIdx.y)/threadSize) * threadSize;
for(i=blockIdx.y; i<(m1 + blockIdx.y); i+= threadSize)
{
res += cuConj(A[tx+i + lda*blockIdx.y]) * x[tx+i];
}
if(m > (m1 + blockIdx.y))
{
if( (tx + m1 + blockIdx.y) < m )
{
res += cuConj(A[tx+m1+blockIdx.y + lda*blockIdx.y])
* x[tx+m1+blockIdx.y];
}
else
{
res += c_zero;
}
}
sdata[tx] = res;
__syncthreads();
if(tx < 64)
{
sdata[tx] += sdata[tx + 64];
}
__syncthreads();
if(tx < 32)
{
sdata[tx] += sdata[tx + 32];
sdata[tx] += sdata[tx + 16];
sdata[tx] += sdata[tx + 8];
sdata[tx] += sdata[tx + 4];
sdata[tx] += sdata[tx + 2];
sdata[tx] += sdata[tx + 1];
}
if( tx == 0 )
{
y[blockIdx.y] = alpha * sdata[0] + y[blockIdx.y];
}
}
/*************************************************************************
Purpose
=======
magmablas_zsymv_130 performs the matrix-vector operation on tesla:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - COMPLEX*16 .
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - COMPLEX*16 .
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_zsymv_130( char uplo, magma_int_t n,
hipDoubleComplex alpha,
const hipDoubleComplex *A, magma_int_t lda,
const hipDoubleComplex *X, magma_int_t incx,
hipDoubleComplex beta,
hipDoubleComplex *Y, magma_int_t incy)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return MAGMA_SUCCESS;
magma_int_t blocks = (n-1)/thread_seg + 1;
dim3 grid1( blocks, 1, 1);
dim3 threads1(thread_seg, 1, 1);
dim3 grid2( 1, n, 1);
dim3 threads2(threadSize, 1, 1);
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
#if defined(PRECISION_z) || (defined PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
#else
hipblasZsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
#endif
}
else
{
hipLaunchKernelGGL(( magmablas_zsymv_130_kernel1) , dim3(grid1), dim3(threads1), 0, magma_stream ,
n, alpha, A, lda, X, incx, beta, Y, incy);
hipLaunchKernelGGL(( magmablas_zsymv_130_kernel2) , dim3(grid2), dim3(threads2), 0, magma_stream ,
n, alpha, A, lda, X, incx, beta, Y, incy);
}
return MAGMA_SUCCESS;
}
#endif /* defined(PRECISION_z) && (GPUSHMEM < 200)*/
| 5c45cd63b63009953ed10e9adbaf41ff867b6475.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal z -> z
WARNING: this version has really poor performance
and cublas is prefered to this implementation.
*/
#include "common_magma.h"
#define PRECISION_z
/*The version for fermi can be found in zsymv_fermi.cu */
#if defined(PRECISION_z) && (GPUSHMEM < 200)
#define magmablas_zsymv_130 magmablas_zsymv
#define thread_seg 128 // used in zsymv_130_kernel1
#define threadSize 128 // used in zsymv_130_kernel2
// kernel1: one thread per row.  Thread tid accumulates the strictly-lower
// row contribution  sum_{i < tid} A[tid + i*lda] * x[i]  and applies the
// beta scaling:  y[tid] = beta*y[tid] + alpha*partial.
// NOTE(review): incx/incy are accepted but the indexing below assumes unit
// stride (x[i], y[tid]) — confirm callers only pass incx == incy == 1.
__global__ void
magmablas_zsymv_130_kernel1( magma_int_t m, cuDoubleComplex alpha,
const cuDoubleComplex *A, magma_int_t lda,
const cuDoubleComplex *x, magma_int_t incx,
cuDoubleComplex beta,
cuDoubleComplex *y, magma_int_t incy )
{
cuDoubleComplex res = MAGMA_Z_ZERO;
magma_int_t tid = blockIdx.x * thread_seg + threadIdx.x;
magma_int_t i;
if(tid < m)
{
// Trip count depends on tid, so the unroll is best-effort only.
#pragma unroll
for (i=0; i<tid; i++)
{
res += A[tid + i*lda] * x[i];
}
y[tid] = beta * y[tid] + alpha * res;
}
}
// kernel2: one block per output element (gridDim.y == m).  Block j=blockIdx.y
// reduces  sum_{i >= j} conj(A[i + j*lda]) * x[i]  across its threadSize
// threads in shared memory, then adds alpha*sum into y[j].  beta is unused
// here: kernel1 already applied the beta scaling.
// NOTE(review): the cuConj matches HEMV rather than SYMV semantics — confirm
// this is intended for the zsymv entry point (it is benign only if A's
// imaginary parts are ignored by the callers).
// NOTE(review): the tx<32 section relies on pre-Volta warp-synchronous
// execution over a non-volatile shared array — acceptable for the
// GPUSHMEM < 200 targets this file guards on, unsafe on newer parts.
__global__ void
magmablas_zsymv_130_kernel2( magma_int_t m, cuDoubleComplex alpha,
const cuDoubleComplex *A, magma_int_t lda,
const cuDoubleComplex *x, magma_int_t incx,
cuDoubleComplex beta,
cuDoubleComplex *y, magma_int_t incy )
{
__shared__ cuDoubleComplex sdata[threadSize];
magma_int_t tx = threadIdx.x;
magma_int_t i;
cuDoubleComplex c_zero = MAGMA_Z_ZERO;
cuDoubleComplex res = MAGMA_Z_ZERO;
// m1 = largest multiple of threadSize that fits in rows [blockIdx.y, m)
magma_int_t m1 = ((m - blockIdx.y)/threadSize) * threadSize;
for(i=blockIdx.y; i<(m1 + blockIdx.y); i+= threadSize)
{
res += cuConj(A[tx+i + lda*blockIdx.y]) * x[tx+i];
}
// Tail: rows past the last full threadSize chunk, guarded per thread.
if(m > (m1 + blockIdx.y))
{
if( (tx + m1 + blockIdx.y) < m )
{
res += cuConj(A[tx+m1+blockIdx.y + lda*blockIdx.y])
* x[tx+m1+blockIdx.y];
}
else
{
res += c_zero;
}
}
sdata[tx] = res;
__syncthreads();
// Tree reduction: 128 -> 64 partials, then a warp-synchronous finish.
if(tx < 64)
{
sdata[tx] += sdata[tx + 64];
}
__syncthreads();
if(tx < 32)
{
sdata[tx] += sdata[tx + 32];
sdata[tx] += sdata[tx + 16];
sdata[tx] += sdata[tx + 8];
sdata[tx] += sdata[tx + 4];
sdata[tx] += sdata[tx + 2];
sdata[tx] += sdata[tx + 1];
}
if( tx == 0 )
{
y[blockIdx.y] = alpha * sdata[0] + y[blockIdx.y];
}
}
/*************************************************************************
Purpose
=======
magmablas_zsymv_130 performs the matrix-vector operation on tesla:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO - CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - COMPLEX*16 .
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
It is recommended that lda is multiple of 16. Otherwise
performance would be deteriorated as the memory accesses
would not be fully coalescent.
X - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - COMPLEX*16 .
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_zsymv_130( char uplo, magma_int_t n,
cuDoubleComplex alpha,
const cuDoubleComplex *A, magma_int_t lda,
const cuDoubleComplex *X, magma_int_t incx,
cuDoubleComplex beta,
cuDoubleComplex *Y, magma_int_t incy)
{
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return MAGMA_SUCCESS;
magma_int_t blocks = (n-1)/thread_seg + 1;
dim3 grid1( blocks, 1, 1);
dim3 threads1(thread_seg, 1, 1);
dim3 grid2( 1, n, 1);
dim3 threads2(threadSize, 1, 1);
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
#if defined(PRECISION_z) || (defined PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
#else
cublasZsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy);
#endif
}
else
{
magmablas_zsymv_130_kernel1 <<< grid1, threads1, 0, magma_stream >>>
(n, alpha, A, lda, X, incx, beta, Y, incy);
magmablas_zsymv_130_kernel2 <<< grid2, threads2, 0, magma_stream >>>
(n, alpha, A, lda, X, incx, beta, Y, incy);
}
return MAGMA_SUCCESS;
}
#endif /* defined(PRECISION_z) && (GPUSHMEM < 200)*/
|
2de4d52d82bde4d8336635387a73404ca7658842.hip | // !!! This is a file automatically generated by hipify!!!
/*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for CUDA header file here:
#include "hip/hip_runtime.h"
#define MXITER 1000

// Portability shim: let host-only compilers parse the CUDA/HIP qualifiers.
#if !defined(__CUDACC__) && !defined(__HIPCC__) && !defined(__host__)
#define __host__
#define __device__
#endif

// A point in the complex plane: real part r, imaginary part i.
typedef struct {
  double r;
  double i;
}complex_t;

// Return the number of iterations of z <- z^2 + c (starting from z = c)
// before |z| exceeds 2, or MXITER if the orbit never escapes.
// FIX: marked __host__ __device__ — the mandelbrot kernel calls this from
// device code, and a plain host function is not callable there.
__host__ __device__ int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;
  z = c;
  for(iter=0; iter<MXITER; iter++){
    // z <- z^2 + c, expanded over real/imaginary parts
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){   // |z|^2 > 4  <=>  |z| > 2: escaped
      return iter;
    }
  }
  return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
// Q2c: Mandelbrot iteration as a kernel — one thread per pixel.
// The grid is 2D: x covers the real axis (Nre columns), y the imaginary
// axis (Nim rows).  Each in-range thread maps its pixel to a point c inside
// [cmin, cmax] and stores testpoint(c) into count (row-major, Nre wide).
// FIX: m and n were read uninitialized; they are now derived from the
// thread/block indices, with a bounds check for grids that overhang.
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int m = blockIdx.x * blockDim.x + threadIdx.x;  // column (real axis)
  int n = blockIdx.y * blockDim.y + threadIdx.y;  // row (imaginary axis)
  if (m >= Nre || n >= Nim) return;               // grid may overhang the image
  double dr = (cmax.r - cmin.r) / (Nre - 1);
  double di = (cmax.i - cmin.i) / (Nim - 1);
  complex_t c;
  c.r = cmin.r + dr * m;
  c.i = cmin.i + di * n;
  count[m + n * Nre] = testpoint(c);
}
int main(int argc, char **argv){
  // Usage: ./mandelbrot <Nre> <Nim> <threads per block dimension>
  // e.g. ./mandelbrot 4096 4096 16 renders a 4096x4096 image.
  if (argc < 4) {
    printf("usage: ./mandelbrot Nre Nim Nthreads\n");
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);
  // Q2b: 2D launch configuration — Nthreads x Nthreads threads per block,
  // enough blocks (ceiling division) to cover every pixel.
  dim3 B(Nthreads, Nthreads, 1);
  dim3 G((Nre + Nthreads - 1) / Nthreads, (Nim + Nthreads - 1) / Nthreads, 1);
  // Iteration counts: device buffer for the kernel, host buffer for the PNG.
  float *count_d;
  float *count_h;
  hipMalloc(&count_d, Nre * Nim * sizeof(float));  // FIX: was missing ')'
  count_h = (float*) malloc(Nre * Nim * sizeof(float));
  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;
  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  clock_t start = clock(); //start time in CPU cycles
  // compute mandelbrot set.  FIX: a __global__ function must be launched
  // with an execution configuration, and we must wait for it before timing.
  hipLaunchKernelGGL(mandelbrot, G, B, 0, 0, Nre, Nim, cmin, cmax, count_d);
  hipDeviceSynchronize();
  clock_t end = clock(); //end time in CPU cycles
  // print elapsed time
  printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
  // FIX: the counts live on the device; copy them back before writing the
  // image (the original passed a device pointer straight to the PNG writer).
  hipMemcpy(count_h, count_d, Nre * Nim * sizeof(float), hipMemcpyDeviceToHost);
  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  printf("Printing mandelbrot.png...");
  write_hot_png(fp, Nre, Nim, count_h, 0, 80);
  printf("done.\n");
  // FIX: device memory is released with hipFree, host memory with free();
  // the original free()'d a device pointer and left broken, unused
  // allocations (f_a/f_b/f_c) that referenced an undefined N.
  hipFree(count_d);
  free(count_h);
  return 0;
}
| 2de4d52d82bde4d8336635387a73404ca7658842.cu | /*
To compile:
nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for CUDA header file here:
#include "cuda.h"
#define MXITER 1000

// Portability shim: let host-only compilers parse the CUDA qualifiers.
#if !defined(__CUDACC__) && !defined(__host__)
#define __host__
#define __device__
#endif

// A point in the complex plane: real part r, imaginary part i.
typedef struct {
  double r;
  double i;
}complex_t;

// Return the number of iterations of z <- z^2 + c (starting from z = c)
// before |z| exceeds 2, or MXITER if the orbit never escapes.
// FIX: marked __host__ __device__ — the mandelbrot kernel calls this from
// device code, and a plain host function is not callable there.
__host__ __device__ int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;
  z = c;
  for(iter=0; iter<MXITER; iter++){
    // z <- z^2 + c, expanded over real/imaginary parts
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){   // |z|^2 > 4  <=>  |z| > 2: escaped
      return iter;
    }
  }
  return iter;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// Q2c: transform this function into a CUDA kernel
// Q2c: Mandelbrot iteration as a CUDA kernel — one thread per pixel.
// The grid is 2D: x covers the real axis (Nre columns), y the imaginary
// axis (Nim rows).  Each in-range thread maps its pixel to a point c inside
// [cmin, cmax] and stores testpoint(c) into count (row-major, Nre wide).
// FIX: m and n were read uninitialized; they are now derived from the
// thread/block indices, with a bounds check for grids that overhang.
__global__ void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int m = blockIdx.x * blockDim.x + threadIdx.x;  // column (real axis)
  int n = blockIdx.y * blockDim.y + threadIdx.y;  // row (imaginary axis)
  if (m >= Nre || n >= Nim) return;               // grid may overhang the image
  double dr = (cmax.r - cmin.r) / (Nre - 1);
  double di = (cmax.i - cmin.i) / (Nim - 1);
  complex_t c;
  c.r = cmin.r + dr * m;
  c.i = cmin.i + di * n;
  count[m + n * Nre] = testpoint(c);
}
int main(int argc, char **argv){
  // Usage: ./mandelbrot <Nre> <Nim> <threads per block dimension>
  // e.g. ./mandelbrot 4096 4096 16 renders a 4096x4096 image.
  if (argc < 4) {
    printf("usage: ./mandelbrot Nre Nim Nthreads\n");
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);
  // Q2b: 2D launch configuration — Nthreads x Nthreads threads per block,
  // enough blocks (ceiling division) to cover every pixel.
  dim3 B(Nthreads, Nthreads, 1);
  dim3 G((Nre + Nthreads - 1) / Nthreads, (Nim + Nthreads - 1) / Nthreads, 1);
  // Iteration counts: device buffer for the kernel, host buffer for the PNG.
  float *count_d;
  float *count_h;
  cudaMalloc(&count_d, Nre * Nim * sizeof(float));  // FIX: was missing ')'
  count_h = (float*) malloc(Nre * Nim * sizeof(float));
  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;
  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  clock_t start = clock(); //start time in CPU cycles
  // compute mandelbrot set.  FIX: a __global__ function must be launched
  // with an execution configuration, and we must wait for it before timing.
  mandelbrot<<<G, B>>>(Nre, Nim, cmin, cmax, count_d);
  cudaDeviceSynchronize();
  clock_t end = clock(); //end time in CPU cycles
  // print elapsed time
  printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
  // FIX: the counts live on the device; copy them back before writing the
  // image (the original passed a device pointer straight to the PNG writer).
  cudaMemcpy(count_h, count_d, Nre * Nim * sizeof(float), cudaMemcpyDeviceToHost);
  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  printf("Printing mandelbrot.png...");
  write_hot_png(fp, Nre, Nim, count_h, 0, 80);
  printf("done.\n");
  // FIX: device memory is released with cudaFree, host memory with free();
  // the original free()'d a device pointer and left broken, unused
  // allocations (f_a/f_b/f_c) that referenced an undefined N.
  cudaFree(count_d);
  free(count_h);
  return 0;
}
|
e07b4359cd8d00b2fe1e489d7fffda1f1137f8cc.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.cu"
int main (int argc, char *argv[])
{
    // Vector-addition driver: parse the vector size, fill two random host
    // vectors, stage them on the device, run basicVecAdd (kernel file),
    // copy the result back and verify it against the host reference.
    //set standard seed
    srand(217);
    Timer timer;
    hipError_t cuda_ret;
    // Initialize host variables ----------------------------------------------
    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);
    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    size_t A_sz, B_sz, C_sz;
    unsigned VecSize;
    if (argc == 1) {
        VecSize = 1000;              // default problem size
    } else if (argc == 2) {
        VecSize = atoi(argv[1]);
    }
    else {
        printf("\nOh no!\nUsage: ./vecAdd <Size>");
        exit(0);
    }
    A_sz = VecSize;
    B_sz = VecSize;
    C_sz = VecSize;
    // Random values in [0, 1) with two decimal places.
    A_h = (float*) malloc( sizeof(float)*A_sz );
    for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
    B_h = (float*) malloc( sizeof(float)*B_sz );
    for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
    C_h = (float*) malloc( sizeof(float)*C_sz );
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // FIX: the original format string had two %u conversions but only one
    // argument (undefined behavior); a vector has a single dimension.
    printf(" size Of vector: %u\n ", VecSize);
    // Allocate device variables ----------------------------------------------
    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);
    size_t size = VecSize * sizeof(float);   // bytes per vector
    hipMalloc((void **)&A_d, size);
    hipMalloc((void **)&B_d, size);
    hipMalloc((void **)&C_d, size);
    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy host variables to device ------------------------------------------
    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);
    hipMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B_h, size, hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Launch kernel ---------------------------
    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);
    basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
    cuda_ret = hipDeviceSynchronize();
    if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables from host ----------------------------------------
    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);
    hipMemcpy(C_h, C_d, size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Verify correctness -----------------------------------------------------
    printf("Verifying results..."); fflush(stdout);
    verify(A_h, B_h, C_h, VecSize);
    // Free memory ------------------------------------------------------------
    free(A_h);
    free(B_h);
    free(C_h);
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);
    return 0;
}
| e07b4359cd8d00b2fe1e489d7fffda1f1137f8cc.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.cu"
int main (int argc, char *argv[])
{
    // Vector-addition driver: parse the vector size, fill two random host
    // vectors, stage them on the device, run basicVecAdd (kernel.cu),
    // copy the result back and verify it against the host reference.
    //set standard seed
    srand(217);
    Timer timer;
    cudaError_t cuda_ret;
    // Initialize host variables ----------------------------------------------
    printf("\nSetting up the problem..."); fflush(stdout);
    startTime(&timer);
    float *A_h, *B_h, *C_h;
    float *A_d, *B_d, *C_d;
    size_t A_sz, B_sz, C_sz;
    unsigned VecSize;
    if (argc == 1) {
        VecSize = 1000;              // default problem size
    } else if (argc == 2) {
        VecSize = atoi(argv[1]);
    }
    else {
        printf("\nOh no!\nUsage: ./vecAdd <Size>");
        exit(0);
    }
    A_sz = VecSize;
    B_sz = VecSize;
    C_sz = VecSize;
    // Random values in [0, 1) with two decimal places.
    A_h = (float*) malloc( sizeof(float)*A_sz );
    for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
    B_h = (float*) malloc( sizeof(float)*B_sz );
    for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
    C_h = (float*) malloc( sizeof(float)*C_sz );
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // FIX: the original format string had two %u conversions but only one
    // argument (undefined behavior); a vector has a single dimension.
    printf(" size Of vector: %u\n ", VecSize);
    // Allocate device variables ----------------------------------------------
    printf("Allocating device variables..."); fflush(stdout);
    startTime(&timer);
    size_t size = VecSize * sizeof(float);   // bytes per vector
    cudaMalloc((void **)&A_d, size);
    cudaMalloc((void **)&B_d, size);
    cudaMalloc((void **)&C_d, size);
    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy host variables to device ------------------------------------------
    printf("Copying data from host to device..."); fflush(stdout);
    startTime(&timer);
    cudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Launch kernel ---------------------------
    printf("Launching kernel..."); fflush(stdout);
    startTime(&timer);
    basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu
    cuda_ret = cudaDeviceSynchronize();
    if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables from host ----------------------------------------
    printf("Copying data from device to host..."); fflush(stdout);
    startTime(&timer);
    cudaMemcpy(C_h, C_d, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Verify correctness -----------------------------------------------------
    printf("Verifying results..."); fflush(stdout);
    verify(A_h, B_h, C_h, VecSize);
    // Free memory ------------------------------------------------------------
    free(A_h);
    free(B_h);
    free(C_h);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
93e1cffd12482de5decceca4b98591d980cf5f7a.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Dense double-precision matrix product: result = left * right, where left is
// (resultHeight x leftWidth), right is (leftWidth x resultWidth) and result is
// (resultHeight x resultWidth), all row-major.  Each thread owns a small tile
// of outputs; threads in the last block row/column additionally compute the
// rows/columns the grid does not cover.
// NOTE(review): the i loop (bounded by valueColsPerThread) advances the row
// and the j loop (bounded by valueRowsPerThread) advances the column — the
// bound names look swapped; behavior kept as-is, confirm against the launcher.
__global__ void multiplyKernel(int resultWidth, int resultHeight, int leftWidth, int valueColsPerThread, int valueRowsPerThread, double* left, double* right, double* result)
{
	int ROW = blockIdx.y * blockDim.y + threadIdx.y;
	int COL = blockIdx.x * blockDim.x + threadIdx.x;
	// Area the grid covers in one pass; anything beyond is "extra" work
	// picked up by the edge blocks below.
	int totalGridHeight = blockDim.y * gridDim.y * valueRowsPerThread;
	int totalGridWidth = blockDim.x * gridDim.x * valueColsPerThread;
	for (int i = 0; i < valueColsPerThread; ++i)
	{
		int currentRow = ROW + i;
		for (int j = 0; j < valueRowsPerThread; ++j)
		{
			int currentCol = COL + j;
			if (currentRow < resultHeight && currentCol < resultWidth)
			{
				double tmpSum = 0.0;
				for (int k = 0; k < leftWidth; ++k)
				{
					double leftValue = left[(currentRow)*leftWidth + k];
					double rightValue = right[k * resultWidth + currentCol];
					tmpSum += leftValue * rightValue;
				}
				result[(currentRow)*resultWidth + currentCol] = tmpSum;
			}
			//outer threads may need to do extra work because the matrix might not fit nicely on the grid
			if (blockIdx.y + 1 == gridDim.y && totalGridHeight < resultHeight)
			{
				int extraRow = currentRow + blockDim.y;
				if (extraRow < resultHeight && currentCol < resultWidth)
				{
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(extraRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + currentCol];
						tmpSum += leftValue * rightValue;
					}
					result[(extraRow)*resultWidth + currentCol] = tmpSum;
				}
			}
			if (blockIdx.x + 1 == gridDim.x && totalGridWidth < resultWidth)
			{
				int extraCol = currentCol + blockDim.x;
				if (currentRow < resultHeight && extraCol < resultWidth)
				{
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(currentRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + extraCol];
						tmpSum += leftValue * rightValue;
					}
					result[(currentRow)*resultWidth + extraCol] = tmpSum;
				}
			}
			if (blockIdx.x + 1 == gridDim.x && totalGridWidth < resultWidth && blockIdx.y + 1 == gridDim.y && totalGridHeight < resultHeight)
			{
				int extraCol = currentCol + blockDim.x;
				int extraRow = currentRow + blockDim.y;
				if (extraRow < resultHeight && extraCol < resultWidth)
				{
					// FIX: this corner-tile accumulator was declared `float`,
					// silently truncating to single precision while every
					// other path accumulates in double.
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(extraRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + extraCol];
						tmpSum += leftValue * rightValue;
					}
					result[(extraRow)*resultWidth + extraCol] = tmpSum;
				}
			}
		}
	}
}
// Host wrapper: launches multiplyKernel on the default stream with the given
// geometry and returns hipGetLastError().  Note this reports launch/
// configuration errors only; asynchronous execution errors surface at the
// next synchronizing call.
hipError_t launchMultiplyKernel(dim3 blocksPerGrid, dim3 threadsPerBlock, int valueColsPerThread, int valueRowsPerThread, int resultWidth, int resultHeight, int leftWidth, double* dev_left, double* dev_right, double* dev_result)
{
	multiplyKernel << <blocksPerGrid, threadsPerBlock >> > (resultWidth, resultHeight, leftWidth, valueColsPerThread, valueRowsPerThread, dev_left, dev_right, dev_result);
	return hipGetLastError();
}
| 93e1cffd12482de5decceca4b98591d980cf5f7a.cu | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Dense double-precision matrix product: result = left * right, where left is
// (resultHeight x leftWidth), right is (leftWidth x resultWidth) and result is
// (resultHeight x resultWidth), all row-major.  Each thread owns a small tile
// of outputs; threads in the last block row/column additionally compute the
// rows/columns the grid does not cover.
// NOTE(review): the i loop (bounded by valueColsPerThread) advances the row
// and the j loop (bounded by valueRowsPerThread) advances the column — the
// bound names look swapped; behavior kept as-is, confirm against the launcher.
__global__ void multiplyKernel(int resultWidth, int resultHeight, int leftWidth, int valueColsPerThread, int valueRowsPerThread, double* left, double* right, double* result)
{
	int ROW = blockIdx.y * blockDim.y + threadIdx.y;
	int COL = blockIdx.x * blockDim.x + threadIdx.x;
	// Area the grid covers in one pass; anything beyond is "extra" work
	// picked up by the edge blocks below.
	int totalGridHeight = blockDim.y * gridDim.y * valueRowsPerThread;
	int totalGridWidth = blockDim.x * gridDim.x * valueColsPerThread;
	for (int i = 0; i < valueColsPerThread; ++i)
	{
		int currentRow = ROW + i;
		for (int j = 0; j < valueRowsPerThread; ++j)
		{
			int currentCol = COL + j;
			if (currentRow < resultHeight && currentCol < resultWidth)
			{
				double tmpSum = 0.0;
				for (int k = 0; k < leftWidth; ++k)
				{
					double leftValue = left[(currentRow)*leftWidth + k];
					double rightValue = right[k * resultWidth + currentCol];
					tmpSum += leftValue * rightValue;
				}
				result[(currentRow)*resultWidth + currentCol] = tmpSum;
			}
			//outer threads may need to do extra work because the matrix might not fit nicely on the grid
			if (blockIdx.y + 1 == gridDim.y && totalGridHeight < resultHeight)
			{
				int extraRow = currentRow + blockDim.y;
				if (extraRow < resultHeight && currentCol < resultWidth)
				{
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(extraRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + currentCol];
						tmpSum += leftValue * rightValue;
					}
					result[(extraRow)*resultWidth + currentCol] = tmpSum;
				}
			}
			if (blockIdx.x + 1 == gridDim.x && totalGridWidth < resultWidth)
			{
				int extraCol = currentCol + blockDim.x;
				if (currentRow < resultHeight && extraCol < resultWidth)
				{
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(currentRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + extraCol];
						tmpSum += leftValue * rightValue;
					}
					result[(currentRow)*resultWidth + extraCol] = tmpSum;
				}
			}
			if (blockIdx.x + 1 == gridDim.x && totalGridWidth < resultWidth && blockIdx.y + 1 == gridDim.y && totalGridHeight < resultHeight)
			{
				int extraCol = currentCol + blockDim.x;
				int extraRow = currentRow + blockDim.y;
				if (extraRow < resultHeight && extraCol < resultWidth)
				{
					// FIX: this corner-tile accumulator was declared `float`,
					// silently truncating to single precision while every
					// other path accumulates in double.
					double tmpSum = 0.0;
					for (int k = 0; k < leftWidth; ++k)
					{
						double leftValue = left[(extraRow)*leftWidth + k];
						double rightValue = right[k * resultWidth + extraCol];
						tmpSum += leftValue * rightValue;
					}
					result[(extraRow)*resultWidth + extraCol] = tmpSum;
				}
			}
		}
	}
}
// Host wrapper: launches multiplyKernel on the default stream with the given
// geometry and returns cudaGetLastError().  Note this reports launch/
// configuration errors only; asynchronous execution errors surface at the
// next synchronizing call.
cudaError launchMultiplyKernel(dim3 blocksPerGrid, dim3 threadsPerBlock, int valueColsPerThread, int valueRowsPerThread, int resultWidth, int resultHeight, int leftWidth, double* dev_left, double* dev_right, double* dev_result)
{
	multiplyKernel << <blocksPerGrid, threadsPerBlock >> > (resultWidth, resultHeight, leftWidth, valueColsPerThread, valueRowsPerThread, dev_left, dev_right, dev_result);
	return cudaGetLastError();
}
|
3e0c8a3919b6f0b4d26e0a08647b38c38343722e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descend in POCS-type algorithms.
*
* This file will iteratively minimize by stepest descend the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
// Check the runtime's sticky error state.  On success returns 0.  On error,
// prints "CUDA:pocs_tv:<msg>:<error string>", resets the device and returns 1
// so the caller can abort.
inline int cudaCheckErrors(const char * msg)
{
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return 0;
    printf("CUDA:pocs_tv:%s:%s\n", msg, hipGetErrorString(status));
    hipDeviceReset();
    return 1;
}
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
// In-place element-wise division: vec[k] /= scalar for every k < n.
// Grid-stride loop, so any launch geometry covers the whole array.
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
    unsigned long long idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < n) {
        vec[idx] /= scalar;
        idx += gridDim.x * blockDim.x;
    }
}
// In-place element-wise scaling: vec[k] *= scalar for every k < n.
// Grid-stride loop, so any launch geometry covers the whole array.
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
    unsigned long long idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < n) {
        vec[idx] *= scalar;
        idx += gridDim.x * blockDim.x;
    }
}
// In-place element-wise subtraction: vec[k] -= vec2[k] for every k < n.
// Grid-stride loop, so any launch geometry covers the whole array.
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
    unsigned long long idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < n) {
        vec[idx] -= vec2[idx];
        idx += gridDim.x * blockDim.x;
    }
}
// Backward differences of u at voxel (x,y,z) of a depth x rows x cols volume:
//   grad[0] = u[z,y,x] - u[z-1,y,x]   (written only when 0 <= z-1 and z < depth)
//   grad[1] = u[z,y,x] - u[z,y-1,x]   (written only when 0 <= y-1 and y < rows)
//   grad[2] = u[z,y,x] - u[z,y,x-1]   (written only when 0 <= x-1 and x < cols)
// Components are left untouched at the boundary, so the caller must
// zero-initialize grad (gradientTV does).
// NOTE(review): u[idx] is read before the bounds checks; for the x==cols /
// y==rows / z==depth calls made by gradientTV this reads past the voxel
// block — presumably covered by the buffer slices the caller allocates,
// confirm against the multi-GPU split logic.
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
// Per-voxel derivative of the (isotropic) total-variation functional of f:
// for each voxel, the normalized backward-difference at the voxel minus the
// normalized contributions of its +x, +y, +z neighbors.  One thread per
// voxel over a 3D grid; out-of-volume threads exit early.
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
// Backward differences at the voxel and at its three forward neighbors.
// gradient() only writes in-bounds components, hence the zero init.
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; //% avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
// Final-warp tree reduction over volatile shared memory: after this call,
// sdata[tid] holds the sum of sdata[tid..tid+63] (callers read sdata[0]).
// Pre-Volta warp-synchronous idiom: no barrier between steps because threads
// 0-31 execute in lockstep, and `volatile` keeps the stores visible.
// NOTE(review): unsafe under Volta+ independent thread scheduling — this
// path is only compiled when __CUDA_ARCH__ < 300 (see reduce kernels), but
// confirm no newer target reaches it.
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// Squared L2 norm partial reduction: each block writes the sum of squares of
// its grid-stride slice of g_idata[0..n) to g_odata[blockIdx.x].
// Requires dynamic shared memory of blockDim.x floats.
// NOTE(review): the unguarded tid+512/256/128 steps are only in bounds when
// blockDim.x == 1024 (MAXTHREADS) — confirm every launch uses that size.
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
// Grid-stride accumulation into a register before touching shared memory.
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
// Shared-memory tree reduction: 1024 -> 64 partials.
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
// Kepler+: finish the last 64 values with warp shuffles (lane 0 gets the sum).
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
// Older parts: warp-synchronous shared-memory finish.
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// Plain-sum partial reduction: each block writes the sum of its grid-stride
// slice of g_idata[0..n) to g_odata[blockIdx.x].
// Requires dynamic shared memory of blockDim.x floats.
// NOTE(review): the unguarded tid+512/256/128 steps are only in bounds when
// blockDim.x == 1024 (MAXTHREADS) — confirm every launch uses that size.
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
// Grid-stride accumulation into a register before touching shared memory.
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
// Shared-memory tree reduction: 1024 -> 64 partials.
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
// Kepler+: finish the last 64 values with warp shuffles (lane 0 gets the sum).
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
// Older parts: warp-synchronous shared-memory finish.
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
// Steepest-descent minimisation of the total variation (TV) of the volume
// `img`, writing the denoised result to `dst` (both host memory, flat
// arrays of image_size[0]*image_size[1]*image_size[2] floats).
//   alpha      : descent step applied to the normalised TV gradient.
//   image_size : {cols, rows, slices}.
//   maxIter    : number of descent iterations.
// The volume is distributed across all visible GPUs; if it does not fit it
// is additionally processed in sequential "splits", staging data through
// host memory each outer pass (approximate mathematics, see warning below).
// Returns 0 on success, or a non-zero error code on failure.
int pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if(cudaCheckErrors("Device query fail")){return 1;}
if (deviceCount == 0) {
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","There are no available device(s) that support CUDA\n");
return ERR_NO_CAPABLE_DEVICES;
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning trhown)
int dev;
char * devicenames;
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
// NOTE(review): `devicenames` points into `deviceProp.name` itself, and
// deviceProp is overwritten by hipGetDeviceProperties each iteration, so
// this strcmp compares the current name with itself — the mismatch warning
// below can never trigger. A strncpy into a separate buffer is needed.
if (strcmp(devicenames,deviceProp.name)!=0){
// NOTE(review): printf's first argument is the format string; the second
// string here is never printed (leftover from mexErrMsgIdAndTxt style).
printf("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used. lets check that. and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// %5 of free memory shoudl be enough, we have almsot no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
// One float per reduction block (first-stage reduction scratch).
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
// 3 arrays per GPU (image, TV gradient, norm scratch) plus overlap buffers.
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each splits should have 2 slices more, to accoutn for overlap of images.
// lets make sure these 2 slices fit, if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it measn we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
// one more splot shoudl do the job, as its an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amountf of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
// Only do it if there are splits needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: ander.biguri@gmail.com\n");
return ERR_BAD_ASSERT;
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: ander.biguri@gmail.com\n");
return ERR_ASSERT_FAIL;
}
// Per-device pointer tables: image chunk, TV gradient, and the two levels
// of reduction scratch (norm2 = per-pixel, norm2aux = per-block partials).
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMalloc((void**)&d_image[dev] , mem_img_each_GPU);
hipMemset(d_image[dev],0 , mem_img_each_GPU);
hipMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
hipMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
hipMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
// NOTE(review): printf's first argument is the format string; the second
// string here is never printed.
printf("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
hipHostMalloc((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
// Lets try to make the host memory pinned:
// We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
// splits>2 is completely empirical observation
// NOTE(review): bitwise '&' only works here because '>' binds tighter;
// '&&' is what is meant (same pattern again at cleanup below).
if (isHostRegisterSupported & splits>2){
hipHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
if(cudaCheckErrors("Error pinning memory")){return 1;}
// Create streams
// Two streams per device: [0] compute, [1] copies.
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
if(cudaCheckErrors("Stream creation fail")){return 1;}
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
hipHostMalloc((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
// NOTE(review): unsigned i is compared against signed maxIter; a negative
// maxIter would wrap to a huge iteration count. Each outer pass advances
// buffer_length-1 iterations (the inner ib loop).
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to comptue all the image. The ordering of these loops
// need to be like this due to the boudnign layers between slpits. If more than 1 split is needed
// for each GPU then there is no other way that taking the entire memory out of GPU and putting it back.
// If the memory can be shared ebtween GPUs fully without extra splits, then there is an easy way of syncronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
// Last chunk may hold fewer slices than slices_per_split.
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunck
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// lets compute where we start copyes and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
}
// if we need to split and its not the first iteration, then we need to copy from Host memory the previosu result.
// NOTE(review): bitwise '&' again where '&&' is meant (works via precedence).
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
}
if(cudaCheckErrors("Memcpy failure on multi split")){return 1;}
// Run up to buffer_length-1 descent steps entirely on-device before the
// boundary layers must be re-synchronised.
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
// NOTE(review): gridGrad is sized with `curr_slices` left over from the
// last device of the previous loop — confirm all chunks are covered when
// chunk sizes differ across devices.
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
// I Dont understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks shoudl
// be enough but they are not.
hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad),0,stream[dev*nStream_device], d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
// Compute the L2 norm of the gradint. For that, reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
hipStreamSynchronize(stream[dev*nStream_device+1]);
// Stage 1: per-block sums of squares into d_norm2aux.
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Stage 2 (only if stage 1 produced more than one partial): collapse the
// partials with a single-block reduceSum, reusing d_norm2 as output.
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(cudaCheckErrors("Reduction error")){return 1;}
// Accumulate the nomr accross devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
// If we have more than one splits, lets use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
//NOMRALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(cudaCheckErrors("Scalar operations error")){return 1;}
//SUBSTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipLaunchKernelGGL(( substractArrays), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
// Syncronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(splits==1){
// Exchange the overlap (halo) slices between neighbouring devices via the
// pinned host `buffer`.
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
hipSetDevice(dev+1);
hipMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(dev);
hipMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
if (dev>0){
hipSetDevice(dev-1);
hipMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(cudaCheckErrors("Memory gather error")){return 1;}
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there has not been splits, we still have data in memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost);
}
}
if(cudaCheckErrors("Copy result back")){return 1;}
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipFree(d_image[dev]);
hipFree(d_norm2aux[dev]);
hipFree(d_dimgTV[dev]);
hipFree(d_norm2[dev]);
}
if (splits==1){
hipHostFree(buffer);
}
// NOTE(review): bitwise '&' again where '&&' is meant (works via precedence).
if (isHostRegisterSupported& splits>2){
hipHostUnregister(img);
hipHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
if(cudaCheckErrors("Memory free")){return 1;}
return 0;
}
// Queries every visible device and returns, via *mem_GPU_global, 95% of the
// smallest amount of free memory found across them, so that allocations
// sized with this value fit on every GPU.
//   deviceCount    : number of devices to inspect (0..deviceCount-1).
//   mem_GPU_global : out parameter, usable bytes per GPU.
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
    size_t memfree;
    size_t memtotal;
    for (int dev = 0; dev < deviceCount; dev++){
        hipSetDevice(dev);
        hipMemGetInfo(&memfree,&memtotal);
        if(dev==0) *mem_GPU_global=memfree;
        if(memfree<memtotal/2){
            // BUG FIX: printf treats its first argument as the format string, so the
            // original two-argument call printed only the message ID and silently
            // dropped the actual warning text.
            printf("tvDenoise:tvdenoising:GPU: One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");
        // Keep the minimum free memory seen across all devices.
        *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
    }
    // Use only 95% of the reported free memory as a safety margin.
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
    //*mem_GPU_global= insert your known number here, in bytes.
}
| 3e0c8a3919b6f0b4d26e0a08647b38c38343722e.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descend in POCS-type algorithms.
*
* This file will iteratively minimize by stepest descend the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: tigre.toolbox@gmail.com
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
// Reports the last pending CUDA error, tagged with the caller-supplied `msg`.
// On error: prints the message, resets the device, and returns 1.
// On success: returns 0.
inline int cudaCheckErrors(const char * msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return 0;
    printf("CUDA:pocs_tv:%s:%s\n", msg, cudaGetErrorString(status));
    cudaDeviceReset();
    return 1;
}
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
// In-place element-wise division: vec[k] /= scalar for every k in [0, n).
// Uses a grid-stride loop, so any launch configuration covers the array.
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
    const unsigned long long stride = gridDim.x * blockDim.x;
    for (unsigned long long k = (blockIdx.x * blockDim.x) + threadIdx.x; k < n; k += stride) {
        vec[k] /= scalar;
    }
}
// In-place element-wise multiplication: vec[k] *= scalar for every k in [0, n).
// Uses a grid-stride loop, so any launch configuration covers the array.
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
    const unsigned long long stride = gridDim.x * blockDim.x;
    for (unsigned long long k = (blockIdx.x * blockDim.x) + threadIdx.x; k < n; k += stride) {
        vec[k] *= scalar;
    }
}
// In-place element-wise subtraction: vec[k] -= vec2[k] for every k in [0, n).
// Uses a grid-stride loop, so any launch configuration covers the arrays.
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
    const unsigned long long stride = gridDim.x * blockDim.x;
    for (unsigned long long k = (blockIdx.x * blockDim.x) + threadIdx.x; k < n; k += stride) {
        vec[k] -= vec2[k];
    }
}
// Backward (one-sided) finite differences of u at voxel (z,y,x):
//   grad[0] = u(z,y,x) - u(z-1,y,x)   (set only if 0 < z < depth)
//   grad[1] = u(z,y,x) - u(z,y-1,x)   (set only if 0 < y < rows)
//   grad[2] = u(z,y,x) - u(z,y,x-1)   (set only if 0 < x < cols)
// Entries whose condition fails are left untouched — callers pre-initialise
// grad to zero.
// NOTE(review): u[idx] is read unconditionally BEFORE any bounds check, and
// the y/x guards do not re-check z. gradientTV calls this with coordinates
// one past the edge (x+1, y+1, z+1), so the read can land just beyond
// depth*rows*cols; this is only safe because the caller allocates extra
// buffer slices beyond the logical volume — confirm at any new call site.
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
// Per-voxel gradient of the (isotropic) total-variation functional.
// For each voxel, combines the normalised backward-difference at the voxel
// itself with the relevant components of the normalised differences at its
// +1 neighbours in x, y and z (a divergence of the normalised gradient
// field), writing the result to dftv[idx].
//   f    : input volume, laid out as depth x rows x cols (z-major).
//   dftv : output, same layout and size.
// One thread per voxel; expects a 3D launch covering (cols, rows, depth).
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
gradient(f,df ,z ,y ,x , depth,rows,cols);
// NOTE(review): the three calls below evaluate `gradient` one voxel past the
// current one; at the far boundaries this makes `gradient` read f just beyond
// depth*rows*cols. The host code passes depth one smaller than the allocated
// slice count, apparently relying on the extra buffer slices to keep those
// reads legal — verify before changing the launch parameters.
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; //% avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
// Final warp stage of a shared-memory tree reduction: folds sdata[tid+32..+1]
// into sdata[tid] for tid < 32, leaving the block total in sdata[0].
// Only used on the pre-SM30 code path: it relies on implicit warp lockstep
// plus `volatile` instead of explicit synchronisation, which is not safe on
// architectures with independent thread scheduling (Volta and later).
__device__ void warpReduce(volatile float *sdata, size_t tid) {
    for (int offset = 32; offset >= 1; offset >>= 1)
        sdata[tid] += sdata[tid + offset];
}
// Stage-1 reduction: computes per-block partial sums of SQUARES of
// g_idata[0..n) (i.e. partial L2-norm^2) and writes one float per block to
// g_odata[blockIdx.x].
// Launch requirements:
//   * blockDim.x must be a power of two, >= 64 (the final stages assume it);
//   * dynamic shared memory of blockDim.x * sizeof(float) bytes.
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
    extern __shared__ volatile float sdata[];
    //http://stackoverflow.com/a/35133396/1485872
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x*blockDim.x + tid;
    size_t gridSize = blockDim.x*gridDim.x;
    float mySum = 0;
    float value=0;
    // Grid-stride accumulation of squared inputs.
    while (i < n) {
        value=g_idata[i]; //avoid reading twice
        mySum += value*value;
        i += gridSize;
    }
    sdata[tid] = mySum;
    __syncthreads();
    // Shared-memory tree reduction. BUG FIX: the original had no blockDim.x
    // guards, so any launch with fewer than 1024 threads read uninitialized /
    // out-of-bounds shared memory; behaviour for 1024-thread launches (the
    // only configuration the host code uses) is unchanged.
    if (blockDim.x >= 1024 && tid < 512)
        sdata[tid] += sdata[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256)
        sdata[tid] += sdata[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128)
        sdata[tid] += sdata[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64)
        sdata[tid] += sdata[tid + 64];
    __syncthreads();
#if (__CUDA_ARCH__ >= 300)
    if ( tid < 32 )
    {
        // One last shared-memory add, then register-level shuffle reduction.
        // Full-warp mask is valid: all 32 lanes of warp 0 are active because
        // blockDim.x >= 64.
        mySum = sdata[tid] + sdata[tid + 32];
        for (int offset = warpSize/2; offset > 0; offset /= 2) {
            mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
        }
    }
#else
    if (tid < 32) {
        // Pre-SM30 fallback: volatile warp-synchronous reduction.
        warpReduce(sdata, tid);
        mySum = sdata[0];
    }
#endif
    if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// Stage-2 reduction: computes per-block partial SUMS of g_idata[0..n) and
// writes one float per block to g_odata[blockIdx.x]. The host launches this
// with a single block to collapse the partials produced by reduceNorm2.
// Launch requirements:
//   * blockDim.x must be a power of two, >= 64 (the final stages assume it);
//   * dynamic shared memory of blockDim.x * sizeof(float) bytes.
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
    extern __shared__ volatile float sdata[];
    //http://stackoverflow.com/a/35133396/1485872
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x*blockDim.x + tid;
    size_t gridSize = blockDim.x*gridDim.x;
    float mySum = 0;
    // Grid-stride accumulation of the inputs.
    while (i < n) {
        mySum += g_idata[i];
        i += gridSize;
    }
    sdata[tid] = mySum;
    __syncthreads();
    // Shared-memory tree reduction. BUG FIX: the original had no blockDim.x
    // guards, so any launch with fewer than 1024 threads read uninitialized /
    // out-of-bounds shared memory; behaviour for 1024-thread launches (the
    // only configuration the host code uses) is unchanged.
    if (blockDim.x >= 1024 && tid < 512)
        sdata[tid] += sdata[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256)
        sdata[tid] += sdata[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128)
        sdata[tid] += sdata[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64)
        sdata[tid] += sdata[tid + 64];
    __syncthreads();
#if (__CUDA_ARCH__ >= 300)
    if ( tid < 32 )
    {
        // One last shared-memory add, then register-level shuffle reduction.
        // Full-warp mask is valid: all 32 lanes of warp 0 are active because
        // blockDim.x >= 64.
        mySum = sdata[tid] + sdata[tid + 32];
        for (int offset = warpSize/2; offset > 0; offset /= 2) {
            mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
        }
    }
#else
    if (tid < 32) {
        // Pre-SM30 fallback: volatile warp-synchronous reduction.
        warpReduce(sdata, tid);
        mySum = sdata[0];
    }
#endif
    if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
int pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if(cudaCheckErrors("Device query fail")){return 1;}
if (deviceCount == 0) {
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","There are no available device(s) that support CUDA\n");
return ERR_NO_CAPABLE_DEVICES;
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning trhown)
int dev;
char * devicenames;
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
printf("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used. lets check that. and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// %5 of free memory shoudl be enough, we have almsot no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image is around 28Mbytes), lets for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each splits should have 2 slices more, to accoutn for overlap of images.
// lets make sure these 2 slices fit, if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it measn we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
// one more splot shoudl do the job, as its an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amountf of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
// Only do it if there are splits needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: ander.biguri@gmail.com\n");
return ERR_BAD_ASSERT;
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
//mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: ander.biguri@gmail.com\n");
return ERR_ASSERT_FAIL;
}
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMalloc((void**)&d_image[dev] , mem_img_each_GPU);
cudaMemset(d_image[dev],0 , mem_img_each_GPU);
cudaMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
cudaMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
cudaMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
printf("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
cudaMallocHost((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
// Lets try to make the host memory pinned:
// We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// splits>2 is completely empirical observation
if (isHostRegisterSupported & splits>2){
cudaHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
if(cudaCheckErrors("Error pinning memory")){return 1;}
// Create streams
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
if(cudaCheckErrors("Stream creation fail")){return 1;}
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
cudaMallocHost((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to comptue all the image. The ordering of these loops
// need to be like this due to the boudnign layers between slpits. If more than 1 split is needed
// for each GPU then there is no other way that taking the entire memory out of GPU and putting it back.
// If the memory can be shared ebtween GPUs fully without extra splits, then there is an easy way of syncronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if its the first or last chunck
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// lets compute where we start copyes and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
}
        // if we need to split and it's not the first iteration, then we need to copy the previous result from host memory.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
}
if(cudaCheckErrors("Memcpy failure on multi split")){return 1;}
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
                // I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should
                // be enough but they are not.
gradientTV<<<gridGrad, blockGrad,0,stream[dev*nStream_device]>>>(d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
            // Compute the L2 norm of the gradient. For that, reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
cudaStreamSynchronize(stream[dev*nStream_device+1]);
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(cudaCheckErrors("Reduction error")){return 1;}
            // Accumulate the norm across devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
// If we have more than one splits, lets use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
                //NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
divideArrayScalar <<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
multiplyArrayScalar<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(cudaCheckErrors("Scalar operations error")){return 1;}
            //SUBTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
substractArrays<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
        // Synchronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
cudaSetDevice(dev+1);
cudaMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(dev);
cudaMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
cudaDeviceSynchronize();
if (dev>0){
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(cudaCheckErrors("Memory gather error")){return 1;}
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there has not been splits, we still have data in memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost);
}
}
if(cudaCheckErrors("Copy result back")){return 1;}
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaFree(d_image[dev]);
cudaFree(d_norm2aux[dev]);
cudaFree(d_dimgTV[dev]);
cudaFree(d_norm2[dev]);
}
if (splits==1){
cudaFreeHost(buffer);
}
if (isHostRegisterSupported& splits>2){
cudaHostUnregister(img);
cudaHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
if(cudaCheckErrors("Memory free")){return 1;}
return 0;
}
// Probe every visible GPU and report, through *mem_GPU_global, the smallest
// amount of free device memory across them, scaled down by 5% as headroom.
//
// deviceCount:    number of devices to probe (devices 0 .. deviceCount-1).
// mem_GPU_global: out-parameter; receives min(free bytes over all devices)*0.95.
//
// A warning is printed for any device with less than half of its total memory
// free, which usually means another process is occupying the GPU.
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
    size_t memfree;
    size_t memtotal;

    for (int dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(dev);
        cudaMemGetInfo(&memfree,&memtotal);
        // Seed the running minimum with the first device's free memory.
        if(dev==0) *mem_GPU_global=memfree;
        if(memfree<memtotal/2){
            // BUG FIX: the warning text used to be passed as a spare printf
            // argument after a format string with no conversions, so it was
            // never printed. Print the message id and the message itself.
            printf("tvDenoise:tvdenoising:GPU %s",
                   "One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");
        // Keep the minimum free memory over all devices so later allocations
        // fit on every GPU.
        *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
    }
    // Leave a 5% margin so allocations do not exhaust the device.
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
    //*mem_GPU_global= insert your known number here, in bytes.
}
|
39aecb56d21aad7b6363cd23835dd2e4b927157f.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
// MIT License
//
// Copyright (c) Facebook, Inc. and its affiliates.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// ----------------------------------------------------------------------------
// original path: faiss/faiss/gpu/utils/blockselect/BlockSelectFloat64.cu
// ----------------------------------------------------------------------------
#include "open3d/core/nns/kernel/BlockSelectImpl.cuh"
namespace open3d {
namespace core {
// Explicit instantiations of the block-select (top-k) kernels for double keys:
// both index widths (int32_t / int64_t), both sort directions, and one
// instantiation per supported k bucket (1, 32, 64, ..., 1024, optionally 2048).
// NOTE(review): the final macro argument is presumably a per-bucket tuning
// factor (e.g. thread-queue depth) — confirm in BlockSelectImpl.cuh.
BLOCK_SELECT_IMPL(double, int32_t, true, 1, 1);
BLOCK_SELECT_IMPL(double, int32_t, false, 1, 1);
BLOCK_SELECT_IMPL(double, int32_t, true, 32, 2);
BLOCK_SELECT_IMPL(double, int32_t, false, 32, 2);
BLOCK_SELECT_IMPL(double, int32_t, true, 64, 3);
BLOCK_SELECT_IMPL(double, int32_t, false, 64, 3);
BLOCK_SELECT_IMPL(double, int32_t, true, 128, 3);
BLOCK_SELECT_IMPL(double, int32_t, false, 128, 3);
BLOCK_SELECT_IMPL(double, int32_t, true, 256, 4);
BLOCK_SELECT_IMPL(double, int32_t, false, 256, 4);
BLOCK_SELECT_IMPL(double, int32_t, true, 512, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 512, 8);
BLOCK_SELECT_IMPL(double, int32_t, true, 1024, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 1024, 8);
// Largest bucket is only compiled when the build opts into k up to 2048.
#if GPU_MAX_SELECTION_K >= 2048
BLOCK_SELECT_IMPL(double, int32_t, true, 2048, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 2048, 8);
#endif
BLOCK_SELECT_IMPL(double, int64_t, true, 1, 1);
BLOCK_SELECT_IMPL(double, int64_t, false, 1, 1);
BLOCK_SELECT_IMPL(double, int64_t, true, 32, 2);
BLOCK_SELECT_IMPL(double, int64_t, false, 32, 2);
BLOCK_SELECT_IMPL(double, int64_t, true, 64, 3);
BLOCK_SELECT_IMPL(double, int64_t, false, 64, 3);
BLOCK_SELECT_IMPL(double, int64_t, true, 128, 3);
BLOCK_SELECT_IMPL(double, int64_t, false, 128, 3);
BLOCK_SELECT_IMPL(double, int64_t, true, 256, 4);
BLOCK_SELECT_IMPL(double, int64_t, false, 256, 4);
BLOCK_SELECT_IMPL(double, int64_t, true, 512, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 512, 8);
BLOCK_SELECT_IMPL(double, int64_t, true, 1024, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
BLOCK_SELECT_IMPL(double, int64_t, true, 2048, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 2048, 8);
#endif
/// Runs GPU top-k selection over (key, index) pairs — double keys, int32_t
/// indices. Dispatches to the BLOCK_SELECT_PAIR_CALL instantiation for the
/// smallest compiled k bucket that can hold the requested k; the macro reads
/// stream, inK, inV, outK, outV, dim and num_points directly from this scope.
///
/// \param stream     Stream on which the selection kernel is launched.
/// \param inK        Input keys (device memory).
/// \param inV        Input indices (device memory).
/// \param outK       Selected keys (device memory).
/// \param outV       Selected indices (device memory).
/// \param dir        Sort-direction flag; presumably true selects the largest
///                   elements — confirm against BlockSelectImpl.cuh.
/// \param k          Number of elements to select; must be
///                   <= GPU_MAX_SELECTION_K.
/// \param dim        Per-query problem size forwarded to the kernel macro.
/// \param num_points Number of queries forwarded to the kernel macro.
void runBlockSelectPair(hipStream_t stream,
                        double* inK,
                        int32_t* inV,
                        double* outK,
                        int32_t* outV,
                        bool dir,
                        int k,
                        int dim,
                        int num_points) {
    OPEN3D_ASSERT(k <= GPU_MAX_SELECTION_K);
    // Bucketed dispatch: each branch launches the kernel compiled for the
    // smallest k capacity >= the runtime k.
    if (dir) {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 2048);
#endif
        }
    } else {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 2048);
#endif
        }
    }
}
/// Runs GPU top-k selection over (key, index) pairs — double keys, int64_t
/// indices. See the int32_t overload: identical bucketed dispatch, only the
/// index type of the BLOCK_SELECT_PAIR_CALL instantiations differs.
///
/// \param stream     Stream on which the selection kernel is launched.
/// \param inK        Input keys (device memory).
/// \param inV        Input indices (device memory).
/// \param outK       Selected keys (device memory).
/// \param outV       Selected indices (device memory).
/// \param dir        Sort-direction flag; presumably true selects the largest
///                   elements — confirm against BlockSelectImpl.cuh.
/// \param k          Number of elements to select; must be
///                   <= GPU_MAX_SELECTION_K.
/// \param dim        Per-query problem size forwarded to the kernel macro.
/// \param num_points Number of queries forwarded to the kernel macro.
void runBlockSelectPair(hipStream_t stream,
                        double* inK,
                        int64_t* inV,
                        double* outK,
                        int64_t* outV,
                        bool dir,
                        int k,
                        int dim,
                        int num_points) {
    OPEN3D_ASSERT(k <= GPU_MAX_SELECTION_K);
    if (dir) {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 2048);
#endif
        }
    } else {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 2048);
#endif
        }
    }
}
} // namespace core
} // namespace open3d
| 39aecb56d21aad7b6363cd23835dd2e4b927157f.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
// MIT License
//
// Copyright (c) Facebook, Inc. and its affiliates.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// ----------------------------------------------------------------------------
// original path: faiss/faiss/gpu/utils/blockselect/BlockSelectFloat64.cu
// ----------------------------------------------------------------------------
#include "open3d/core/nns/kernel/BlockSelectImpl.cuh"
namespace open3d {
namespace core {
// Explicit instantiations of the block-select (top-k) kernels for double keys:
// both index widths (int32_t / int64_t), both sort directions, and one
// instantiation per supported k bucket (1, 32, 64, ..., 1024, optionally 2048).
// NOTE(review): the final macro argument is presumably a per-bucket tuning
// factor (e.g. thread-queue depth) — confirm in BlockSelectImpl.cuh.
BLOCK_SELECT_IMPL(double, int32_t, true, 1, 1);
BLOCK_SELECT_IMPL(double, int32_t, false, 1, 1);
BLOCK_SELECT_IMPL(double, int32_t, true, 32, 2);
BLOCK_SELECT_IMPL(double, int32_t, false, 32, 2);
BLOCK_SELECT_IMPL(double, int32_t, true, 64, 3);
BLOCK_SELECT_IMPL(double, int32_t, false, 64, 3);
BLOCK_SELECT_IMPL(double, int32_t, true, 128, 3);
BLOCK_SELECT_IMPL(double, int32_t, false, 128, 3);
BLOCK_SELECT_IMPL(double, int32_t, true, 256, 4);
BLOCK_SELECT_IMPL(double, int32_t, false, 256, 4);
BLOCK_SELECT_IMPL(double, int32_t, true, 512, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 512, 8);
BLOCK_SELECT_IMPL(double, int32_t, true, 1024, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 1024, 8);
// Largest bucket is only compiled when the build opts into k up to 2048.
#if GPU_MAX_SELECTION_K >= 2048
BLOCK_SELECT_IMPL(double, int32_t, true, 2048, 8);
BLOCK_SELECT_IMPL(double, int32_t, false, 2048, 8);
#endif
BLOCK_SELECT_IMPL(double, int64_t, true, 1, 1);
BLOCK_SELECT_IMPL(double, int64_t, false, 1, 1);
BLOCK_SELECT_IMPL(double, int64_t, true, 32, 2);
BLOCK_SELECT_IMPL(double, int64_t, false, 32, 2);
BLOCK_SELECT_IMPL(double, int64_t, true, 64, 3);
BLOCK_SELECT_IMPL(double, int64_t, false, 64, 3);
BLOCK_SELECT_IMPL(double, int64_t, true, 128, 3);
BLOCK_SELECT_IMPL(double, int64_t, false, 128, 3);
BLOCK_SELECT_IMPL(double, int64_t, true, 256, 4);
BLOCK_SELECT_IMPL(double, int64_t, false, 256, 4);
BLOCK_SELECT_IMPL(double, int64_t, true, 512, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 512, 8);
BLOCK_SELECT_IMPL(double, int64_t, true, 1024, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 1024, 8);
#if GPU_MAX_SELECTION_K >= 2048
BLOCK_SELECT_IMPL(double, int64_t, true, 2048, 8);
BLOCK_SELECT_IMPL(double, int64_t, false, 2048, 8);
#endif
/// Runs GPU top-k selection over (key, index) pairs — double keys, int32_t
/// indices. Dispatches to the BLOCK_SELECT_PAIR_CALL instantiation for the
/// smallest compiled k bucket that can hold the requested k; the macro reads
/// stream, inK, inV, outK, outV, dim and num_points directly from this scope.
///
/// \param stream     CUDA stream on which the selection kernel is launched.
/// \param inK        Input keys (device memory).
/// \param inV        Input indices (device memory).
/// \param outK       Selected keys (device memory).
/// \param outV       Selected indices (device memory).
/// \param dir        Sort-direction flag; presumably true selects the largest
///                   elements — confirm against BlockSelectImpl.cuh.
/// \param k          Number of elements to select; must be
///                   <= GPU_MAX_SELECTION_K.
/// \param dim        Per-query problem size forwarded to the kernel macro.
/// \param num_points Number of queries forwarded to the kernel macro.
void runBlockSelectPair(cudaStream_t stream,
                        double* inK,
                        int32_t* inV,
                        double* outK,
                        int32_t* outV,
                        bool dir,
                        int k,
                        int dim,
                        int num_points) {
    OPEN3D_ASSERT(k <= GPU_MAX_SELECTION_K);
    // Bucketed dispatch: each branch launches the kernel compiled for the
    // smallest k capacity >= the runtime k.
    if (dir) {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, true, 2048);
#endif
        }
    } else {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int32_t, false, 2048);
#endif
        }
    }
}
/// Runs GPU top-k selection over (key, index) pairs — double keys, int64_t
/// indices. See the int32_t overload: identical bucketed dispatch, only the
/// index type of the BLOCK_SELECT_PAIR_CALL instantiations differs.
///
/// \param stream     CUDA stream on which the selection kernel is launched.
/// \param inK        Input keys (device memory).
/// \param inV        Input indices (device memory).
/// \param outK       Selected keys (device memory).
/// \param outV       Selected indices (device memory).
/// \param dir        Sort-direction flag; presumably true selects the largest
///                   elements — confirm against BlockSelectImpl.cuh.
/// \param k          Number of elements to select; must be
///                   <= GPU_MAX_SELECTION_K.
/// \param dim        Per-query problem size forwarded to the kernel macro.
/// \param num_points Number of queries forwarded to the kernel macro.
void runBlockSelectPair(cudaStream_t stream,
                        double* inK,
                        int64_t* inV,
                        double* outK,
                        int64_t* outV,
                        bool dir,
                        int k,
                        int dim,
                        int num_points) {
    OPEN3D_ASSERT(k <= GPU_MAX_SELECTION_K);
    if (dir) {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, true, 2048);
#endif
        }
    } else {
        if (k == 1) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 1);
        } else if (k <= 32) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 32);
        } else if (k <= 64) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 64);
        } else if (k <= 128) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 128);
        } else if (k <= 256) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 256);
        } else if (k <= 512) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 512);
        } else if (k <= 1024) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
        } else if (k <= 2048) {
            BLOCK_SELECT_PAIR_CALL(double, int64_t, false, 2048);
#endif
        }
    }
}
} // namespace core
} // namespace open3d
|
6878f648d016939f1599240a37da3689d6e6af8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 32 //@@ You can change this
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Block-level tree reduction: each block sums a 2*blockDim.x-element segment
// of `input` and writes its partial sum to output[blockIdx.x].
//
// input:  device array of `len` floats
// output: device array with one slot per block (partial sums)
// len:    number of valid elements in `input`
//
// NOTE: assumes blockDim.x == BLOCK_SIZE (the static shared array holds
// 2*BLOCK_SIZE floats), which matches the launch configuration in main().
__global__ void total(float *input, float *output, int len) {
  __shared__ float partialSum[2 * BLOCK_SIZE];

  unsigned int t = threadIdx.x;
  unsigned int start = 2 * blockIdx.x * blockDim.x;

  // Each thread loads two elements, zero-padding reads past the end of the
  // input so the tail block reduces correctly. (float literals: avoid double)
  partialSum[t] = (start + t < len) ? input[start + t] : 0.0f;
  partialSum[blockDim.x + t] =
      (start + blockDim.x + t < len) ? input[start + blockDim.x + t] : 0.0f;

  // Tree reduction over shared memory; the barrier runs before each step so
  // every partial written in the previous step is visible block-wide.
  for (unsigned int stride = blockDim.x; stride >= 1; stride /= 2) {
    __syncthreads();
    if (t < stride)
      partialSum[t] += partialSum[t + stride];
  }

  // BUG FIX: previously every thread stored partialSum[0] with no barrier
  // after the final reduction step, racing with thread 0's last add (some
  // threads could publish a stale value). Only thread 0 — which performed
  // that add — may write the result.
  if (t == 0)
    output[blockIdx.x] = partialSum[0];
}
// Host driver: imports the input list, reduces it on the GPU one partial sum
// per block, then finishes the reduction of the per-block partials on the CPU.
int main(int argc, char **argv) {
  int ii;
  wbArg_t args;
  float *hostInput;  // The input 1D list
  float *hostOutput; // The output list (one partial sum per block)
  float *deviceInput;
  float *deviceOutput;
  int numInputElements;  // number of elements in the input list
  int numOutputElements; // number of elements in the output list

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostInput =
      (float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);

  // Each block reduces 2*BLOCK_SIZE input elements into one output element,
  // so the output length is the ceiling of numInputElements / (2*BLOCK_SIZE).
  numOutputElements = numInputElements / (BLOCK_SIZE << 1);
  if (numInputElements % (BLOCK_SIZE << 1)) {
    numOutputElements++;
  }
  hostOutput = (float *)malloc(numOutputElements * sizeof(float));

  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The number of input elements in the input is ",
        numInputElements);
  wbLog(TRACE, "The number of output elements in the input is ",
        numOutputElements);

  wbTime_start(GPU, "Allocating GPU memory.");
  hipMalloc(&deviceInput, numInputElements * sizeof(float));
  hipMalloc(&deviceOutput, numOutputElements * sizeof(float));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  hipMemcpy(deviceInput, hostInput, numInputElements * sizeof(float),
            hipMemcpyHostToDevice);
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  // BUG FIX: the grid needs exactly numOutputElements blocks (one per
  // 2*BLOCK_SIZE-element segment). The old ceil(numInputElements/BLOCK_SIZE)
  // used truncating integer division inside ceil() and the wrong per-block
  // element count, launching surplus blocks that wrote past the end of
  // deviceOutput.
  dim3 DimGrid(numOutputElements, 1, 1);
  dim3 DimBlock(BLOCK_SIZE, 1, 1);

  wbTime_start(Compute, "Performing CUDA computation");
  hipLaunchKernelGGL(( total), dim3(DimGrid), dim3(DimBlock), 0, 0, deviceInput, deviceOutput, numInputElements);
  hipDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float),
            hipMemcpyDeviceToHost);
  wbTime_stop(Copy, "Copying output memory to the CPU");

  /********************************************************************
   * Reduce output vector on the host
   * NOTE: One could also perform the reduction of the output vector
   * recursively and support any size input. For simplicity, we do not
   * require that for this lab.
   ********************************************************************/
  for (ii = 1; ii < numOutputElements; ii++) {
    hostOutput[0] += hostOutput[ii];
  }

  wbTime_start(GPU, "Freeing GPU Memory");
  hipFree(deviceInput);
  hipFree(deviceOutput);
  wbTime_stop(GPU, "Freeing GPU Memory");

  wbSolution(args, hostOutput, 1);

  free(hostInput);
  free(hostOutput);

  return 0;
}
| 6878f648d016939f1599240a37da3689d6e6af8d.cu | // MP Reduction
// Given a list (lst) of length n
// Output its sum = lst[0] + lst[1] + ... + lst[n-1];
#include <wb.h>
#define BLOCK_SIZE 32 //@@ You can change this
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Block-level tree reduction: each block sums a 2*blockDim.x-element segment
// of `input` and writes its partial sum to output[blockIdx.x].
//
// input:  device array of `len` floats
// output: device array with one slot per block (partial sums)
// len:    number of valid elements in `input`
//
// NOTE: assumes blockDim.x == BLOCK_SIZE (the static shared array holds
// 2*BLOCK_SIZE floats), which matches the launch configuration in main().
__global__ void total(float *input, float *output, int len) {
  __shared__ float partialSum[2 * BLOCK_SIZE];

  unsigned int t = threadIdx.x;
  unsigned int start = 2 * blockIdx.x * blockDim.x;

  // Each thread loads two elements, zero-padding reads past the end of the
  // input so the tail block reduces correctly. (float literals: avoid double)
  partialSum[t] = (start + t < len) ? input[start + t] : 0.0f;
  partialSum[blockDim.x + t] =
      (start + blockDim.x + t < len) ? input[start + blockDim.x + t] : 0.0f;

  // Tree reduction over shared memory; the barrier runs before each step so
  // every partial written in the previous step is visible block-wide.
  for (unsigned int stride = blockDim.x; stride >= 1; stride /= 2) {
    __syncthreads();
    if (t < stride)
      partialSum[t] += partialSum[t + stride];
  }

  // BUG FIX: previously every thread stored partialSum[0] with no barrier
  // after the final reduction step, racing with thread 0's last add (some
  // threads could publish a stale value). Only thread 0 — which performed
  // that add — may write the result.
  if (t == 0)
    output[blockIdx.x] = partialSum[0];
}
// Host driver: imports the input list, reduces it on the GPU one partial sum
// per block, then finishes the reduction of the per-block partials on the CPU.
int main(int argc, char **argv) {
  int ii;
  wbArg_t args;
  float *hostInput;  // The input 1D list
  float *hostOutput; // The output list (one partial sum per block)
  float *deviceInput;
  float *deviceOutput;
  int numInputElements;  // number of elements in the input list
  int numOutputElements; // number of elements in the output list

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostInput =
      (float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);

  // Each block reduces 2*BLOCK_SIZE input elements into one output element,
  // so the output length is the ceiling of numInputElements / (2*BLOCK_SIZE).
  numOutputElements = numInputElements / (BLOCK_SIZE << 1);
  if (numInputElements % (BLOCK_SIZE << 1)) {
    numOutputElements++;
  }
  hostOutput = (float *)malloc(numOutputElements * sizeof(float));

  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The number of input elements in the input is ",
        numInputElements);
  wbLog(TRACE, "The number of output elements in the input is ",
        numOutputElements);

  wbTime_start(GPU, "Allocating GPU memory.");
  cudaMalloc(&deviceInput, numInputElements * sizeof(float));
  cudaMalloc(&deviceOutput, numOutputElements * sizeof(float));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float),
             cudaMemcpyHostToDevice);
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  // BUG FIX: the grid needs exactly numOutputElements blocks (one per
  // 2*BLOCK_SIZE-element segment). The old ceil(numInputElements/BLOCK_SIZE)
  // used truncating integer division inside ceil() and the wrong per-block
  // element count, launching surplus blocks that wrote past the end of
  // deviceOutput.
  dim3 DimGrid(numOutputElements, 1, 1);
  dim3 DimBlock(BLOCK_SIZE, 1, 1);

  wbTime_start(Compute, "Performing CUDA computation");
  total<<<DimGrid, DimBlock>>>(deviceInput, deviceOutput, numInputElements);
  cudaDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float),
             cudaMemcpyDeviceToHost);
  wbTime_stop(Copy, "Copying output memory to the CPU");

  /********************************************************************
   * Reduce output vector on the host
   * NOTE: One could also perform the reduction of the output vector
   * recursively and support any size input. For simplicity, we do not
   * require that for this lab.
   ********************************************************************/
  for (ii = 1; ii < numOutputElements; ii++) {
    hostOutput[0] += hostOutput[ii];
  }

  wbTime_start(GPU, "Freeing GPU Memory");
  cudaFree(deviceInput);
  cudaFree(deviceOutput);
  wbTime_stop(GPU, "Freeing GPU Memory");

  wbSolution(args, hostOutput, 1);

  free(hostInput);
  free(hostOutput);

  return 0;
}
|
82cad56eee109afbc045e933ebafbc5560d6da34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPUSolver.h"
/** The number of azimuthal angles */
__constant__ int num_azim[1];
/** The number of energy groups */
__constant__ int num_groups[1];
/** The number of FSRs */
__constant__ int num_FSRs[1];
/** The number of polar angles */
__constant__ int num_polar[1];
/** Twice the number of polar angles */
__constant__ int two_times_num_polar[1];
/** The number of polar angles times energy groups */
__constant__ int polar_times_groups[1];
/** An array for the sines of the polar angle in the polar Quadrature set */
__constant__ FP_PRECISION sinthetas[MAX_POLAR_ANGLES];
/** An array of the weights for the polar angles from the Quadrature set */
__constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES*MAX_AZIM_ANGLES];
/** A pointer to an array with the number of tracks per azimuthal angle */
__constant__ int num_tracks[MAX_AZIM_ANGLES/2];
/** The total number of Tracks */
__constant__ int tot_num_tracks[1];
/** A boolean indicating whether or not to use linear interpolation
* to comptue the exponential in the transport equation */
__constant__ bool interpolate_exponential[1];
/** The maximum index of the exponential linear interpolation table */
__constant__ int exp_table_max_index[1];
/** The spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION exp_table_spacing[1];
/** The inverse spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION inverse_exp_table_spacing[1];
/**
* @brief Fast method to round a single precision floating point value
* to an integer on the GPU.
* @param x float floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(float x) {
/* __float2int_rd truncates toward negative infinity (floor semantics),
 * matching the "rounded down" contract in the header comment */
return __float2int_rd(x);
}
/**
* @brief Fast method to round a double precision floating point value
* to an integer on the GPU.
* @param x double floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(double x) {
/* __double2int_rd truncates toward negative infinity (floor semantics),
 * matching the "rounded down" contract in the header comment */
return __double2int_rd(x);
}
/**
* @brief Compute the total fission source from all FSRs on the GPU.
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_materials on the device
* @param scalar_flux the scalar flux in each FSR and energy group
* @param fission_sources array of fission sources in each FSR and energy group
*/
__global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* fission_sources) {
/* Use a shared memory buffer for each thread's fission source.
 * Each thread only ever reads/writes its own slot (indexed by
 * threadIdx.x), so no __syncthreads() barrier is required here. */
extern __shared__ FP_PRECISION shared_fission_source[];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION volume;
FP_PRECISION source;
/* Initialize this thread's fission source accumulator to zero */
shared_fission_source[threadIdx.x] = 0;
/* Grid-stride loop: each thread accumulates nu-Sigma_f * phi * V
 * over its strided subset of FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
volume = FSR_volumes[tid];
/* Iterate over energy groups and update fission source for
 * this thread block */
for (int e=0; e < *num_groups; e++) {
source = nu_sigma_f[e] * scalar_flux(tid,e) * volume;
shared_fission_source[threadIdx.x] += source;
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
/* Copy this thread's partial fission source to global memory for a
 * subsequent reduction. NOTE(review): this writes one entry per
 * launched thread, so fission_sources is presumably sized
 * gridDim.x * blockDim.x -- confirm against the host-side allocation. */
tid = threadIdx.x + blockIdx.x * blockDim.x;
fission_sources[tid] = shared_fission_source[threadIdx.x];
return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
* @param scalar_flux an array of the FSR scalar fluxes
* @param boundary_flux an array of the Track boundary fluxes
* @param norm_factor the normalization factor
*/
__global__ void normalizeFluxesOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION norm_factor) {

/* Each thread handles a strided subset of FSRs / Tracks */
int first = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;

/* Scale every FSR scalar flux in every energy group */
for (int fsr = first; fsr < *num_FSRs; fsr += stride) {
for (int e = 0; e < *num_groups; e++)
scalar_flux(fsr,e) *= norm_factor;
}

/* Scale every Track boundary angular flux (both directions, hence
 * twice the polar-times-groups count per Track) */
int fluxes_per_track = 2 * (*polar_times_groups);
for (int track = first; track < *tot_num_tracks; track += stride) {
for (int pe = 0; pe < fluxes_per_track; pe++)
boundary_flux(track,pe) *= norm_factor;
}

return;
}
/**
* @brief Computes the total source (fission and scattering) in each FSR
* on the GPU.
* @details This method computes the total source in each region based on
* this iteration's current approximation to the scalar flux. A
* residual for the source with respect to the source compute on
* the previous iteration is computed and returned. The residual
* is determined as follows:
* /f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
* \left(\frac{Q^i - Q^{i-1}{Q^i}\right)^2}{\# FSRs}}} \f$
*
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param source an array of FSR sources from this iteration
* @param old_source an array of current FSR sources from previous iteration
* @param reduced_source an array of FSR sources / total xs
* @param inverse_k_eff the inverse of keff
* @param source_residuals an array of the FSR source residuals
* @return the residual between this source and the previous source
*/
__global__ void computeFSRSourcesOnDevice(int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* source,
FP_PRECISION* old_source,
FP_PRECISION* reduced_source,
FP_PRECISION inverse_k_eff,
FP_PRECISION* source_residuals) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* Reset this thread's accumulated source residual to zero; one
 * residual entry per launched thread, reduced later on the host */
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] = 0.0;
FP_PRECISION fission_source;
FP_PRECISION scatter_source;
dev_material* curr_material;
FP_PRECISION* nu_sigma_f;
FP_PRECISION* sigma_s;
FP_PRECISION* sigma_t;
FP_PRECISION* chi;
/* Grid-stride loop over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
nu_sigma_f = curr_material->_nu_sigma_f;
sigma_s = curr_material->_sigma_s;
sigma_t = curr_material->_sigma_t;
chi = curr_material->_chi;
/* Initialize the fission source to zero for this FSR */
fission_source = 0;
/* Compute total fission source for current region:
 * sum over groups of nu-Sigma_f(e) * phi(e) */
for (int e=0; e < *num_groups; e++)
fission_source += scalar_flux(tid,e) * nu_sigma_f[e];
/* Compute total scattering source for this FSR in group G
 * (sigma_s is laid out row-major: [G * num_groups + g] = g -> G) */
for (int G=0; G < *num_groups; G++) {
scatter_source = 0;
for (int g=0; g < *num_groups; g++)
scatter_source += sigma_s[G*(*num_groups)+g] * scalar_flux(tid,g);
/* Set the total (fission + scatter) isotropic source for this
 * FSR in this group */
source(tid,G) = (inverse_k_eff * fission_source * chi[G] +
scatter_source) * ONE_OVER_FOUR_PI;
/* NOTE(review): __fdividef is a single-precision fast-math
 * intrinsic -- verify this is intended when FP_PRECISION is double */
reduced_source(tid,G) = __fdividef(source(tid,G), sigma_t[G]);
/* Accumulate the squared relative change vs. the previous
 * iteration; sources below 1E-10 are skipped to avoid dividing
 * by (near) zero. NOTE(review): pow(...) promotes to double
 * even if FP_PRECISION is float. */
if (fabs(source(tid,G)) > 1E-10)
source_residuals[threadIdx.x + blockIdx.x * blockDim.x] +=
pow((source(tid,G) - old_source(tid,G)) / source(tid,G), 2);
/* Update the old source for the next iteration's residual */
old_source(tid,G) = source(tid,G);
}
/* Increment the thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Compute the total fission source from all FSRs and energy groups
* on the GPU.
* @param FSR_volumes an array of the FSR volumes
* @param FSR_materials an array of the FSR Material UIDs
* @param materials an array of the dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param tot_absorption an array of FSR absorption rates
* @param tot_fission an array of FSR fission rates
*/
__global__ void computeFissionAndAbsorption(FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux,
FP_PRECISION* tot_absorption,
FP_PRECISION* tot_fission) {

const int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x;

/* Per-thread running totals over this thread's strided subset of FSRs */
FP_PRECISION abs_total = 0.;
FP_PRECISION fis_total = 0.;

for (int fsr = thread_id; fsr < *num_FSRs; fsr += stride) {

dev_material* mat = &materials[FSR_materials[fsr]];
FP_PRECISION* nu_sig_f = mat->_nu_sigma_f;
FP_PRECISION* sig_a = mat->_sigma_a;
FP_PRECISION vol = FSR_volumes[fsr];

/* Energy-integrated reaction rates for this FSR */
FP_PRECISION fsr_abs = 0.;
FP_PRECISION fsr_fis = 0.;

for (int e = 0; e < *num_groups; e++) {
fsr_abs += sig_a[e] * scalar_flux(fsr,e);
fsr_fis += nu_sig_f[e] * scalar_flux(fsr,e);
}

/* Volume-weight the rates before accumulating */
abs_total += fsr_abs * vol;
fis_total += fsr_fis * vol;
}

/* Publish one partial sum per thread for a later reduction */
tot_absorption[thread_id] = abs_total;
tot_fission[thread_id] = fis_total;

return;
}
/**
* @brief Perform an atomic addition in double precision to an array address
* on the GPU.
* @details This method is straight out of CUDA C Developers Guide (cc 2013).
* @param address the array memory address
* @param val the value to add to the array
* @return the atomically added array value and input value
*/
__device__ double atomicAdd(double* address, double val) {
/* Reinterpret the double's bits as a 64-bit integer so atomicCAS
 * (which has no double overload here) can be used */
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
/* Retry loop: keep attempting the compare-and-swap until no other
 * thread modified *address between our read and our write */
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
/* Return the value that was at *address before this add */
return __longlong_as_double(old);
}
/**
* @brief Computes the exponential term in the transport equation for a
* Track segment on the GPU.
* @details This method computes \f$ 1 - exp(-l\Sigma^T_g/sin(\theta_p)) \f$
* for a segment with total group cross-section and for
* some polar angle.
* @param sigma_t the total group cross-section at this energy
* @param length the length of the line segment projected in the xy-plane
* @param _exp_table the exponential linear interpolation table
* @param p the polar angle index
* @return the evaluated exponential
*/
__device__ FP_PRECISION computeExponential(FP_PRECISION sigma_t,
FP_PRECISION length,
FP_PRECISION* _exp_table,
int p) {
FP_PRECISION exponential;
/* Optical path length of the segment */
FP_PRECISION tau = sigma_t * length;
/* Evaluate the exponential using the linear interpolation table.
 * The table stores (slope, intercept) pairs: for each tau bin there
 * are two_times_num_polar entries, two (slope at index+2p, intercept
 * at index+2p+1) per polar angle. */
if (*interpolate_exponential) {
int index;
index = round_to_int(tau * (*inverse_exp_table_spacing));
index *= (*two_times_num_polar);
/* 1 - (slope * tau + intercept) approximates 1 - exp(-tau/sin(theta_p)).
 * NOTE(review): index is not clamped against exp_table_max_index here --
 * confirm callers guarantee tau stays within the table's range. */
exponential = (1. - (_exp_table[index+2 * p] * tau +
_exp_table[index + 2 * p +1]));
}
/* Evalute the exponential using the intrinsic exp(...) function */
else {
FP_PRECISION sintheta = sinthetas[p];
#ifdef SINGLE
/* __expf: fast single-precision intrinsic (reduced accuracy) */
exponential = 1.0 - __expf(- tau / sintheta);
#else
exponential = 1.0 - exp(- tau / sintheta);
#endif
}
return exponential;
}
/**
* @brief Computes the contribution to the FSR scalar flux from a Track segment
* in a single energy group on the GPU.
* @details This method integrates the angular flux for a Track segment across
* energy groups and polar angles, and tallies it into the FSR scalar
* flux, and updates the Track's angular flux.
* @param curr_segment a pointer to the Track segment of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param energy_group the energy group of interest
* @param materials the array of dev_material pointers
* @param track_flux a pointer to the Track's angular flux
* @param reduced_source the array of FSR sources / total xs
* @param polar_weights the array of polar Quadrature weights
* @param _exp_table the exponential interpolation table
* @param scalar_flux the array of FSR scalar fluxes
*/
__device__ void scalarFluxTally(dev_segment* curr_segment,
int azim_index,
int energy_group,
dev_material* materials,
FP_PRECISION* track_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* polar_weights,
FP_PRECISION* _exp_table,
FP_PRECISION* scalar_flux) {
int fsr_id = curr_segment->_region_uid;
FP_PRECISION length = curr_segment->_length;
dev_material* curr_material = &materials[curr_segment->_material_uid];
FP_PRECISION *sigma_t = curr_material->_sigma_t;
/* The change in angular flux along this Track segment in this FSR */
FP_PRECISION delta_psi;
FP_PRECISION exponential;
/* Zero the FSR scalar flux contribution from this segment and energy group */
FP_PRECISION fsr_flux = 0.0;
/* Loop over polar angles: attenuate the angular flux toward the local
 * source and tally the weighted change into the FSR flux */
for (int p=0; p < *num_polar; p++) {
exponential = computeExponential(sigma_t[energy_group],
length, _exp_table, p);
/* delta_psi = (psi - Q/Sigma_t) * (1 - exp(-tau/sin(theta_p))) */
delta_psi = (track_flux[p] - reduced_source(fsr_id,energy_group)) *
exponential;
fsr_flux += delta_psi * polar_weights(azim_index,p);
/* Update the outgoing angular flux for this polar angle in place */
track_flux[p] -= delta_psi;
}
/* Atomically increment the scalar flux for this FSR, since many
 * threads (tracks) may tally into the same FSR concurrently */
atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux);
}
/**
* @brief Updates the boundary flux for a Track given boundary conditions
* on the GPU.
* @details For reflective boundary conditions, the outgoing boundary flux
* for the Track is given to the reflecting track. For vacuum
* boundary conditions, the outgoing flux tallied as leakage.
* Note: Only one energy group is transferred by this routine.
* @param curr_track a pointer to the Track of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param track_flux an array of the outgoing Track flux
* @param boundary_flux an array of all angular fluxes
* @param leakage an array of leakages for each CUDA thread
* @param polar_weights an array of polar Quadrature weights
* @param energy_angle_index the energy group index
* @param direction the Track direction (forward - true, reverse - false)
*/
__device__ void transferBoundaryFlux(dev_track* curr_track,
int azim_index,
FP_PRECISION* track_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* leakage,
FP_PRECISION* polar_weights,
int energy_angle_index,
bool direction) {
int start = energy_angle_index;
bool bc;
int track_out_id;
/* Extract boundary conditions for this Track and the pointer to the
 * outgoing reflective Track, and index into the leakage array */
/* For the "forward" direction */
if (direction) {
bc = curr_track->_bc_out;
track_out_id = curr_track->_track_out;
/* Offset into the reverse half of the outgoing Track's flux array
 * when the reflection flips the direction */
start += curr_track->_refl_out * (*polar_times_groups);
}
/* For the "reverse" direction */
else {
bc = curr_track->_bc_in;
track_out_id = curr_track->_track_in;
start += curr_track->_refl_in * (*polar_times_groups);
}
FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start);
/* bc acts as a 0/1 multiplier: reflective (bc true) passes the flux
 * to the outgoing Track; vacuum (bc false) zeroes it and tallies the
 * polar-weighted flux as leakage instead */
for (int p=0; p < *num_polar; p++) {
track_out_flux[p] = track_flux[p] * bc;
leakage[0] += track_flux[p] * polar_weights(azim_index,p) * (!bc);
}
}
/**
* @brief This method performs one transport sweep of one halfspace of all
* azimuthal angles, tracks, segments, polar angles and energy groups
* on the GPU.
* @details The method integrates the flux along each track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each FSR.
* @param scalar_flux an array of FSR scalar fluxes
* @param boundary_flux an array of Track boundary fluxes
* @param reduced_source an array of FSR sources / total xs
* @param leakage an array of angular flux leakaages
* @param materials an array of dev_material pointers
* @param tracks an array of Tracks
* @param _exp_table an array for the exponential interpolation table
* @param tid_offset the Track offset for azimuthal angle halfspace
* @param tid_max the upper bound on the Track IDs for this azimuthal
* angle halfspace
*/
__global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* boundary_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* leakage,
dev_material* materials,
dev_track* tracks,
FP_PRECISION* _exp_table,
int tid_offset,
int tid_max) {
/* Shared memory buffer for each thread's angular flux; each thread
 * owns a private slice of two_times_num_polar entries (forward +
 * reverse polar fluxes), so no block synchronization is needed */
extern __shared__ FP_PRECISION temp_flux[];
FP_PRECISION* track_flux;
/* tid encodes both the Track and the energy group:
 * track_id = tid / num_groups, energy_group = tid % num_groups */
int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x;
int track_id = tid / *num_groups;
int track_flux_index = threadIdx.x * (*two_times_num_polar);
int energy_group = tid % (*num_groups);
int energy_angle_index = energy_group * (*num_polar);
dev_track* curr_track;
int azim_index;
int num_segments;
dev_segment* curr_segment;
/* Iterate over Track with azimuthal angles in (0, pi/2) */
while (track_id < tid_max) {
/* Initialize local registers with important data */
curr_track = &tracks[track_id];
azim_index = curr_track->_azim_angle_index;
num_segments = curr_track->_num_segments;
/* Retrieve pointer to thread's shared memory buffer for angular flux */
track_flux = &temp_flux[track_flux_index];
/* Put Track's flux in the shared memory temporary flux array */
for (int p=0; p < *num_polar; p++) {
/* Forward flux along this Track */
track_flux[p] = boundary_flux(track_id,p+energy_angle_index);
/* Reverse flux along this Track (stored polar_times_groups later) */
track_flux[(*num_polar) + p] =
boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups));
}
/* Loop over each Track segment in forward direction */
for (int i=0; i < num_segments; i++) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track (forward) */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, true);
/* Loop over each Track segment in reverse direction, using the
 * second (reverse) half of this thread's shared flux slice */
track_flux = &temp_flux[track_flux_index + (*num_polar)];
for (int i=num_segments-1; i > -1; i--) {
curr_segment = &curr_track->_segments[i];
scalarFluxTally(curr_segment, azim_index, energy_group, materials,
track_flux, reduced_source, polar_weights,
_exp_table, scalar_flux);
}
/* Transfer boundary angular flux to outgoing Track (reverse) */
transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
&leakage[threadIdx.x + blockIdx.x * blockDim.x],
polar_weights, energy_angle_index, false);
/* Update the indices for this thread to the next Track, energy group */
tid += blockDim.x * gridDim.x;
track_id = tid / *num_groups;
energy_group = tid % (*num_groups);
energy_angle_index = energy_group * (*num_polar);
}
return;
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux on the GPU.
* @param scalar_flux an array of FSR scalar fluxes
* @param reduced_source an array of FSR sources / total xs
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
*/
__global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux,
FP_PRECISION* reduced_source,
FP_PRECISION* FSR_volumes,
int* FSR_materials,
dev_material* materials) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
FP_PRECISION volume;
dev_material* curr_material;
FP_PRECISION* sigma_t;
/* Grid-stride loop over all FSRs */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
volume = FSR_volumes[tid];
sigma_t = curr_material->_sigma_t;
/* Iterate over all energy groups: rescale the tallied flux and add
 * the isotropic source contribution:
 * phi = 4*pi*Q_reduced + 0.5*phi_tallied / (Sigma_t * V) */
for (int i=0; i < *num_groups; i++) {
scalar_flux(tid,i) *= 0.5;
/* NOTE(review): __fdividef is a single-precision fast-math
 * intrinsic -- verify this is intended when FP_PRECISION is double */
scalar_flux(tid,i) = FOUR_PI * reduced_source(tid,i) +
__fdividef(scalar_flux(tid,i), (sigma_t[i] * volume));
}
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID on the GPU.
* @details This is a helper method for the
* GPUSolver::computeFSRFissionRates(...) method.
* @param fission_rates an array to store the fission rates
* @param fission_rates an array in which to store the FSR fission rates
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
*/
__global__ void computeFSRFissionRatesOnDevice(double* fission_rates,
int* FSR_materials,
dev_material* materials,
FP_PRECISION* scalar_flux) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
dev_material* curr_material;
FP_PRECISION* sigma_f;
/* Grid-stride loop over all FSRs: each FSR is handled by exactly
 * one thread, so plain (non-atomic) accumulation is safe */
while (tid < *num_FSRs) {
curr_material = &materials[FSR_materials[tid]];
sigma_f = curr_material->_sigma_f;
/* Initialize the fission rate for this FSR to zero */
fission_rates[tid] = 0.0;
/* Energy-integrated fission rate: sum of Sigma_f(e) * phi(e) */
for (int i=0; i < *num_groups; i++)
fission_rates[tid] += sigma_f[i] * scalar_flux(tid,i);
/* Increment thread id */
tid += blockDim.x * gridDim.x;
}
return;
}
/**
* @brief Constructor initializes arrays for dev_tracks and dev_materials..
* @details The constructor retrieves the number of energy groups and FSRs
* and azimuthal angles from the Geometry and TrackGenerator if
* passed in as parameters by the user. The constructor initalizes
* the number of CUDA threads and thread blocks each to a default
* of 64.
* @param geometry an optional pointer to the Geometry
* @param track_generator an optional pointer to the TrackjGenerator
*/
GPUSolver::GPUSolver(Geometry* geometry, TrackGenerator* track_generator) :
Solver(geometry, track_generator) {
/* The default number of thread blocks (_B) and threads per block (_T) */
_B = 64;
_T = 64;
/* Null out device-side array pointers so the destructor can safely
 * test them before freeing */
_materials = NULL;
_dev_tracks = NULL;
_FSR_materials = NULL;
_tot_absorption = NULL;
_tot_fission = NULL;
_leakage = NULL;
/* Setters trigger device-side initialization (Tracks / Materials),
 * so only call them when the user supplied the objects */
if (track_generator != NULL)
setTrackGenerator(track_generator);
if (geometry != NULL)
setGeometry(geometry);
}
/**
* @brief Solver destructor frees all memory on the device, including arrays
* for the FSR scalar fluxes and sources and Track boundary fluxes.
*/
GPUSolver::~GPUSolver() {
/* Free each device allocation and null the pointer so a repeated
 * destruction (or partial initialization) is harmless */
if (_FSR_volumes != NULL) {
hipFree(_FSR_volumes);
_FSR_volumes = NULL;
}
if (_FSR_materials != NULL) {
hipFree(_FSR_materials);
_FSR_materials = NULL;
}
if (_materials != NULL) {
hipFree(_materials);
_materials = NULL;
}
if (_dev_tracks != NULL) {
hipFree(_dev_tracks);
_dev_tracks = NULL;
}
if (_boundary_flux != NULL) {
hipFree(_boundary_flux);
_boundary_flux = NULL;
}
if (_scalar_flux != NULL) {
hipFree(_scalar_flux);
_scalar_flux = NULL;
}
if (_source != NULL) {
hipFree(_source);
_source = NULL;
}
if (_old_source != NULL) {
hipFree(_old_source);
_old_source = NULL;
}
if (_reduced_source != NULL) {
hipFree(_reduced_source);
_reduced_source = NULL;
}
/* The following arrays are backed by (thrust) vector members, so only
 * the vector is cleared here rather than freeing the raw pointer.
 * NOTE(review): clear() empties the vector but may not release its
 * device storage until the vector itself is destroyed -- confirm this
 * is acceptable here. */
if (_fission_sources != NULL) {
_fission_sources_vec.clear();
_fission_sources = NULL;
}
if (_tot_absorption != NULL) {
_tot_absorption_vec.clear();
_tot_absorption = NULL;
}
if (_tot_fission != NULL) {
_tot_fission_vec.clear();
_tot_fission = NULL;
}
if (_source_residuals != NULL) {
_source_residuals_vec.clear();
_source_residuals = NULL;
}
if (_leakage != NULL) {
_leakage_vec.clear();
_leakage = NULL;
}
if (_exp_table != NULL) {
hipFree(_exp_table);
_exp_table = NULL;
}
}
/**
* @brief Returns the number of thread blocks to execute on the GPU.
* @return the number of thread blocks
*/
int GPUSolver::getNumThreadBlocks() {
/* Simple accessor for the kernel launch grid size */
return _B;
}
/**
* @brief Returns the number of threads per block to execute on the GPU.
* @return the number of threads per block
*/
int GPUSolver::getNumThreadsPerBlock() {
/* Simple accessor for the kernel launch block size */
return _T;
}
/**
* @brief Returns the FSR scalar flux for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRScalarFlux(int fsr_id, int energy_group) {

/* Error checking: FSR IDs are 0-based in [0, _num_FSRs); energy groups
 * are 1-based in [1, _num_groups]. The original messages stated the
 * bounds inverted ("IDs greater than or equal to" for an upper bound). */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only contains FSRs with IDs less "
"than or equal to %d", fsr_id, energy_group, _num_FSRs-1);

if (fsr_id < 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);

if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);

if (energy_group <= 0)
log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
" group %d since energy group numbering starts at 1",
fsr_id, energy_group);

/* Copy the scalar flux for this FSR and (1-indexed) energy group
 * from the device to the host */
FP_PRECISION fsr_scalar_flux;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
hipMemcpy((void*)&fsr_scalar_flux, (void*)&_scalar_flux[flux_index],
sizeof(FP_PRECISION), hipMemcpyDeviceToHost);

return fsr_scalar_flux;
}
/**
* @brief Return the scalar flux array indexed by FSR IDs and energy groups.
* which contains the corresponding fluxes for each flat source region.
* @return an array of FSR scalar fluxes
*/
FP_PRECISION* GPUSolver::getFSRScalarFluxes() {
if (_scalar_flux == NULL)
log_printf(ERROR, "Unable to returns the GPUSolver's scalar flux "
"array since it has not yet been allocated in memory");
/* Copy the scalar flux for all FSRs from the device to a freshly
 * allocated host array. The caller takes ownership of the returned
 * array and is responsible for delete[]-ing it. */
FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_FSRs * _num_groups];
hipMemcpy((void*)fsr_scalar_fluxes, (void*)_scalar_flux,
_num_FSRs * _num_groups * sizeof(FP_PRECISION),
hipMemcpyDeviceToHost);
return fsr_scalar_fluxes;
}
/**
* @brief Returns the FSR source for some energy group.
* @param fsr_id the ID for the FSR of interest
* @param energy_group the energy group of interest
*/
FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int energy_group) {

/* Error checking: FSR IDs are 0-based in [0, _num_FSRs); energy groups
 * are 1-based in [1, _num_groups]. The original messages stated the
 * bounds inverted ("IDs greater than or equal to" for an upper bound). */
if (fsr_id >= _num_FSRs)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only contains FSRs with IDs less than "
"or equal to %d", fsr_id, energy_group, _num_FSRs-1);

if (fsr_id < 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since FSRs do not have negative IDs",
fsr_id, energy_group);

if (energy_group-1 >= _num_groups)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since the solver only has %d energy groups",
fsr_id, energy_group, _num_groups);

if (energy_group <= 0)
log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
" group %d since energy group numbering starts at 1",
fsr_id, energy_group);

/* Copy the source for this FSR and (1-indexed) energy group from
 * the device to the host */
FP_PRECISION fsr_source;
int flux_index = fsr_id * _num_groups + (energy_group - 1);
hipMemcpy((void*)&fsr_source, (void*)&_source[flux_index],
sizeof(FP_PRECISION), hipMemcpyDeviceToHost);

return fsr_source;
}
/**
* @brief Sets the number of thread blocks (>0) for CUDA kernels.
* @param num_blocks the number of thread blocks
*/
void GPUSolver::setNumThreadBlocks(int num_blocks) {
/* The documented contract is num_blocks > 0; the original check only
 * rejected negatives, silently accepting 0 which would produce a
 * zero-sized kernel launch grid */
if (num_blocks <= 0)
log_printf(ERROR, "Unable to set the number of CUDA thread blocks "
"to %d since it is not a positive number", num_blocks);
_B = num_blocks;
}
/**
* @brief Sets the number of threads per block (>0) for CUDA kernels.
* @param num_threads the number of threads per block
*/
void GPUSolver::setNumThreadsPerBlock(int num_threads) {
/* The documented contract is num_threads > 0; the original check only
 * rejected negatives, silently accepting 0 which would produce a
 * zero-sized thread block */
if (num_threads <= 0)
log_printf(ERROR, "Unable to set the number of CUDA threads per block "
"to %d since it is not a positive number", num_threads);
_T = num_threads;
}
/**
* @brief Sets the Geometry pointer for the GPUSolver.
* @details The Geometry must already have initialized FSR offset maps
* and segmentized the TrackGenerator's tracks. Each of these
* should be initiated in Python prior to assigning a Geometry
* to the GPUSolver:
*
* @code
* geometry.initializeFlatSourceRegions()
* track_generator.generateTracks()
* @endcode
*
* @param geometry a pointer to a Geometry
*/
void GPUSolver::setGeometry(Geometry* geometry) {
/* Let the base class record the Geometry, then clone its Materials
 * onto the device */
Solver::setGeometry(geometry);
initializeMaterials();
/* Copy the number of energy groups to constant memory on the GPU */
hipMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0,
hipMemcpyHostToDevice);
}
/**
* @brief Sets the TrackGenerator with characteristic tracks for the GPUSolver.
* @details The TrackGenerator must already have generated Tracks and have
* used ray tracing to segmentize them across the Geometry. This
* should be initated in Python prior to assigning the TrackGenerator
* to the GPUSolver:
*
* @code
* track_generator.generateTracks()
* @endcode
*
* @param track_generator a pointer to a TrackGenerator
*/
void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) {
/* Let the base class record the TrackGenerator, then copy its
 * Tracks/segments onto the device */
Solver::setTrackGenerator(track_generator);
initializeTracks();
}
/**
* @brief Creates a polar Quadrature object for the GPUSolver on the GPU.
*/
void GPUSolver::initializePolarQuadrature() {
log_printf(INFO, "Initializing polar quadrature on the GPU...");
/* Deletes the old Quadrature if one existed */
if (_quad != NULL)
delete _quad;
_quad = new Quadrature(_quadrature_type, _num_polar);
_polar_times_groups = _num_groups * _num_polar;
/* Copy the number of polar angles to constant memory on the GPU */
hipMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0,
hipMemcpyHostToDevice);
/* Copy twice the number of polar angles to constant memory on the GPU */
hipMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar,
sizeof(int), 0, hipMemcpyHostToDevice);
/* Copy the number of polar angles times energy groups to constant memory
* on the GPU */
hipMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups,
sizeof(int), 0, hipMemcpyHostToDevice);
/* Compute polar times azimuthal angle weights */
if (_polar_weights != NULL)
delete [] _polar_weights;
_polar_weights =
(FP_PRECISION*)malloc(_num_polar * _num_azim * sizeof(FP_PRECISION));
FP_PRECISION* multiples = _quad->getMultiples();
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_polar; j++)
_polar_weights[i*_num_polar+j] = azim_weights[i]*multiples[j]*FOUR_PI;
}
/* Copy the polar weights to constant memory on the GPU */
hipMemcpyToSymbol(polar_weights, (void*)_polar_weights,
_num_polar * _num_azim * sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
}
/**
* @brief Initializes the FSR volumes and dev_materials array on the GPU.
* @details This method assigns each FSR a unique, monotonically increasing
* ID, sets the Material for each FSR, and assigns a volume based on
* the cumulative length of all of the segments inside the FSR.
*/
void GPUSolver::initializeFSRs() {
log_printf(INFO, "Initializing FSRs on the GPU...");
/* Delete old FSRs array if it exists */
if (_FSR_volumes != NULL)
hipFree(_FSR_volumes);
if (_FSR_materials != NULL)
hipFree(_FSR_materials);
/* Allocate memory for all FSR volumes and dev_materials on the device */
try{
/* Allocate memory on device for FSR volumes and Material UIDs */
hipMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION));
hipMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int));
/* Create a temporary FSR array to populate and then copy to device */
FP_PRECISION* temp_FSR_volumes = new FP_PRECISION[_num_FSRs];
/* Create a temporary FSR Material UIDs array to populate and then copy to device */
int* FSRs_to_material_UIDs = new int[_num_FSRs];
/* Populate FSR Material UIDs array */
for (int i = 0; i < _num_FSRs; i++)
FSRs_to_material_UIDs[i] = _geometry->findFSRMaterial(i)->getUid();
/* Initialize each FSRs volume to 0 to avoid NaNs */
memset(temp_FSR_volumes, FP_PRECISION(0.), _num_FSRs*sizeof(FP_PRECISION));
Track* track;
int num_segments;
segment* curr_segment;
segment* segments;
FP_PRECISION volume;
FP_PRECISION* azim_weights = _track_generator->getAzimWeights();
/* Set each FSR's volume by accumulating the total length of all Tracks
* inside the FSR. Iterate over azimuthal angle, Track, Track segment*/
for (int i=0; i < _num_azim; i++) {
for (int j=0; j < _num_tracks[i]; j++) {
track = &_track_generator->getTracks()[i][j];
num_segments = track->getNumSegments();
segments = track->getSegments();
/* Iterate over the Track's segments to update FSR volumes */
for (int s = 0; s < num_segments; s++) {
curr_segment = &segments[s];
volume = curr_segment->_length * azim_weights[i];
temp_FSR_volumes[curr_segment->_region_id] += volume;
}
}
}
/* Copy the temporary array of FSRs to the device */
hipMemcpy((void*)_FSR_volumes, (void*)temp_FSR_volumes,
_num_FSRs * sizeof(FP_PRECISION), hipMemcpyHostToDevice);
hipMemcpy((void*)_FSR_materials, (void*)FSRs_to_material_UIDs,
_num_FSRs * sizeof(int), hipMemcpyHostToDevice);
/* Copy the number of FSRs into constant memory on the GPU */
hipMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0,
hipMemcpyHostToDevice);
/* Free the temporary array of FSRs on the host */
free(temp_FSR_volumes);
/* Free the temporary array of FSR Material IDs on the host */
free(FSRs_to_material_UIDs);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSRs "
"on the device. Backtrace:%s", e.what());
}
initializeThrustVectors();
}
/**
* @brief Allocates data on the GPU for all Materials data.
*/
void GPUSolver::initializeMaterials() {
log_printf(INFO, "Initializing materials on the GPU...");
/* Delete old materials array if it exists */
if (_materials != NULL)
hipFree(_materials);
/* Allocate memory for all dev_materials on the device */
try{
std::map<int, Material*> host_materials=_geometry->getMaterials();
std::map<int, Material*>::iterator iter;
/* Iterate through all Materials and clone them as dev_material structs
* on the device */
hipMalloc((void**)&_materials, _num_materials * sizeof(dev_material));
for (iter=host_materials.begin(); iter != host_materials.end(); ++iter)
clone_material_on_gpu(iter->second, &_materials[iter->second->getUid()]);
}
catch(std::exception &e) {
log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
"dev_materials. Backtrace:%s", e.what());
}
}
/**
* @brief Allocates memory on the GPU for all Tracks in the simulation.
*/
void GPUSolver::initializeTracks() {

  log_printf(INFO, "Initializing tracks on the GPU...");

  /* Delete old Tracks array if it exists */
  if (_dev_tracks != NULL)
    hipFree(_dev_tracks);

  /* Allocate memory for all Tracks and Track offset indices on the device */
  try{

    /* Allocate array of dev_tracks */
    hipMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track));

    /* Iterate through all Tracks and clone them as dev_tracks on the device */
    int index;

    for (int i=0; i < _tot_num_tracks; i++) {

      clone_track_on_gpu(_tracks[i], &_dev_tracks[i]);

      /* Connect this Track to its reflective neighbors by flattening each
       * (azimuthal angle, track) pair into a scalar device array index */
      index = computeScalarTrackIndex(_tracks[i]->getTrackInI(),
                                      _tracks[i]->getTrackInJ());
      hipMemcpy((void*)&_dev_tracks[i]._track_in,
                (void*)&index, sizeof(int), hipMemcpyHostToDevice);

      index = computeScalarTrackIndex(_tracks[i]->getTrackOutI(),
                                      _tracks[i]->getTrackOutJ());
      hipMemcpy((void*)&_dev_tracks[i]._track_out,
                (void*)&index, sizeof(int), hipMemcpyHostToDevice);
    }

    /* Copy the array of number of Tracks for each azimuthal angle into
     * constant memory on GPU */
    hipMemcpyToSymbol(num_tracks, (void*)_num_tracks,
                      _num_azim * sizeof(int), 0, hipMemcpyHostToDevice);

    /* Copy the total number of Tracks into constant memory on GPU */
    hipMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks,
                      sizeof(int), 0, hipMemcpyHostToDevice);

    /* Copy the number of azimuthal angles into constant memory on GPU.
     * BUGFIX: the num_tracks and tot_num_tracks symbol copies above were
     * previously duplicated verbatim after this point; the redundant
     * copies have been removed. */
    hipMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0,
                      hipMemcpyHostToDevice);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
               "dev_tracks on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Allocates memory for Track boundary angular fluxes and leakages
* and FSR scalar fluxes on the GPU.
* @details Deletes memory for old flux arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeFluxArrays() {

  log_printf(INFO, "Initializing flux arrays on the GPU...");

  /* Release flux arrays left over from a previous simulation */
  if (_boundary_flux != NULL)
    hipFree(_boundary_flux);

  if (_scalar_flux != NULL)
    hipFree(_scalar_flux);

  /* Allocate memory for all flux arrays on the device */
  try{
    /* Boundary flux: two directions per Track, with one entry per polar
     * angle and energy group; scalar flux: one entry per FSR and group */
    size_t boundary_size =
        2*_tot_num_tracks * _polar_times_groups*sizeof(FP_PRECISION);
    size_t scalar_size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);

    hipMalloc((void**)&_boundary_flux, boundary_size);
    hipMalloc((void**)&_scalar_flux, scalar_size);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's fluxes "
               "on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Allocates memory for FSR source arrays on the GPU.
* @details Deletes memory for old source arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeSourceArrays() {

  log_printf(INFO, "Initializing source arrays on the GPU...");

  /* Release source arrays left over from a previous simulation */
  if (_source != NULL)
    hipFree(_source);

  if (_old_source != NULL)
    hipFree(_old_source);

  if (_reduced_source != NULL)
    hipFree(_reduced_source);

  /* Allocate memory for all source arrays on the device */
  try{
    /* Every source array holds one entry per FSR per energy group */
    size_t size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);

    hipMalloc((void**)&_source, size);
    hipMalloc((void**)&_old_source, size);
    hipMalloc((void**)&_reduced_source, size);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSR "
               "sources array on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Initialize Thrust vectors for the fission and absorption rates,
* source residuals, leakage and fission sources.
*/
void GPUSolver::initializeThrustVectors() {

  log_printf(INFO, "Initializing Thrust vectors on the GPU...");

  /* Reset the raw pointers and clear the vectors from any prior run */
  if (_fission_sources != NULL) {
    _fission_sources = NULL;
    _fission_sources_vec.clear();
  }

  if (_tot_absorption != NULL) {
    _tot_absorption = NULL;
    _tot_absorption_vec.clear();
  }

  if (_tot_fission != NULL) {
    _tot_fission = NULL;
    _tot_fission_vec.clear();
  }

  if (_source_residuals != NULL) {
    _source_residuals = NULL;
    _source_residuals_vec.clear();
  }

  if (_leakage != NULL) {
    _leakage = NULL;
    _leakage_vec.clear();
  }

  /* Allocate memory for fission, absorption and source vectors on device */
  try{
    /* Size every vector with one slot per device thread (# blocks x
     * # threads per block) so kernels can tally per-thread partials */
    _fission_sources_vec.resize(_B * _T);
    _tot_absorption_vec.resize(_B * _T);
    _tot_fission_vec.resize(_B * _T);
    _source_residuals_vec.resize(_B * _T);
    _leakage_vec.resize(_B * _T);

    /* Cache raw device pointers for use as kernel arguments */
    _fission_sources = thrust::raw_pointer_cast(&_fission_sources_vec[0]);
    _tot_absorption = thrust::raw_pointer_cast(&_tot_absorption_vec[0]);
    _tot_fission = thrust::raw_pointer_cast(&_tot_fission_vec[0]);
    _source_residuals = thrust::raw_pointer_cast(&_source_residuals_vec[0]);
    _leakage = thrust::raw_pointer_cast(&_leakage_vec[0]);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
               "Thrust vectors. Backtrace:%s", e.what());
  }
}
/**
* @brief This method computes the index for the Track j at azimuthal angle i.
* @details This method is necessary since the array of dev_tracks on the device
* is a 1D array which needs a one-to-one mapping from the 2D jagged
* array of Tracks on the host.
* @param i azimuthal angle number
* @param j the jth track at angle i
* @return an index into the device track array
*/
int GPUSolver::computeScalarTrackIndex(int i, int j) {

  /* Sum the Track counts for every azimuthal angle preceding angle i,
   * then offset by j to land on the jth Track at angle i */
  int index = j;

  for (int p = 0; p < i; p++)
    index += _num_tracks[p];

  return index;
}
/**
* @brief Builds a linear interpolation table to compute exponentials for
* each segment of each Track for each polar angle on the GPU.
*/
void GPUSolver::buildExpInterpTable(){
log_printf(INFO, "Building exponential interpolation table on device...");
/* Copy a boolean indicating whether or not to use the linear interpolation
* table or the exp intrinsic function */
hipMemcpyToSymbol(interpolate_exponential,(void*)&_interpolate_exponential,
sizeof(bool), 0, hipMemcpyHostToDevice);
/* Copy the sines of the polar angles which is needed if the user
* requested the use of the exp intrinsic to evaluate exponentials */
hipMemcpyToSymbol(sinthetas, (void*)_quad->getSinThetas(),
_num_polar * sizeof(FP_PRECISION), 0,
hipMemcpyHostToDevice);
/* Set size of interpolation table */
int num_array_values =
10 * sqrt(1. / (8. * _source_convergence_thresh * 1e-2));
_exp_table_spacing = 10. / num_array_values;
_inverse_exp_table_spacing = 1.0 / _exp_table_spacing;
_exp_table_size = _two_times_num_polar * num_array_values;
_exp_table_max_index = _exp_table_size - _two_times_num_polar - 1;
/* Allocate arrays */
FP_PRECISION* exp_table = new FP_PRECISION[_exp_table_size];
FP_PRECISION expon;
FP_PRECISION intercept;
FP_PRECISION slope;
/* Create exponential interpolation table */
for (int i = 0; i < num_array_values; i ++){
for (int p = 0; p < _num_polar; p++){
expon = exp(- (i * _exp_table_spacing) / _quad->getSinTheta(p));
slope = - expon / _quad->getSinTheta(p);
intercept = expon * (1 + (i * _exp_table_spacing)/_quad->getSinTheta(p));
exp_table[_two_times_num_polar * i + 2 * p] = slope;
exp_table[_two_times_num_polar * i + 2 * p + 1] = intercept;
}
}
/* Allocate memory for the interpolation table on the device */
hipMalloc((void**)&_exp_table, _exp_table_size * sizeof(FP_PRECISION));
/* Copy exponential interpolation table to the device */
hipMemcpy((void*)_exp_table, (void*)exp_table,
_exp_table_size * sizeof(FP_PRECISION),
hipMemcpyHostToDevice);
/* Copy table size and spacing to constant memory on the device */
hipMemcpyToSymbol(exp_table_spacing, (void*)&_exp_table_spacing,
sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(inverse_exp_table_spacing,
(void*)&_inverse_exp_table_spacing,
sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(exp_table_max_index, (void*)&_exp_table_max_index,
sizeof(int), 0, hipMemcpyHostToDevice);
free(exp_table);
return;
}
/**
* @brief Zero each Track's boundary fluxes for each energy group and polar
* angle in the "forward" and "reverse" directions.
*/
void GPUSolver::zeroTrackFluxes() {
  /* Total boundary flux entries: 2 directions per Track, one entry per
   * polar angle per energy group */
  int size = 2 * _tot_num_tracks * _num_polar * _num_groups;
  size *= sizeof(FP_PRECISION);
  /* BUGFIX: hipMemset's fill value is an int (applied byte-wise); pass an
   * int 0 instead of relying on the implicit conversion of the double
   * literal 0.0 */
  hipMemset(_boundary_flux, 0, size);
  return;
}
/**
* @brief Set the FSR scalar flux for each energy group to some value.
* @param value the value to assign to each FSR scalar flux
*/
void GPUSolver::flattenFSRFluxes(FP_PRECISION value) {

  /* Number of scalar flux entries: one per FSR per energy group */
  int size = _num_FSRs * _num_groups;

  /* BUGFIX: the previous hipMemset filled bytes, which only produces the
   * requested value for value == 0. A device-side fill assigns any value
   * correctly to every entry. */
  thrust::device_ptr<FP_PRECISION> flux =
      thrust::device_pointer_cast(_scalar_flux);
  thrust::fill(flux, flux + size, value);

  return;
}
/**
* @brief Set the FSR source for each energy group to some value.
* @param value the value to assign to each FSR source
*/
void GPUSolver::flattenFSRSources(FP_PRECISION value) {

  /* Number of source entries: one per FSR per energy group */
  int size = _num_FSRs * _num_groups;

  /* BUGFIX: the previous hipMemset filled bytes, which only produces the
   * requested value for value == 0. Device-side fills assign any value
   * correctly to both the current and old source arrays. */
  thrust::device_ptr<FP_PRECISION> source =
      thrust::device_pointer_cast(_source);
  thrust::device_ptr<FP_PRECISION> old_source =
      thrust::device_pointer_cast(_old_source);

  thrust::fill(source, source + size, value);
  thrust::fill(old_source, old_source + size, value);

  return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
*/
void GPUSolver::normalizeFluxes() {

  /* One FP_PRECISION slot of dynamic shared memory per thread, used by the
   * kernel as a per-thread fission source accumulation buffer */
  int shared_mem = sizeof(FP_PRECISION) * _T;

  /* Tally each thread's partial fission source into _fission_sources */
  hipLaunchKernelGGL(( computeFissionSourcesOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _FSR_volumes,
                                                       _FSR_materials,
                                                       _materials,
                                                       _scalar_flux,
                                                       _fission_sources);

  /* Reduce the per-thread partials on the device; the normalization factor
   * scales the total fission source to unity */
  FP_PRECISION norm_factor = 1.0 / thrust::reduce(_fission_sources_vec.begin(),
                                                  _fission_sources_vec.end());

  /* Scale all FSR scalar fluxes and Track boundary fluxes by the factor */
  hipLaunchKernelGGL(( normalizeFluxesOnDevice), dim3(_B), dim3(_T), 0, 0, _scalar_flux, _boundary_flux,norm_factor);
}
/**
* @brief Computes the total source (fission and scattering) in each FSR.
* @details This method computes the total source in each FSR based on
* this iteration's current approximation to the scalar flux. A
* residual for the source with respect to the source compute on
* the previous iteration is computed and returned. The residual
* is determined as follows:
 * \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 * \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @return the residual between this source and the previous source
*/
FP_PRECISION GPUSolver::computeFSRSources() {

  /* Update the total source in every FSR/group on the device; each thread
   * also tallies a partial squared residual vs. the previous iteration's
   * source into _source_residuals */
  hipLaunchKernelGGL(( computeFSRSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials,
                                        _scalar_flux, _source, _old_source,
                                        _reduced_source, 1.0 / _k_eff,
                                        _source_residuals);

  /* Reduce the per-thread partial residuals and convert to an RMS value
   * normalized by the total number of FSR/group source entries */
  FP_PRECISION residual = thrust::reduce(_source_residuals_vec.begin(),
                                         _source_residuals_vec.end());
  residual = sqrt(residual / (_num_groups * _num_FSRs));

  return residual;
}
/**
* @brief This method performs one transport sweep of all azimuthal angles,
* Tracks, Track segments, polar angles and energy groups.
* @details The method integrates the flux along each Track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each flat source region.
*/
void GPUSolver::transportSweep() {

  /* Dynamic shared memory: a per-thread angular flux scratch buffer of
   * _two_times_num_polar FP_PRECISION values */
  int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION);
  int tid_offset, tid_max;

  log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads",
             _B, _T);

  /* Initialize leakage to zero */
  thrust::fill(_leakage_vec.begin(), _leakage_vec.end(), 0.0);

  /* Initialize flux in each FSR to zero */
  flattenFSRFluxes(0.0);

  /* Sweep the first halfspace of azimuthal angle space */
  tid_offset = 0;
  tid_max = (_tot_num_tracks / 2);

  hipLaunchKernelGGL(( transportSweepOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _scalar_flux, _boundary_flux,
                                                 _reduced_source, _leakage,
                                                 _materials, _dev_tracks,
                                                 _exp_table,
                                                 tid_offset, tid_max);

  /* Sweep the second halfspace of azimuthal angle space.
   * NOTE(review): the offset is scaled by _num_groups, suggesting the
   * kernel decomposes its thread ids into (Track x energy group) work
   * units -- confirm against transportSweepOnDevice's indexing */
  tid_offset = tid_max * _num_groups;
  tid_max = _tot_num_tracks;

  hipLaunchKernelGGL(( transportSweepOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _scalar_flux, _boundary_flux,
                                                 _reduced_source, _leakage,
                                                 _materials, _dev_tracks,
                                                 _exp_table,
                                                 tid_offset, tid_max);
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux.
*/
void GPUSolver::addSourceToScalarFlux() {

  /* Launch one kernel that folds the reduced source contribution into
   * every FSR scalar flux entry, weighted by FSR volume and cross-sections */
  hipLaunchKernelGGL(( addSourceToScalarFluxOnDevice), dim3(_B),dim3(_T), 0, 0, _scalar_flux, _reduced_source,
                                          _FSR_volumes, _FSR_materials,
                                          _materials);
}
/**
* @brief Compute \f$ k_{eff} \f$ from the total fission and absorption rates.
* @details This method computes the current approximation to the
* multiplication factor on this iteration as follows:
* \f$ k_{eff} = \frac{\displaystyle\sum \displaystyle\sum \nu
* \Sigma_f \Phi V}{\displaystyle\sum
* \displaystyle\sum \Sigma_a \Phi V} \f$
*/
void GPUSolver::computeKeff() {

  FP_PRECISION tot_absorption;
  FP_PRECISION tot_fission;
  FP_PRECISION tot_leakage;

  /* Compute the total fission and absorption rates on the device.
   * This kernel stores partial rates in a Thrust vector with as many
   * entries as CUDA threads executed by the kernel */
  hipLaunchKernelGGL(( computeFissionAndAbsorption), dim3(_B), dim3(_T), 0, 0, _FSR_volumes, _FSR_materials,
                                          _materials, _scalar_flux,
                                          _tot_absorption, _tot_fission);

  /* Make sure the kernel has finished before reducing its partial rates */
  hipDeviceSynchronize();

  /* Compute the total absorption rate by reducing the partial absorption
   * rates compiled in the Thrust vector */
  tot_absorption = thrust::reduce(_tot_absorption_vec.begin(),
                                  _tot_absorption_vec.end());

  /* Compute the total fission rate by reducing the partial fission
   * rates compiled in the Thrust vector.
   * BUGFIX: a stray hipMemcpy of the full _B*_T-entry device array into
   * this single stack scalar (with an inverted HostToDevice direction)
   * followed this reduction -- it overran tot_fission and clobbered the
   * reduced value, so it has been removed. */
  tot_fission = thrust::reduce(_tot_fission_vec.begin(),_tot_fission_vec.end());

  /* Compute the total leakage by reducing the partial leakage
   * rates compiled in the Thrust vector */
  tot_leakage = 0.5 * thrust::reduce(_leakage_vec.begin(), _leakage_vec.end());

  /* Compute the new keff from the fission and absorption rates */
  _k_eff = tot_fission / (tot_absorption + tot_leakage);

  log_printf(DEBUG, "abs = %f, fiss = %f, leak = %f, keff = %f",
             tot_absorption, tot_fission, tot_leakage, _k_eff);
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID.
* @details This is a helper method for SWIG to allow users to retrieve
* FSR fission rates as a NumPy array. An example of how this method
* can be called from Python is as follows:
*
* @code
* num_FSRs = geometry.getNumFSRs()
* fission_rates = solver.computeFSRFissionRates(num_FSRs)
* @endcode
*
* @param fission_rates an array to store the fission rates (implicitly passed
* in as a NumPy array from Python)
* @param num_FSRs the number of FSRs passed in from Python
*/
void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) {

  log_printf(INFO, "Computing FSR fission rates...");

  /* Allocate memory for the FSR fission rates on the device */
  double* dev_fission_rates;
  hipMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(double));

  /* Compute the FSR fission rates on the device */
  hipLaunchKernelGGL(( computeFSRFissionRatesOnDevice), dim3(_B),dim3(_T), 0, 0, dev_fission_rates,
                                          _FSR_materials,
                                          _materials,
                                          _scalar_flux);

  /* Copy the fission rate array from the device to the host. The blocking
   * hipMemcpy also serves as the synchronization point for the kernel. */
  hipMemcpy((void*)fission_rates, (void*)dev_fission_rates,
            _num_FSRs * sizeof(double), hipMemcpyDeviceToHost);

  /* Deallocate the memory assigned to store the fission rates on the device */
  hipFree(dev_fission_rates);

  return;
}
| 82cad56eee109afbc045e933ebafbc5560d6da34.cu | #include "GPUSolver.h"
/** The number of azimuthal angles */
__constant__ int num_azim[1];

/** The number of energy groups */
__constant__ int num_groups[1];

/** The number of FSRs */
__constant__ int num_FSRs[1];

/** The number of polar angles */
__constant__ int num_polar[1];

/** Twice the number of polar angles */
__constant__ int two_times_num_polar[1];

/** The number of polar angles times energy groups */
__constant__ int polar_times_groups[1];

/** An array for the sines of the polar angle in the polar Quadrature set */
__constant__ FP_PRECISION sinthetas[MAX_POLAR_ANGLES];

/** An array of the weights for the polar angles from the Quadrature set,
 *  indexed by (azimuthal angle, polar angle) */
__constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES*MAX_AZIM_ANGLES];

/** A pointer to an array with the number of tracks per azimuthal angle */
__constant__ int num_tracks[MAX_AZIM_ANGLES/2];

/** The total number of Tracks */
__constant__ int tot_num_tracks[1];

/** A boolean indicating whether or not to use linear interpolation
 *  to compute the exponential in the transport equation */
__constant__ bool interpolate_exponential[1];

/** The maximum index of the exponential linear interpolation table */
__constant__ int exp_table_max_index[1];

/** The spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION exp_table_spacing[1];

/** The inverse spacing for the exponential linear interpolation table */
__constant__ FP_PRECISION inverse_exp_table_spacing[1];
/**
* @brief Fast method to round a single precision floating point value
* to an integer on the GPU.
* @param x float floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(float x) {
  /* __float2int_rd rounds toward negative infinity (i.e. floor) */
  return __float2int_rd(x);
}
/**
* @brief Fast method to round a double precision floating point value
* to an integer on the GPU.
* @param x double floating point value to round
* @return the rounded down integer value
*/
__device__ int round_to_int(double x) {
  /* __double2int_rd rounds toward negative infinity (i.e. floor) */
  return __double2int_rd(x);
}
/**
* @brief Compute the total fission source from all FSRs on the GPU.
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_materials on the device
* @param scalar_flux the scalar flux in each FSR and energy group
* @param fission_sources array of fission sources in each FSR and energy group
*/
__global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes,
                                              int* FSR_materials,
                                              dev_material* materials,
                                              FP_PRECISION* scalar_flux,
                                              FP_PRECISION* fission_sources) {

  /* Use a shared memory buffer for each thread's fission source; the host
   * launch supplies blockDim.x * sizeof(FP_PRECISION) bytes */
  extern __shared__ FP_PRECISION shared_fission_source[];

  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  dev_material* curr_material;
  FP_PRECISION* nu_sigma_f;
  FP_PRECISION volume;
  FP_PRECISION source;

  /* Initialize fission source to zero */
  shared_fission_source[threadIdx.x] = 0;

  /* Iterate over all FSRs in a grid-stride loop so any launch configuration
   * covers the full FSR range */
  while (tid < *num_FSRs) {

    curr_material = &materials[FSR_materials[tid]];
    nu_sigma_f = curr_material->_nu_sigma_f;
    volume = FSR_volumes[tid];

    /* Iterate over energy groups and update fission source for
     * this thread block */
    for (int e=0; e < *num_groups; e++) {
      source = nu_sigma_f[e] * scalar_flux(tid,e) * volume;
      shared_fission_source[threadIdx.x] += source;
    }

    /* Increment thread id */
    tid += blockDim.x * gridDim.x;
  }

  /* Copy this thread's partial fission source to global memory; the host
   * reduces the per-thread partials afterwards */
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  fission_sources[tid] = shared_fission_source[threadIdx.x];

  return;
}
/**
* @brief Normalizes all FSR scalar fluxes and Track boundary angular
* fluxes to the total fission source (times \f$ \nu \f$).
* @param scalar_flux an array of the FSR scalar fluxes
* @param boundary_flux an array of the Track boundary fluxes
* @param norm_factor the normalization factor
*/
__global__ void normalizeFluxesOnDevice(FP_PRECISION* scalar_flux,
                                        FP_PRECISION* boundary_flux,
                                        FP_PRECISION norm_factor) {

  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  /* Normalize scalar fluxes for each FSR (grid-stride loop over FSRs) */
  while(tid < *num_FSRs) {

    for (int e=0; e < *num_groups; e++)
      scalar_flux(tid,e) *= norm_factor;

    tid += blockDim.x * gridDim.x;
  }

  tid = threadIdx.x + blockIdx.x * blockDim.x;

  /* Normalize angular boundary fluxes for each Track: two directions,
   * each with polar_times_groups entries */
  while(tid < *tot_num_tracks) {
    for (int pe2=0; pe2 < 2*(*polar_times_groups); pe2++)
      boundary_flux(tid,pe2) *= norm_factor;

    tid += blockDim.x * gridDim.x;
  }

  return;
}
/**
* @brief Computes the total source (fission and scattering) in each FSR
* on the GPU.
* @details This method computes the total source in each region based on
* this iteration's current approximation to the scalar flux. A
* residual for the source with respect to the source compute on
* the previous iteration is computed and returned. The residual
* is determined as follows:
 * \f$ res = \sqrt{\frac{\displaystyle\sum \displaystyle\sum
 * \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}{\# FSRs}} \f$
*
* @param FSR_materials an array of FSR Material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param source an array of FSR sources from this iteration
* @param old_source an array of current FSR sources from previous iteration
* @param reduced_source an array of FSR sources / total xs
* @param inverse_k_eff the inverse of keff
* @param source_residuals an array of the FSR source residuals
* @return the residual between this source and the previous source
*/
__global__ void computeFSRSourcesOnDevice(int* FSR_materials,
                                          dev_material* materials,
                                          FP_PRECISION* scalar_flux,
                                          FP_PRECISION* source,
                                          FP_PRECISION* old_source,
                                          FP_PRECISION* reduced_source,
                                          FP_PRECISION inverse_k_eff,
                                          FP_PRECISION* source_residuals) {

  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  /* Reset this thread's partial residual for the source update to zero */
  source_residuals[threadIdx.x + blockIdx.x * blockDim.x] = 0.0;

  FP_PRECISION fission_source;
  FP_PRECISION scatter_source;

  dev_material* curr_material;
  FP_PRECISION* nu_sigma_f;
  FP_PRECISION* sigma_s;
  FP_PRECISION* sigma_t;
  FP_PRECISION* chi;

  /* Iterate over all FSRs in a grid-stride loop */
  while (tid < *num_FSRs) {

    curr_material = &materials[FSR_materials[tid]];

    nu_sigma_f = curr_material->_nu_sigma_f;
    sigma_s = curr_material->_sigma_s;
    sigma_t = curr_material->_sigma_t;
    chi = curr_material->_chi;

    /* Initialize the fission source to zero for this FSR */
    fission_source = 0;

    /* Compute total fission source for current region */
    for (int e=0; e < *num_groups; e++)
      fission_source += scalar_flux(tid,e) * nu_sigma_f[e];

    /* Compute total scattering source for this FSR in group G */
    for (int G=0; G < *num_groups; G++) {
      scatter_source = 0;

      for (int g=0; g < *num_groups; g++)
        scatter_source += sigma_s[G*(*num_groups)+g] * scalar_flux(tid,g);

      /* Set the total source for this FSR in this group (isotropic, hence
       * the 1 / 4*pi factor) */
      source(tid,G) = (inverse_k_eff * fission_source * chi[G] +
                       scatter_source) * ONE_OVER_FOUR_PI;

      /* NOTE(review): __fdividef is a single-precision fast division;
       * if FP_PRECISION is double this narrows the quotient -- confirm */
      reduced_source(tid,G) = __fdividef(source(tid,G), sigma_t[G]);

      /* Compute the norm of residuals of the sources for convergence;
       * the 1E-10 guard avoids dividing by a (near-)zero source */
      if (fabs(source(tid,G)) > 1E-10)
        source_residuals[threadIdx.x + blockIdx.x * blockDim.x] +=
            pow((source(tid,G) - old_source(tid,G)) / source(tid,G), 2);

      /* Update the old source */
      old_source(tid,G) = source(tid,G);
    }

    /* Increment the thread id */
    tid += blockDim.x * gridDim.x;
  }

  return;
}
/**
* @brief Compute the total fission source from all FSRs and energy groups
* on the GPU.
* @param FSR_volumes an array of the FSR volumes
* @param FSR_materials an array of the FSR Material UIDs
* @param materials an array of the dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
* @param tot_absorption an array of FSR absorption rates
* @param tot_fission an array of FSR fission rates
*/
__global__ void computeFissionAndAbsorption(FP_PRECISION* FSR_volumes,
                                            int* FSR_materials,
                                            dev_material* materials,
                                            FP_PRECISION* scalar_flux,
                                            FP_PRECISION* tot_absorption,
                                            FP_PRECISION* tot_fission) {

  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  dev_material* curr_material;
  FP_PRECISION* nu_sigma_f;
  FP_PRECISION* sigma_a;
  FP_PRECISION volume;

  /* Per-thread running totals accumulated across all FSRs this thread
   * visits in the grid-stride loop */
  FP_PRECISION absorption = 0.;
  FP_PRECISION fission = 0.;

  /* Iterate over all FSRs */
  while (tid < *num_FSRs) {

    curr_material = &materials[FSR_materials[tid]];
    nu_sigma_f = curr_material->_nu_sigma_f;
    sigma_a = curr_material->_sigma_a;
    volume = FSR_volumes[tid];

    FP_PRECISION curr_abs = 0.;
    FP_PRECISION curr_fission = 0.;

    /* Iterate over all energy groups and update fission and absorption
     * rates for this thread block */
    for (int e=0; e < *num_groups; e++) {
      curr_abs += sigma_a[e] * scalar_flux(tid,e);
      curr_fission += nu_sigma_f[e] * scalar_flux(tid,e);
    }

    absorption += curr_abs * volume;
    fission += curr_fission * volume;

    /* Increment thread id */
    tid += blockDim.x * gridDim.x;
  }

  /* Copy this thread's partial fission and absorption rates to global
   * memory; the host reduces the per-thread partials afterwards */
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  tot_absorption[tid] = absorption;
  tot_fission[tid] = fission;

  return;
}
/**
* @brief Perform an atomic addition in double precision to an array address
* on the GPU.
* @details This method is straight out of CUDA C Developers Guide (cc 2013).
* @param address the array memory address
* @param val the value to add to the array
* @return the atomically added array value and input value
*/
__device__ double atomicAdd(double* address, double val) {
  /* Compare-and-swap loop from the CUDA C Programming Guide: reinterpret
   * the double's bits as a 64-bit integer so atomicCAS can be used.
   * NOTE(review): compute capability 6.0+ provides a native double
   * atomicAdd; this overload exists for older architectures. */
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;

  do {
    assumed = old;
    /* Retry until no other thread has modified the value in between the
     * read (assumed) and the CAS */
    old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +
                    __longlong_as_double(assumed)));
  } while (assumed != old);

  return __longlong_as_double(old);
}
/**
* @brief Computes the exponential term in the transport equation for a
* Track segment on the GPU.
* @details This method computes \f$ 1 - exp(-l\Sigma^T_g/sin(\theta_p)) \f$
* for a segment with total group cross-section and for
* some polar angle.
* @param sigma_t the total group cross-section at this energy
* @param length the length of the line segment projected in the xy-plane
* @param _exp_table the exponential linear interpolation table
* @param p the polar angle index
* @return the evaluated exponential
*/
__device__ FP_PRECISION computeExponential(FP_PRECISION sigma_t,
                                           FP_PRECISION length,
                                           FP_PRECISION* _exp_table,
                                           int p) {

  FP_PRECISION exponential;

  /* Optical path length for this segment */
  FP_PRECISION tau = sigma_t * length;

  /* Evaluate the exponential using the linear interpolation table: each
   * table row holds a (slope, intercept) pair per polar angle */
  if (*interpolate_exponential) {
    int index;

    index = round_to_int(tau * (*inverse_exp_table_spacing));
    index *= (*two_times_num_polar);
    exponential = (1. - (_exp_table[index+2 * p] * tau +
                  _exp_table[index + 2 * p +1]));
  }

  /* Evalute the exponential using the intrinsic exp(...) function */
  else {
    FP_PRECISION sintheta = sinthetas[p];
    #ifdef SINGLE
    /* __expf is the fast single-precision intrinsic (reduced accuracy) */
    exponential = 1.0 - __expf(- tau / sintheta);
    #else
    exponential = 1.0 - exp(- tau / sintheta);
    #endif
  }

  return exponential;
}
/**
* @brief Computes the contribution to the FSR scalar flux from a Track segment
* in a single energy group on the GPU.
* @details This method integrates the angular flux for a Track segment across
* energy groups and polar angles, and tallies it into the FSR scalar
* flux, and updates the Track's angular flux.
* @param curr_segment a pointer to the Track segment of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param energy_group the energy group of interest
* @param materials the array of dev_material pointers
* @param track_flux a pointer to the Track's angular flux
* @param reduced_source the array of FSR sources / total xs
* @param polar_weights the array of polar Quadrature weights
* @param _exp_table the exponential interpolation table
* @param scalar_flux the array of FSR scalar fluxes
*/
__device__ void scalarFluxTally(dev_segment* curr_segment,
                                int azim_index,
                                int energy_group,
                                dev_material* materials,
                                FP_PRECISION* track_flux,
                                FP_PRECISION* reduced_source,
                                FP_PRECISION* polar_weights,
                                FP_PRECISION* _exp_table,
                                FP_PRECISION* scalar_flux) {

  int fsr_id = curr_segment->_region_uid;
  FP_PRECISION length = curr_segment->_length;
  dev_material* curr_material = &materials[curr_segment->_material_uid];
  FP_PRECISION *sigma_t = curr_material->_sigma_t;

  /* The change in angular flux along this Track segment in this FSR */
  FP_PRECISION delta_psi;
  FP_PRECISION exponential;

  /* Zero the FSR scalar flux contribution from this segment and energy group */
  FP_PRECISION fsr_flux = 0.0;

  /* Loop over polar angles: attenuate the angular flux toward the local
   * source and tally the polar-weighted change into the FSR flux */
  for (int p=0; p < *num_polar; p++) {
    exponential = computeExponential(sigma_t[energy_group],
                                     length, _exp_table, p);
    delta_psi = (track_flux[p] - reduced_source(fsr_id,energy_group)) *
                exponential;
    fsr_flux += delta_psi * polar_weights(azim_index,p);
    track_flux[p] -= delta_psi;
  }

  /* Atomically increment the scalar flux for this FSR since multiple
   * threads (Tracks) may tally into the same FSR concurrently */
  atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux);
}
/**
* @brief Updates the boundary flux for a Track given boundary conditions
* on the GPU.
* @details For reflective boundary conditions, the outgoing boundary flux
* for the Track is given to the reflecting track. For vacuum
* boundary conditions, the outgoing flux tallied as leakage.
* Note: Only one energy group is transferred by this routine.
* @param curr_track a pointer to the Track of interest
* @param azim_index a pointer to the azimuthal angle index for this segment
* @param track_flux an array of the outgoing Track flux
* @param boundary_flux an array of all angular fluxes
* @param leakage an array of leakages for each CUDA thread
* @param polar_weights an array of polar Quadrature weights
* @param energy_angle_index the energy group index
* @param direction the Track direction (forward - true, reverse - false)
*/
__device__ void transferBoundaryFlux(dev_track* curr_track,
                                     int azim_index,
                                     FP_PRECISION* track_flux,
                                     FP_PRECISION* boundary_flux,
                                     FP_PRECISION* leakage,
                                     FP_PRECISION* polar_weights,
                                     int energy_angle_index,
                                     bool direction) {

  int start = energy_angle_index;
  bool bc;
  int track_out_id;

  /* Extract boundary conditions for this Track and the pointer to the
   * outgoing reflective Track, and index into the leakage array */

  /* For the "forward" direction */
  if (direction) {
    bc = curr_track->_bc_out;
    track_out_id = curr_track->_track_out;
    /* Offset into the reflected Track's forward/reverse flux half */
    start += curr_track->_refl_out * (*polar_times_groups);
  }

  /* For the "reverse" direction */
  else {
    bc = curr_track->_bc_in;
    track_out_id = curr_track->_track_in;
    start += curr_track->_refl_in * (*polar_times_groups);
  }

  FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start);

  /* Transfer the flux to the reflected Track when bc is true (reflective);
   * otherwise (vacuum, bc false) zero the outgoing flux and tally the
   * polar-weighted outgoing flux as leakage */
  for (int p=0; p < *num_polar; p++) {
    track_out_flux[p] = track_flux[p] * bc;
    leakage[0] += track_flux[p] * polar_weights(azim_index,p) * (!bc);
  }
}
/**
* @brief This method performs one transport sweep of one halfspace of all
* azimuthal angles, tracks, segments, polar angles and energy groups
* on the GPU.
* @details The method integrates the flux along each track and updates the
* boundary fluxes for the corresponding output Track, while updating
* the scalar flux in each FSR.
* @param scalar_flux an array of FSR scalar fluxes
* @param boundary_flux an array of Track boundary fluxes
* @param reduced_source an array of FSR sources / total xs
 * @param leakage an array of angular flux leakages
* @param materials an array of dev_material pointers
* @param tracks an array of Tracks
* @param _exp_table an array for the exponential interpolation table
* @param tid_offset the Track offset for azimuthal angle halfspace
* @param tid_max the upper bound on the Track IDs for this azimuthal
* angle halfspace
*/
__global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux,
                                       FP_PRECISION* boundary_flux,
                                       FP_PRECISION* reduced_source,
                                       FP_PRECISION* leakage,
                                       dev_material* materials,
                                       dev_track* tracks,
                                       FP_PRECISION* _exp_table,
                                       int tid_offset,
                                       int tid_max) {

  /* Shared memory buffer for each thread's angular flux (forward half in
   * the first *num_polar slots, reverse half in the second) */
  extern __shared__ FP_PRECISION temp_flux[];
  FP_PRECISION* track_flux;

  /* Each value of tid maps to one (Track, energy group) pair */
  int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x;
  int track_id = tid / *num_groups;
  int track_flux_index = threadIdx.x * (*two_times_num_polar);
  int energy_group = tid % (*num_groups);
  int energy_angle_index = energy_group * (*num_polar);

  dev_track* curr_track;
  int azim_index;
  int num_segments;
  dev_segment* curr_segment;

  /* Iterate over Track with azimuthal angles in (0, pi/2) */
  while (track_id < tid_max) {

    /* Initialize local registers with important data */
    curr_track = &tracks[track_id];
    azim_index = curr_track->_azim_angle_index;
    num_segments = curr_track->_num_segments;

    /* Retrieve pointer to thread's shared memory buffer for angular flux */
    track_flux = &temp_flux[track_flux_index];

    /* Put Track's flux in the shared memory temporary flux array */
    for (int p=0; p < *num_polar; p++) {

      /* Forward flux along this Track */
      track_flux[p] = boundary_flux(track_id,p+energy_angle_index);

      /* Reverse flux along this Track */
      track_flux[(*num_polar) + p] =
            boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups));
    }

    /* Loop over each Track segment in forward direction */
    for (int i=0; i < num_segments; i++) {
      curr_segment = &curr_track->_segments[i];
      scalarFluxTally(curr_segment, azim_index, energy_group, materials,
                      track_flux, reduced_source, polar_weights,
                      _exp_table, scalar_flux);
    }

    /* Transfer boundary angular flux to outgoing Track */
    transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
                         &leakage[threadIdx.x + blockIdx.x * blockDim.x],
                         polar_weights, energy_angle_index, true);

    /* Loop over each Track segment in reverse direction, using the second
     * (reverse) half of this thread's shared memory buffer */
    track_flux = &temp_flux[track_flux_index + (*num_polar)];

    for (int i=num_segments-1; i > -1; i--) {
      curr_segment = &curr_track->_segments[i];
      scalarFluxTally(curr_segment, azim_index, energy_group, materials,
                      track_flux, reduced_source, polar_weights,
                      _exp_table, scalar_flux);
    }

    /* Transfer boundary angular flux to outgoing Track */
    transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux,
                         &leakage[threadIdx.x + blockIdx.x * blockDim.x],
                         polar_weights, energy_angle_index, false);

    /* Update the indices for this thread to the next Track, energy group */
    tid += blockDim.x * gridDim.x;
    track_id = tid / *num_groups;
    energy_group = tid % (*num_groups);
    energy_angle_index = energy_group * (*num_polar);
  }

  return;
}
/**
* @brief Add the source term contribution in the transport equation to
* the FSR scalar flux on the GPU.
* @param scalar_flux an array of FSR scalar fluxes
* @param reduced_source an array of FSR sources / total xs
* @param FSR_volumes an array of FSR volumes
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
*/
__global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux,
                                              FP_PRECISION* reduced_source,
                                              FP_PRECISION* FSR_volumes,
                                              int* FSR_materials,
                                              dev_material* materials) {

  int stride = blockDim.x * gridDim.x;

  /* Stride over all FSRs, one FSR per thread per pass */
  for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < *num_FSRs;
       tid += stride) {

    dev_material* curr_material = &materials[FSR_materials[tid]];
    FP_PRECISION volume = FSR_volumes[tid];
    FP_PRECISION* sigma_t = curr_material->_sigma_t;

    /* Fold the isotropic source into the tallied flux for each group */
    for (int e=0; e < *num_groups; e++) {
      scalar_flux(tid,e) *= 0.5;
      scalar_flux(tid,e) = FOUR_PI * reduced_source(tid,e) +
          __fdividef(scalar_flux(tid,e), (sigma_t[e] * volume));
    }
  }

  return;
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID on the GPU.
* @details This is a helper method for the
* GPUSolver::computeFSRFissionRates(...) method.
 * @param fission_rates an array in which to store the FSR fission rates
* @param FSR_materials an array of FSR material UIDs
* @param materials an array of dev_material pointers
* @param scalar_flux an array of FSR scalar fluxes
*/
__global__ void computeFSRFissionRatesOnDevice(double* fission_rates,
                                               int* FSR_materials,
                                               dev_material* materials,
                                               FP_PRECISION* scalar_flux) {

  int stride = blockDim.x * gridDim.x;

  /* Stride over all FSRs and compute each one's fission rate */
  for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < *num_FSRs;
       tid += stride) {

    dev_material* curr_material = &materials[FSR_materials[tid]];
    FP_PRECISION* sigma_f = curr_material->_sigma_f;

    /* Accumulate the energy-integrated fission rate for this FSR */
    double rate = 0.0;

    for (int e=0; e < *num_groups; e++)
      rate += sigma_f[e] * scalar_flux(tid,e);

    fission_rates[tid] = rate;
  }

  return;
}
/**
* @brief Constructor initializes arrays for dev_tracks and dev_materials..
* @details The constructor retrieves the number of energy groups and FSRs
* and azimuthal angles from the Geometry and TrackGenerator if
 *          passed in as parameters by the user. The constructor initializes
* the number of CUDA threads and thread blocks each to a default
* of 64.
* @param geometry an optional pointer to the Geometry
 * @param track_generator an optional pointer to the TrackGenerator
*/
GPUSolver::GPUSolver(Geometry* geometry, TrackGenerator* track_generator) :
  Solver(geometry, track_generator) {

  /* Default CUDA launch configuration: 64 blocks of 64 threads */
  _B = 64;
  _T = 64;

  /* Device arrays are allocated lazily by the initialize*() methods */
  _materials = NULL;
  _dev_tracks = NULL;
  _FSR_materials = NULL;

  /* Raw pointers into the Thrust reduction vectors */
  _tot_absorption = NULL;
  _tot_fission = NULL;
  _leakage = NULL;

  if (track_generator != NULL)
    setTrackGenerator(track_generator);

  if (geometry != NULL)
    setGeometry(geometry);
}
/**
* @brief Solver destructor frees all memory on the device, including arrays
* for the FSR scalar fluxes and sources and Track boundary fluxes.
*/
GPUSolver::~GPUSolver() {

  /* Release the FSR geometry data on the device */
  if (_FSR_volumes != NULL) { cudaFree(_FSR_volumes); _FSR_volumes = NULL; }
  if (_FSR_materials != NULL) { cudaFree(_FSR_materials); _FSR_materials = NULL; }

  /* Release the Material and Track data on the device */
  if (_materials != NULL) { cudaFree(_materials); _materials = NULL; }
  if (_dev_tracks != NULL) { cudaFree(_dev_tracks); _dev_tracks = NULL; }

  /* Release the flux and source arrays on the device */
  if (_boundary_flux != NULL) { cudaFree(_boundary_flux); _boundary_flux = NULL; }
  if (_scalar_flux != NULL) { cudaFree(_scalar_flux); _scalar_flux = NULL; }
  if (_source != NULL) { cudaFree(_source); _source = NULL; }
  if (_old_source != NULL) { cudaFree(_old_source); _old_source = NULL; }
  if (_reduced_source != NULL) { cudaFree(_reduced_source); _reduced_source = NULL; }

  /* Clear the Thrust reduction vectors; their device storage is released
   * when the vector members themselves are destroyed */
  if (_fission_sources != NULL) { _fission_sources_vec.clear(); _fission_sources = NULL; }
  if (_tot_absorption != NULL) { _tot_absorption_vec.clear(); _tot_absorption = NULL; }
  if (_tot_fission != NULL) { _tot_fission_vec.clear(); _tot_fission = NULL; }
  if (_source_residuals != NULL) { _source_residuals_vec.clear(); _source_residuals = NULL; }
  if (_leakage != NULL) { _leakage_vec.clear(); _leakage = NULL; }

  /* Release the exponential interpolation table on the device */
  if (_exp_table != NULL) { cudaFree(_exp_table); _exp_table = NULL; }
}
/**
 * @brief Returns the number of thread blocks to execute on the GPU.
 * @return the number of thread blocks (defaults to 64)
 */
int GPUSolver::getNumThreadBlocks() {
  return _B;
}
/**
 * @brief Returns the number of threads per block to execute on the GPU.
 * @return the number of threads per block (defaults to 64)
 */
int GPUSolver::getNumThreadsPerBlock() {
  return _T;
}
/**
 * @brief Returns the FSR scalar flux for some energy group.
 * @param fsr_id the ID for the FSR of interest
 * @param energy_group the energy group of interest (1-based index)
 * @return the scalar flux for the requested FSR and energy group
 */
FP_PRECISION GPUSolver::getFSRScalarFlux(int fsr_id, int energy_group) {

  /* Error checking: the valid ranges are 0 <= fsr_id < _num_FSRs and
   * 1 <= energy_group <= _num_groups. The previous messages described
   * the bounds backwards ("IDs greater than or equal to"). */
  if (fsr_id >= _num_FSRs)
    log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
               " group %d since the solver only contains FSRs with IDs less "
               "than %d", fsr_id, energy_group, _num_FSRs);

  if (fsr_id < 0)
    log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
               " group %d since FSRs do not have negative IDs",
               fsr_id, energy_group);

  if (energy_group-1 >= _num_groups)
    log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
               " group %d since the solver only has %d energy groups",
               fsr_id, energy_group, _num_groups);

  if (energy_group <= 0)
    log_printf(ERROR, "Unable to return a scalar flux for FSR id = %d in energy"
               " group %d since energy group indices start at 1",
               fsr_id, energy_group);

  /* Copy the scalar flux for this FSR and energy group from the device */
  FP_PRECISION fsr_scalar_flux;
  int flux_index = fsr_id * _num_groups + (energy_group - 1);
  cudaMemcpy((void*)&fsr_scalar_flux, (void*)&_scalar_flux[flux_index],
             sizeof(FP_PRECISION), cudaMemcpyDeviceToHost);

  return fsr_scalar_flux;
}
/**
 * @brief Return the scalar flux array indexed by FSR IDs and energy groups
 *        which contains the corresponding fluxes for each flat source region.
 * @return a newly allocated array of FSR scalar fluxes; ownership passes to
 *         the caller, who must release it with delete []
 */
FP_PRECISION* GPUSolver::getFSRScalarFluxes() {

  if (_scalar_flux == NULL)
    log_printf(ERROR, "Unable to return the GPUSolver's scalar flux "
               "array since it has not yet been allocated in memory");

  /* Copy the scalar flux for all FSRs from the device to the host */
  FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_FSRs * _num_groups];

  cudaMemcpy((void*)fsr_scalar_fluxes, (void*)_scalar_flux,
             _num_FSRs * _num_groups * sizeof(FP_PRECISION),
             cudaMemcpyDeviceToHost);

  return fsr_scalar_fluxes;
}
/**
 * @brief Returns the FSR source for some energy group.
 * @param fsr_id the ID for the FSR of interest
 * @param energy_group the energy group of interest (1-based index)
 * @return the source for the requested FSR and energy group
 */
FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int energy_group) {

  /* Error checking: the valid ranges are 0 <= fsr_id < _num_FSRs and
   * 1 <= energy_group <= _num_groups. The previous messages described
   * the bounds backwards ("IDs greater than or equal to"). */
  if (fsr_id >= _num_FSRs)
    log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
               " group %d since the solver only contains FSRs with IDs less "
               "than %d", fsr_id, energy_group, _num_FSRs);

  if (fsr_id < 0)
    log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
               " group %d since FSRs do not have negative IDs",
               fsr_id, energy_group);

  if (energy_group-1 >= _num_groups)
    log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
               " group %d since the solver only has %d energy groups",
               fsr_id, energy_group, _num_groups);

  if (energy_group <= 0)
    log_printf(ERROR, "Unable to return a source for FSR id = %d in energy"
               " group %d since energy group indices start at 1",
               fsr_id, energy_group);

  /* Copy the source for this FSR and energy group from the device */
  FP_PRECISION fsr_source;
  int flux_index = fsr_id * _num_groups + (energy_group - 1);
  cudaMemcpy((void*)&fsr_source, (void*)&_source[flux_index],
             sizeof(FP_PRECISION), cudaMemcpyDeviceToHost);

  return fsr_source;
}
/**
 * @brief Sets the number of thread blocks (>0) for CUDA kernels.
 * @param num_blocks the number of thread blocks (must be positive)
 */
void GPUSolver::setNumThreadBlocks(int num_blocks) {

  /* The previous check only rejected negative values, silently accepting
   * 0 even though zero blocks would break every kernel launch */
  if (num_blocks <= 0)
    log_printf(ERROR, "Unable to set the number of CUDA thread blocks "
               "to %d since it is not a positive number", num_blocks);

  _B = num_blocks;
}
/**
 * @brief Sets the number of threads per block (>0) for CUDA kernels.
 * @param num_threads the number of threads per block (must be positive)
 */
void GPUSolver::setNumThreadsPerBlock(int num_threads) {

  /* The previous check only rejected negative values, silently accepting
   * 0 even though zero threads would break every kernel launch */
  if (num_threads <= 0)
    log_printf(ERROR, "Unable to set the number of CUDA threads per block "
               "to %d since it is not a positive number", num_threads);

  _T = num_threads;
}
/**
 * @brief Sets the Geometry pointer for the GPUSolver.
 * @details The Geometry must already have initialized FSR offset maps
 *          and segmentized the TrackGenerator's tracks. Each of these
 *          should be initiated in Python prior to assigning a Geometry
 *          to the GPUSolver:
 *
 * @code
 *          geometry.initializeFlatSourceRegions()
 *          track_generator.generateTracks()
 * @endcode
 *
 * @param geometry a pointer to a Geometry
 */
void GPUSolver::setGeometry(Geometry* geometry) {

  Solver::setGeometry(geometry);

  /* Clone the host Materials into dev_material structs on the device */
  initializeMaterials();

  /* Copy the number of energy groups to constant memory on the GPU */
  cudaMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0,
                     cudaMemcpyHostToDevice);
}
/**
 * @brief Sets the TrackGenerator with characteristic tracks for the GPUSolver.
 * @details The TrackGenerator must already have generated Tracks and have
 *          used ray tracing to segmentize them across the Geometry. This
 *          should be initated in Python prior to assigning the TrackGenerator
 *          to the GPUSolver:
 *
 * @code
 *          track_generator.generateTracks()
 * @endcode
 *
 * @param track_generator a pointer to a TrackGenerator
 */
void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) {

  Solver::setTrackGenerator(track_generator);

  /* Clone the host Tracks into dev_track structs on the device */
  initializeTracks();
}
/**
 * @brief Creates a polar Quadrature object for the GPUSolver on the GPU.
 */
void GPUSolver::initializePolarQuadrature() {

  log_printf(INFO, "Initializing polar quadrature on the GPU...");

  /* Deletes the old Quadrature if one existed */
  if (_quad != NULL)
    delete _quad;

  _quad = new Quadrature(_quadrature_type, _num_polar);
  _polar_times_groups = _num_groups * _num_polar;

  /* Copy the number of polar angles to constant memory on the GPU */
  cudaMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0,
                     cudaMemcpyHostToDevice);

  /* Copy twice the number of polar angles to constant memory on the GPU */
  cudaMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar,
                     sizeof(int), 0, cudaMemcpyHostToDevice);

  /* Copy the number of polar angles times energy groups to constant memory
   * on the GPU */
  cudaMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups,
                     sizeof(int), 0, cudaMemcpyHostToDevice);

  /* Compute polar times azimuthal angle weights. _polar_weights is
   * allocated below with malloc(), so a previous table must be released
   * with free() -- the prior code mismatched malloc() with delete [],
   * which is undefined behavior.
   * NOTE(review): this assumes any _polar_weights allocated elsewhere
   * (e.g. by the base Solver) also used malloc() -- confirm. */
  if (_polar_weights != NULL)
    free(_polar_weights);

  _polar_weights =
      (FP_PRECISION*)malloc(_num_polar * _num_azim * sizeof(FP_PRECISION));

  FP_PRECISION* multiples = _quad->getMultiples();
  FP_PRECISION* azim_weights = _track_generator->getAzimWeights();

  for (int i=0; i < _num_azim; i++) {
    for (int j=0; j < _num_polar; j++)
      _polar_weights[i*_num_polar+j] = azim_weights[i]*multiples[j]*FOUR_PI;
  }

  /* Copy the polar weights to constant memory on the GPU */
  cudaMemcpyToSymbol(polar_weights, (void*)_polar_weights,
      _num_polar * _num_azim * sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);
}
/**
 * @brief Initializes the FSR volumes and dev_materials array on the GPU.
 * @details This method assigns each FSR a unique, monotonically increasing
 *          ID, sets the Material for each FSR, and assigns a volume based on
 *          the cumulative length of all of the segments inside the FSR.
 */
void GPUSolver::initializeFSRs() {

  log_printf(INFO, "Initializing FSRs on the GPU...");

  /* Delete old FSRs array if it exists */
  if (_FSR_volumes != NULL)
    cudaFree(_FSR_volumes);

  if (_FSR_materials != NULL)
    cudaFree(_FSR_materials);

  /* Allocate memory for all FSR volumes and dev_materials on the device */
  try{

    /* Allocate memory on device for FSR volumes and Material UIDs */
    cudaMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION));
    cudaMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int));

    /* Create a temporary FSR array to populate and then copy to device */
    FP_PRECISION* temp_FSR_volumes = new FP_PRECISION[_num_FSRs];

    /* Create a temporary FSR Material UIDs array to populate and then copy
     * to device */
    int* FSRs_to_material_UIDs = new int[_num_FSRs];

    /* Populate FSR Material UIDs array */
    for (int i = 0; i < _num_FSRs; i++)
      FSRs_to_material_UIDs[i] = _geometry->findFSRMaterial(i)->getUid();

    /* Initialize each FSR's volume to 0 to avoid NaNs. memset() takes an
     * int fill byte; the previous FP_PRECISION(0.) argument narrowed to 0
     * anyway but obscured the byte-wise semantics */
    memset(temp_FSR_volumes, 0, _num_FSRs*sizeof(FP_PRECISION));

    Track* track;
    int num_segments;
    segment* curr_segment;
    segment* segments;
    FP_PRECISION volume;

    FP_PRECISION* azim_weights = _track_generator->getAzimWeights();

    /* Set each FSR's volume by accumulating the total length of all Tracks
     * inside the FSR. Iterate over azimuthal angle, Track, Track segment*/
    for (int i=0; i < _num_azim; i++) {
      for (int j=0; j < _num_tracks[i]; j++) {

        track = &_track_generator->getTracks()[i][j];
        num_segments = track->getNumSegments();
        segments = track->getSegments();

        /* Iterate over the Track's segments to update FSR volumes */
        for (int s = 0; s < num_segments; s++) {
          curr_segment = &segments[s];
          volume = curr_segment->_length * azim_weights[i];
          temp_FSR_volumes[curr_segment->_region_id] += volume;
        }
      }
    }

    /* Copy the temporary array of FSRs to the device */
    cudaMemcpy((void*)_FSR_volumes, (void*)temp_FSR_volumes,
               _num_FSRs * sizeof(FP_PRECISION), cudaMemcpyHostToDevice);
    cudaMemcpy((void*)_FSR_materials, (void*)FSRs_to_material_UIDs,
               _num_FSRs * sizeof(int), cudaMemcpyHostToDevice);

    /* Copy the number of FSRs into constant memory on the GPU */
    cudaMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0,
                       cudaMemcpyHostToDevice);

    /* Free the temporary host arrays. They were allocated with new [],
     * so they must be released with delete [] -- the prior code called
     * free() on them, which is undefined behavior */
    delete [] temp_FSR_volumes;
    delete [] FSRs_to_material_UIDs;
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSRs "
               "on the device. Backtrace:%s", e.what());
  }

  initializeThrustVectors();
}
/**
* @brief Allocates data on the GPU for all Materials data.
*/
void GPUSolver::initializeMaterials() {

  log_printf(INFO, "Initializing materials on the GPU...");

  /* Release the device materials array from any previous initialization */
  if (_materials != NULL)
    cudaFree(_materials);

  /* Allocate memory for all dev_materials on the device */
  try{

    std::map<int, Material*> host_materials=_geometry->getMaterials();

    /* Reserve one dev_material slot per Material, indexed by UID */
    cudaMalloc((void**)&_materials, _num_materials * sizeof(dev_material));

    /* Clone each host Material as a dev_material struct on the device */
    std::map<int, Material*>::iterator m;
    for (m = host_materials.begin(); m != host_materials.end(); ++m)
      clone_material_on_gpu(m->second, &_materials[m->second->getUid()]);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
               "dev_materials. Backtrace:%s", e.what());
  }
}
/**
 * @brief Allocates memory on the GPU for all Tracks in the simulation.
 */
void GPUSolver::initializeTracks() {

  log_printf(INFO, "Initializing tracks on the GPU...");

  /* Delete old Tracks array if it exists */
  if (_dev_tracks != NULL)
    cudaFree(_dev_tracks);

  /* Allocate memory for all Tracks and Track offset indices on the device */
  try{

    /* Allocate array of dev_tracks */
    cudaMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track));

    /* Iterate through all Tracks and clone them as dev_tracks on the device */
    int index;

    for (int i=0; i < _tot_num_tracks; i++) {

      clone_track_on_gpu(_tracks[i], &_dev_tracks[i]);

      /* Connect this Track to its reflective neighbors by mapping the
       * host's 2D (angle, track) indices to 1D device array indices */
      index = computeScalarTrackIndex(_tracks[i]->getTrackInI(),
                                      _tracks[i]->getTrackInJ());
      cudaMemcpy((void*)&_dev_tracks[i]._track_in,
                 (void*)&index, sizeof(int), cudaMemcpyHostToDevice);

      index = computeScalarTrackIndex(_tracks[i]->getTrackOutI(),
                                      _tracks[i]->getTrackOutJ());
      cudaMemcpy((void*)&_dev_tracks[i]._track_out,
                 (void*)&index, sizeof(int), cudaMemcpyHostToDevice);
    }

    /* Copy the array of number of Tracks for each azimuthal angle into
     * constant memory on GPU. (The previous code issued this copy and the
     * tot_num_tracks copy twice; the duplicates have been removed.) */
    cudaMemcpyToSymbol(num_tracks, (void*)_num_tracks,
                       _num_azim * sizeof(int), 0, cudaMemcpyHostToDevice);

    /* Copy the total number of Tracks into constant memory on GPU */
    cudaMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks,
                       sizeof(int), 0, cudaMemcpyHostToDevice);

    /* Copy the number of azimuthal angles into constant memory on GPU */
    cudaMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0,
                       cudaMemcpyHostToDevice);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
               "dev_tracks on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Allocates memory for Track boundary angular fluxes and leakages
* and FSR scalar fluxes on the GPU.
* @details Deletes memory for old flux arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeFluxArrays() {

  log_printf(INFO, "Initializing flux arrays on the GPU...");

  /* Delete old flux arrays if they exist */
  if (_boundary_flux != NULL)
    cudaFree(_boundary_flux);

  if (_scalar_flux != NULL)
    cudaFree(_scalar_flux);

  /* Allocate memory for all flux arrays on the device */
  try{
    /* Forward and reverse angular fluxes for every Track, polar angle and
     * energy group */
    size_t boundary_size =
        2*_tot_num_tracks * _polar_times_groups*sizeof(FP_PRECISION);

    /* One scalar flux entry per FSR per energy group */
    size_t scalar_size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);

    cudaMalloc((void**)&_boundary_flux, boundary_size);
    cudaMalloc((void**)&_scalar_flux, scalar_size);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's fluxes "
               "on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Allocates memory for FSR source arrays on the GPU.
* @details Deletes memory for old source arrays if they were allocated for a
* previous simulation.
*/
void GPUSolver::initializeSourceArrays() {

  log_printf(INFO, "Initializing source arrays on the GPU...");

  /* Delete old sources arrays if they exist */
  if (_source != NULL)
    cudaFree(_source);

  if (_old_source != NULL)
    cudaFree(_old_source);

  if (_reduced_source != NULL)
    cudaFree(_reduced_source);

  /* Allocate memory for all source arrays on the device */
  try{
    /* Each source array holds one entry per FSR per energy group */
    size_t size = _num_FSRs * _num_groups * sizeof(FP_PRECISION);

    cudaMalloc((void**)&_source, size);          /* current iteration */
    cudaMalloc((void**)&_old_source, size);      /* previous iteration */
    cudaMalloc((void**)&_reduced_source, size);  /* source / total xs */
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's FSR "
               "sources array on the device. Backtrace:%s", e.what());
  }
}
/**
* @brief Initialize Thrust vectors for the fission and absorption rates,
* source residuals, leakage and fission sources.
*/
void GPUSolver::initializeThrustVectors() {

  log_printf(INFO, "Initializing Thrust vectors on the GPU...");

  /* Reset the raw pointers and clear any previously sized vectors */
  if (_fission_sources != NULL) { _fission_sources = NULL; _fission_sources_vec.clear(); }
  if (_tot_absorption != NULL) { _tot_absorption = NULL; _tot_absorption_vec.clear(); }
  if (_tot_fission != NULL) { _tot_fission = NULL; _tot_fission_vec.clear(); }
  if (_source_residuals != NULL) { _source_residuals = NULL; _source_residuals_vec.clear(); }
  if (_leakage != NULL) { _leakage = NULL; _leakage_vec.clear(); }

  /* Allocate memory for fission, absorption and source vectors on device */
  try{
    /* Each reduction vector holds one partial result per CUDA thread
     * (_B blocks x _T threads); the raw pointer view is what the kernels
     * receive as an argument */
    _fission_sources_vec.resize(_B * _T);
    _fission_sources = thrust::raw_pointer_cast(&_fission_sources_vec[0]);

    _tot_absorption_vec.resize(_B * _T);
    _tot_absorption = thrust::raw_pointer_cast(&_tot_absorption_vec[0]);

    _tot_fission_vec.resize(_B * _T);
    _tot_fission = thrust::raw_pointer_cast(&_tot_fission_vec[0]);

    _source_residuals_vec.resize(_B * _T);
    _source_residuals = thrust::raw_pointer_cast(&_source_residuals_vec[0]);

    _leakage_vec.resize(_B * _T);
    _leakage = thrust::raw_pointer_cast(&_leakage_vec[0]);
  }
  catch(std::exception &e) {
    log_printf(ERROR, "Could not allocate memory for the GPUSolver's "
               "Thrust vectors. Backtrace:%s", e.what());
  }
}
/**
* @brief This method computes the index for the Track j at azimuthal angle i.
* @details This method is necessary since the array of dev_tracks on the device
* is a 1D array which needs a one-to-one mapping from the 2D jagged
* array of Tracks on the host.
* @param i azimuthal angle number
* @param j the jth track at angle i
* @return an index into the device track array
*/
int GPUSolver::computeScalarTrackIndex(int i, int j) {

  /* Start from the offset of the jth Track at angle i, then add the Track
   * counts of all azimuthal angles preceding angle i */
  int index = j;

  for (int a = 0; a < i; a++)
    index += _num_tracks[a];

  return index;
}
/**
 * @brief Builds a linear interpolation table to compute exponentials for
 *        each segment of each Track for each polar angle on the GPU.
 */
void GPUSolver::buildExpInterpTable(){

  log_printf(INFO, "Building exponential interpolation table on device...");

  /* Copy a boolean indicating whether or not to use the linear interpolation
   * table or the exp intrinsic function */
  cudaMemcpyToSymbol(interpolate_exponential,(void*)&_interpolate_exponential,
                     sizeof(bool), 0, cudaMemcpyHostToDevice);

  /* Copy the sines of the polar angles which is needed if the user
   * requested the use of the exp intrinsic to evaluate exponentials */
  cudaMemcpyToSymbol(sinthetas, (void*)_quad->getSinThetas(),
                     _num_polar * sizeof(FP_PRECISION), 0,
                     cudaMemcpyHostToDevice);

  /* Set size of interpolation table */
  int num_array_values =
      10 * sqrt(1. / (8. * _source_convergence_thresh * 1e-2));
  _exp_table_spacing = 10. / num_array_values;
  _inverse_exp_table_spacing = 1.0 / _exp_table_spacing;
  _exp_table_size = _two_times_num_polar * num_array_values;
  _exp_table_max_index = _exp_table_size - _two_times_num_polar - 1;

  /* Allocate the host staging table */
  FP_PRECISION* exp_table = new FP_PRECISION[_exp_table_size];

  FP_PRECISION expon;
  FP_PRECISION intercept;
  FP_PRECISION slope;

  /* Create exponential interpolation table: each entry holds the (slope,
   * intercept) pair of a linear fit for one polar angle at one abscissa */
  for (int i = 0; i < num_array_values; i ++){
    for (int p = 0; p < _num_polar; p++){
      expon = exp(- (i * _exp_table_spacing) / _quad->getSinTheta(p));
      slope = - expon / _quad->getSinTheta(p);
      intercept = expon * (1 + (i * _exp_table_spacing)/_quad->getSinTheta(p));
      exp_table[_two_times_num_polar * i + 2 * p] = slope;
      exp_table[_two_times_num_polar * i + 2 * p + 1] = intercept;
    }
  }

  /* Allocate memory for the interpolation table on the device */
  cudaMalloc((void**)&_exp_table, _exp_table_size * sizeof(FP_PRECISION));

  /* Copy exponential interpolation table to the device */
  cudaMemcpy((void*)_exp_table, (void*)exp_table,
             _exp_table_size * sizeof(FP_PRECISION),
             cudaMemcpyHostToDevice);

  /* Copy table size and spacing to constant memory on the device */
  cudaMemcpyToSymbol(exp_table_spacing, (void*)&_exp_table_spacing,
                     sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);

  cudaMemcpyToSymbol(inverse_exp_table_spacing,
                     (void*)&_inverse_exp_table_spacing,
                     sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice);

  cudaMemcpyToSymbol(exp_table_max_index, (void*)&_exp_table_max_index,
                     sizeof(int), 0, cudaMemcpyHostToDevice);

  /* Release the host staging table. It was allocated with new [], so it
   * must be released with delete [] -- the prior code called free() on
   * it, which is undefined behavior */
  delete [] exp_table;

  return;
}
/**
 * @brief Zero each Track's boundary fluxes for each energy group and polar
 *        angle in the "forward" and "reverse" directions.
 */
void GPUSolver::zeroTrackFluxes() {

  /* Two directions x Tracks x polar angles x energy groups */
  int size = 2 * _tot_num_tracks * _num_polar * _num_groups;
  size *= sizeof(FP_PRECISION);

  /* cudaMemset fills bytes, so the value argument is an int; zeroed bytes
   * represent 0.0 in any floating point format (the previous 0.0 double
   * literal narrowed to 0 but obscured the byte-wise semantics) */
  cudaMemset(_boundary_flux, 0, size);

  return;
}
/**
 * @brief Set the FSR scalar flux for each energy group to some value.
 * @param value the value to assign to each FSR scalar flux
 */
void GPUSolver::flattenFSRFluxes(FP_PRECISION value) {

  int size = _num_FSRs * _num_groups;

  /* cudaMemset fills individual bytes, so it can only represent
   * value == 0.0 correctly; the previous code passed the floating point
   * value directly and produced garbage for any non-zero value. Use a
   * Thrust device fill for the general case. */
  if (value == 0.0)
    cudaMemset(_scalar_flux, 0, size * sizeof(FP_PRECISION));
  else
    thrust::fill_n(thrust::device_pointer_cast(_scalar_flux), size, value);

  return;
}
/**
 * @brief Set the FSR source for each energy group to some value.
 * @param value the value to assign to each FSR source
 */
void GPUSolver::flattenFSRSources(FP_PRECISION value) {

  int size = _num_FSRs * _num_groups;

  /* cudaMemset fills individual bytes, so it can only represent
   * value == 0.0 correctly; the previous code passed the floating point
   * value directly and produced garbage for any non-zero value. Use a
   * Thrust device fill for the general case. */
  if (value == 0.0) {
    cudaMemset(_source, 0, size * sizeof(FP_PRECISION));
    cudaMemset(_old_source, 0, size * sizeof(FP_PRECISION));
  }
  else {
    thrust::fill_n(thrust::device_pointer_cast(_source), size, value);
    thrust::fill_n(thrust::device_pointer_cast(_old_source), size, value);
  }

  return;
}
/**
 * @brief Normalizes all FSR scalar fluxes and Track boundary angular
 *        fluxes to the total fission source (times \f$ \nu \f$).
 */
void GPUSolver::normalizeFluxes() {

  /* One shared memory slot per thread for the block-level reduction */
  int shared_mem = sizeof(FP_PRECISION) * _T;

  /* Tally per-thread partial fission sources into _fission_sources */
  computeFissionSourcesOnDevice<<<_B, _T, shared_mem>>>(_FSR_volumes,
                                                        _FSR_materials,
                                                        _materials,
                                                        _scalar_flux,
                                                        _fission_sources);

  /* Reduce the partial sums; thrust::reduce synchronizes with the
   * preceding kernel launch */
  FP_PRECISION norm_factor = 1.0 / thrust::reduce(_fission_sources_vec.begin(),
                                                  _fission_sources_vec.end());

  /* Scale the scalar and boundary fluxes by the normalization factor */
  normalizeFluxesOnDevice<<<_B, _T>>>(_scalar_flux, _boundary_flux,norm_factor);
}
/**
 * @brief Computes the total source (fission and scattering) in each FSR.
 * @details This method computes the total source in each FSR based on
 *          this iteration's current approximation to the scalar flux. A
 *          residual for the source with respect to the source computed on
 *          the previous iteration is computed and returned. The residual
 *          is determined as follows:
 *          \f$ res = \sqrt{\frac{\displaystyle\sum_{FSRs}
 *          \displaystyle\sum_{groups}
 *          \left(\frac{Q^i - Q^{i-1}}{Q^i}\right)^2}
 *          {\# FSRs \times \# groups}} \f$
 *
 * @return the residual between this source and the previous source
 */
FP_PRECISION GPUSolver::computeFSRSources() {

  /* Compute the new sources and per-thread partial residuals on the
   * device; 1.0 / _k_eff scales the fission source for the eigenvalue
   * iteration */
  computeFSRSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials,
                                        _scalar_flux, _source, _old_source,
                                        _reduced_source, 1.0 / _k_eff,
                                        _source_residuals);

  /* Reduce the partial residuals and convert to an RMS relative residual */
  FP_PRECISION residual = thrust::reduce(_source_residuals_vec.begin(),
                                         _source_residuals_vec.end());
  residual = sqrt(residual / (_num_groups * _num_FSRs));

  return residual;
}
/**
 * @brief This method performs one transport sweep of all azimuthal angles,
 *        Tracks, Track segments, polar angles and energy groups.
 * @details The method integrates the flux along each Track and updates the
 *          boundary fluxes for the corresponding output Track, while updating
 *          the scalar flux in each flat source region.
 */
void GPUSolver::transportSweep() {

  /* Each thread needs shared memory for the forward and reverse polar
   * angular fluxes of the Track it is sweeping */
  int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION);
  int tid_offset, tid_max;

  log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads",
             _B, _T);

  /* Initialize leakage to zero */
  thrust::fill(_leakage_vec.begin(), _leakage_vec.end(), 0.0);

  /* Initialize flux in each FSR to zero */
  flattenFSRFluxes(0.0);

  /* Sweep the first halfspace of azimuthal angle space */
  tid_offset = 0;
  tid_max = (_tot_num_tracks / 2);

  transportSweepOnDevice<<<_B, _T, shared_mem>>>(_scalar_flux, _boundary_flux,
                                                 _reduced_source, _leakage,
                                                 _materials, _dev_tracks,
                                                 _exp_table,
                                                 tid_offset, tid_max);

  /* Sweep the second halfspace of azimuthal angle space. The offset is
   * expressed in (Track, energy group) units, hence the _num_groups factor */
  tid_offset = tid_max * _num_groups;
  tid_max = _tot_num_tracks;

  transportSweepOnDevice<<<_B, _T, shared_mem>>>(_scalar_flux, _boundary_flux,
                                                 _reduced_source, _leakage,
                                                 _materials, _dev_tracks,
                                                 _exp_table,
                                                 tid_offset, tid_max);
}
/**
 * @brief Add the source term contribution in the transport equation to
 *        the FSR scalar flux.
 */
void GPUSolver::addSourceToScalarFlux() {

  /* Launch one kernel covering all FSRs and energy groups */
  addSourceToScalarFluxOnDevice<<<_B,_T>>>(_scalar_flux, _reduced_source,
                                           _FSR_volumes, _FSR_materials,
                                           _materials);
}
/**
* @brief Compute \f$ k_{eff} \f$ from the total fission and absorption rates.
* @details This method computes the current approximation to the
* multiplication factor on this iteration as follows:
* \f$ k_{eff} = \frac{\displaystyle\sum \displaystyle\sum \nu
* \Sigma_f \Phi V}{\displaystyle\sum
* \displaystyle\sum \Sigma_a \Phi V} \f$
*/
void GPUSolver::computeKeff() {
FP_PRECISION tot_absorption;
FP_PRECISION tot_fission;
FP_PRECISION tot_leakage;
/* Compute the total fission and absorption rates on the device.
* This kernel stores partial rates in a Thrust vector with as many
* entries as CUDAthreads executed by the kernel */
computeFissionAndAbsorption<<<_B, _T>>>(_FSR_volumes, _FSR_materials,
_materials, _scalar_flux,
_tot_absorption, _tot_fission);
cudaDeviceSynchronize();
/* Compute the total absorption rate by reducing the partial absorption
* rates compiled in the Thrust vector */
tot_absorption = thrust::reduce(_tot_absorption_vec.begin(),
_tot_absorption_vec.end());
/* Compute the total fission rate by reducing the partial fission
* rates compiled in the Thrust vector */
tot_fission = thrust::reduce(_tot_fission_vec.begin(),_tot_fission_vec.end());
cudaMemcpy((void*)&tot_fission, (void*)_tot_fission,
_B * _T * sizeof(FP_PRECISION), cudaMemcpyHostToDevice);
/* Compute the total leakage by reducing the partial leakage
* rates compiled in the Thrust vector */
tot_leakage = 0.5 * thrust::reduce(_leakage_vec.begin(), _leakage_vec.end());
/* Compute the new keff from the fission and absorption rates */
_k_eff = tot_fission / (tot_absorption + tot_leakage);
log_printf(DEBUG, "abs = %f, fiss = %f, leak = %f, keff = %f",
tot_absorption, tot_fission, tot_leakage, _k_eff);
}
/**
* @brief Computes the volume-weighted, energy integrated fission rate in
* each FSR and stores them in an array indexed by FSR ID.
* @details This is a helper method for SWIG to allow users to retrieve
* FSR fission rates as a NumPy array. An example of how this method
* can be called from Python is as follows:
*
* @code
* num_FSRs = geometry.getNumFSRs()
* fission_rates = solver.computeFSRFissionRates(num_FSRs)
* @endcode
*
* @param fission_rates an array to store the fission rates (implicitly passed
* in as a NumPy array from Python)
* @param num_FSRs the number of FSRs passed in from Python
*/
void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) {
log_printf(INFO, "Computing FSR fission rates...");
/* Allocate memory for the FSR fission rates on the device */
double* dev_fission_rates;
cudaMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(double));
/* Compute the FSR fission rates on the device */
computeFSRFissionRatesOnDevice<<<_B,_T>>>(dev_fission_rates,
_FSR_materials,
_materials,
_scalar_flux);
/* Copy the fission rate array from the device to the host */
cudaMemcpy((void*)fission_rates, (void*)dev_fission_rates,
_num_FSRs * sizeof(double), cudaMemcpyDeviceToHost);
/* Deallocate the memory assigned to store the fission rates on the device */
cudaFree(dev_fission_rates);
return;
}
|
e11b9b17287e52db831f7be511dcaec2d7e8712f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceGuard.h>
#include <nnutils/gpu/mask_image_from_size.h>
#include <THH/THH.h>
#include <cstdint>
#include "../mask_image_from_size.h"
namespace nnutils {
namespace pytorch {
namespace gpu {
template <typename T>
void MaskImageFromSizeLauncher::operator()(
const long int N, const long int C, const long int H, const long int W,
const long int* xs, T* x, const T& m, const c10::Device& device) {
at::DeviceGuard device_guard(device);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
nnutils::gpu::mask_image_from_size(N, C, H, W, xs, x, m, stream);
}
#define INSTANTITATE_OPERATOR(TYPE) \
template void MaskImageFromSizeLauncher::operator()<TYPE>( \
const long int N, const long int C, const long int H, const long int W, \
const long int* xs, TYPE* x, const TYPE& m, const c10::Device& device)
INSTANTITATE_OPERATOR(uint8_t);
INSTANTITATE_OPERATOR(int8_t);
INSTANTITATE_OPERATOR(int16_t);
INSTANTITATE_OPERATOR(int32_t);
INSTANTITATE_OPERATOR(int64_t);
INSTANTITATE_OPERATOR(double);
INSTANTITATE_OPERATOR(float);
#undef INSTANTITATE_OPERATOR
} // namespace gpu
} // namespace pytorch
} // namespace nnutils
| e11b9b17287e52db831f7be511dcaec2d7e8712f.cu | #include <ATen/Context.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceGuard.h>
#include <nnutils/gpu/mask_image_from_size.h>
#include <THC/THC.h>
#include <cstdint>
#include "../mask_image_from_size.h"
namespace nnutils {
namespace pytorch {
namespace gpu {
template <typename T>
void MaskImageFromSizeLauncher::operator()(
const long int N, const long int C, const long int H, const long int W,
const long int* xs, T* x, const T& m, const c10::Device& device) {
at::DeviceGuard device_guard(device);
auto stream = c10::cuda::getCurrentCUDAStream();
nnutils::gpu::mask_image_from_size(N, C, H, W, xs, x, m, stream);
}
#define INSTANTITATE_OPERATOR(TYPE) \
template void MaskImageFromSizeLauncher::operator()<TYPE>( \
const long int N, const long int C, const long int H, const long int W, \
const long int* xs, TYPE* x, const TYPE& m, const c10::Device& device)
INSTANTITATE_OPERATOR(uint8_t);
INSTANTITATE_OPERATOR(int8_t);
INSTANTITATE_OPERATOR(int16_t);
INSTANTITATE_OPERATOR(int32_t);
INSTANTITATE_OPERATOR(int64_t);
INSTANTITATE_OPERATOR(double);
INSTANTITATE_OPERATOR(float);
#undef INSTANTITATE_OPERATOR
} // namespace gpu
} // namespace pytorch
} // namespace nnutils
|
d6a4f983d1911f3ccc4b6dff6429fcbbf7fdd5c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/convert.h"
#include "dali/core/cuda_utils.h"
#include "dali/core/error_handling.h"
#include "dali/core/static_switch.h"
#include "dali/operators/generic/cast.h"
namespace dali {
template <typename OType, typename IType>
__global__ void
BatchedCastKernel(OType * output, const IType * in, size_t N) {
size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N) {
output[tid] = ConvertSat<OType>(in[tid]);
}
}
template <typename OType, typename IType>
DALIError_t BatchedCast(OType * output,
const IType * input,
size_t N,
hipStream_t stream) {
DALI_ASSERT(output != nullptr);
DALI_ASSERT(input != nullptr);
const int threads = 512;
const int blocks = (N + threads - 1)/threads;
hipLaunchKernelGGL(( BatchedCastKernel), dim3(blocks), dim3(threads), 0, stream, output, input, N);
return DALISuccess;
}
template<>
void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
DALIDataType itype = input.type().id();
TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, (
output.SetLayout(input.GetLayout());
output.mutable_data<OType>();
output.ResizeLike(input);
TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, (
BatchedCast(output.mutable_data<OType>(), input.data<IType>(), input.size(), ws.stream());
), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens)
), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens)
}
DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU);
} // namespace dali
| d6a4f983d1911f3ccc4b6dff6429fcbbf7fdd5c5.cu | // Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/core/convert.h"
#include "dali/core/cuda_utils.h"
#include "dali/core/error_handling.h"
#include "dali/core/static_switch.h"
#include "dali/operators/generic/cast.h"
namespace dali {
template <typename OType, typename IType>
__global__ void
BatchedCastKernel(OType * output, const IType * in, size_t N) {
size_t tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < N) {
output[tid] = ConvertSat<OType>(in[tid]);
}
}
template <typename OType, typename IType>
DALIError_t BatchedCast(OType * output,
const IType * input,
size_t N,
cudaStream_t stream) {
DALI_ASSERT(output != nullptr);
DALI_ASSERT(input != nullptr);
const int threads = 512;
const int blocks = (N + threads - 1)/threads;
BatchedCastKernel<<<blocks, threads, 0, stream>>>(output, input, N);
return DALISuccess;
}
template<>
void Cast<GPUBackend>::RunImpl(DeviceWorkspace &ws) {
const auto &input = ws.Input<GPUBackend>(0);
auto &output = ws.Output<GPUBackend>(0);
DALIDataType itype = input.type().id();
TYPE_SWITCH(output_type_, type2id, OType, CAST_ALLOWED_TYPES, (
output.SetLayout(input.GetLayout());
output.mutable_data<OType>();
output.ResizeLike(input);
TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, (
BatchedCast(output.mutable_data<OType>(), input.data<IType>(), input.size(), ws.stream());
), DALI_FAIL(make_string("Invalid input type: ", itype));); // NOLINT(whitespace/parens)
), DALI_FAIL(make_string("Invalid output type: ", output_type_));); // NOLINT(whitespace/parens)
}
DALI_REGISTER_OPERATOR(Cast, Cast<GPUBackend>, GPU);
} // namespace dali
|
df147021a47a18ef82e7c5bc8d5465bcaca9b75b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<memory>
#include<vector>
#include<functional>
#include<iostream>
using namespace std;
using fp = void(*)(int*);
__global__ void
test(int *d_data){
printf("hello world\n");
for(int i = 0;i<10;i++)
printf("%d:%d\n",i,d_data[i]);
}
int uniquePtr(){
cout<<"uniquePtr"<<endl;
int *d_data0;
function<void(int*)> lambda = [](int*p){hipFree(p);};
unique_ptr<int,function<void(int*)>> d_data{nullptr, lambda};
hipMalloc((void**)&d_data0,sizeof(int)*10);
d_data.reset(d_data0);
//unique_ptr
// unique_ptr cudaDeviceReset
//cudaDeviceResetcudaFree
// cudaDeviceReset,main
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
hipMemcpy(d_data.get(),h_data,sizeof(int)*10,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, d_data.get());
hipDeviceSynchronize();
return 0;
}
int uniquePtr1(){
cout<<"uniquePtr1"<<endl;
function<void(int*)> lambda = [](int*p){hipFree(p);};
vector<unique_ptr<int,function<void(int*)>> > vec;
for(int i=0; i<2;i++){
vec.emplace_back(nullptr,lambda);
int* tmp;
hipMalloc((void**)&tmp,sizeof(int)*10);
vec[i].reset(tmp);
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
hipMemcpy(vec[i].get(),h_data,sizeof(int)*10,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, vec[i].get());
}
hipDeviceSynchronize();
return 0;
}
int normal(){
cout<<"normal"<<endl;
int *d_data;
// cudaFreecuda-memcheck
hipMalloc((void**)&d_data,sizeof(int)*10);
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
hipMemcpy(d_data,h_data,sizeof(int)*10,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, d_data);
hipDeviceSynchronize();
return 0;
}
int main(){
#ifdef UNIQUE
uniquePtr();
cout<<"-------------"<<endl;
uniquePtr1();
#else
normal();
#endif
//context
//
hipDeviceReset();
return 0;
}
| df147021a47a18ef82e7c5bc8d5465bcaca9b75b.cu | #include<cstdio>
#include<memory>
#include<vector>
#include<functional>
#include<iostream>
using namespace std;
using fp = void(*)(int*);
__global__ void
test(int *d_data){
printf("hello world\n");
for(int i = 0;i<10;i++)
printf("%d:%d\n",i,d_data[i]);
}
int uniquePtr(){
cout<<"uniquePtr"<<endl;
int *d_data0;
function<void(int*)> lambda = [](int*p){cudaFree(p);};
unique_ptr<int,function<void(int*)>> d_data{nullptr, lambda};
cudaMalloc((void**)&d_data0,sizeof(int)*10);
d_data.reset(d_data0);
//交给unique_ptr做指针维护
// unique_ptr 的生命周期要与cudaDeviceReset一起考虑,
//cudaDeviceReset是将上下文都重置,如果之前并未执行cudaFree则会造成内存泄漏
// 但是,如果不调用cudaDeviceReset,其会在main函数生命周期之后执行
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
cudaMemcpy(d_data.get(),h_data,sizeof(int)*10,cudaMemcpyHostToDevice);
test<<<1,1>>>(d_data.get());
cudaDeviceSynchronize();
return 0;
}
int uniquePtr1(){
cout<<"uniquePtr1"<<endl;
function<void(int*)> lambda = [](int*p){cudaFree(p);};
vector<unique_ptr<int,function<void(int*)>> > vec;
for(int i=0; i<2;i++){
vec.emplace_back(nullptr,lambda);
int* tmp;
cudaMalloc((void**)&tmp,sizeof(int)*10);
vec[i].reset(tmp);
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
cudaMemcpy(vec[i].get(),h_data,sizeof(int)*10,cudaMemcpyHostToDevice);
test<<<1,1>>>(vec[i].get());
}
cudaDeviceSynchronize();
return 0;
}
int normal(){
cout<<"normal"<<endl;
int *d_data;
// 故意缺少cudaFree,调用cuda-memcheck
cudaMalloc((void**)&d_data,sizeof(int)*10);
int h_data[10] = {1,2,3,4,5,6,7,8,9,10};
cudaMemcpy(d_data,h_data,sizeof(int)*10,cudaMemcpyHostToDevice);
test<<<1,1>>>(d_data);
cudaDeviceSynchronize();
return 0;
}
int main(){
#ifdef UNIQUE
uniquePtr();
cout<<"-------------"<<endl;
uniquePtr1();
#else
normal();
#endif
//一定要加上这句,不然底层context会自己帮忙释放未释放的内存,
//显示调用就意味着内存需要手动自己释放
cudaDeviceReset();
return 0;
}
|
b2bd3dcb600e3d027de19c8f6d384ad1a7bdf994.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FF_calc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *q_S_ref_dS = NULL;
hipMalloc(&q_S_ref_dS, XSIZE*YSIZE);
float *WK = NULL;
hipMalloc(&WK, XSIZE*YSIZE);
float *vdW = NULL;
hipMalloc(&vdW, XSIZE*YSIZE);
int num_q = 1;
int num_ele = 1;
float c1 = 1;
float r_m = 1;
float *FF_table = NULL;
hipMalloc(&FF_table, XSIZE*YSIZE);
float rho = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
FF_calc), dim3(gridBlock),dim3(threadBlock), 0, 0, q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
FF_calc), dim3(gridBlock),dim3(threadBlock), 0, 0, q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
FF_calc), dim3(gridBlock),dim3(threadBlock), 0, 0, q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b2bd3dcb600e3d027de19c8f6d384ad1a7bdf994.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FF_calc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *q_S_ref_dS = NULL;
cudaMalloc(&q_S_ref_dS, XSIZE*YSIZE);
float *WK = NULL;
cudaMalloc(&WK, XSIZE*YSIZE);
float *vdW = NULL;
cudaMalloc(&vdW, XSIZE*YSIZE);
int num_q = 1;
int num_ele = 1;
float c1 = 1;
float r_m = 1;
float *FF_table = NULL;
cudaMalloc(&FF_table, XSIZE*YSIZE);
float rho = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
FF_calc<<<gridBlock,threadBlock>>>(q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
FF_calc<<<gridBlock,threadBlock>>>(q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
FF_calc<<<gridBlock,threadBlock>>>(q_S_ref_dS,WK,vdW,num_q,num_ele,c1,r_m,FF_table,rho);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
device-var-init.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// CHECK: @d_v = addrspace(1) externally_initialized global i32 0,
__shared__ int s_v;
// CHECK: @s_v = addrspace(3) global i32 undef,
__constant__ int c_v;
// CHECK: addrspace(4) externally_initialized global i32 0,
__device__ int d_v_i = 1;
// CHECK: @d_v_i = addrspace(1) externally_initialized global i32 1,
// trivial constructor -- allowed
__device__ T d_t;
// CHECK: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
__shared__ T s_t;
// CHECK: @s_t = addrspace(3) global %struct.T undef,
__constant__ T c_t;
// CHECK: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
__device__ T d_t_i = {2};
// CHECK: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
__constant__ T c_t_i = {2};
// CHECK: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// empty constructor
__device__ EC d_ec;
// CHECK: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
__shared__ EC s_ec;
// CHECK: @s_ec = addrspace(3) global %struct.EC undef,
__constant__ EC c_ec;
// CHECK: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// empty destructor
__device__ ED d_ed;
// CHECK: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
__shared__ ED s_ed;
// CHECK: @s_ed = addrspace(3) global %struct.ED undef,
__constant__ ED c_ed;
// CHECK: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
__device__ ECD d_ecd;
// CHECK: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
__shared__ ECD s_ecd;
// CHECK: @s_ecd = addrspace(3) global %struct.ECD undef,
__constant__ ECD c_ecd;
// CHECK: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// CHECK: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
__shared__ ETC s_etc;
// CHECK: @s_etc = addrspace(3) global %struct.ETC undef,
__constant__ ETC c_etc;
// CHECK: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
__device__ NCFS d_ncfs;
// CHECK: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
__constant__ NCFS c_ncfs;
// CHECK: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// CHECK: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
__shared__ T_B_T s_t_b_t;
// CHECK: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// CHECK: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// Incapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// CHECK: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
__shared__ T_F_T s_t_f_t;
// CHECK: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// CHECK: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// CHECK: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
__shared__ T_FA_T s_t_fa_t;
// CHECK: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// CHECK: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// CHECK: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
__shared__ EC_I_EC s_ec_i_ec;
// CHECK: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// CHECK: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// We should not emit global initializers for device-side variables.
// CHECK-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
__device__ void df() {
T t;
// CHECK-NOT: call
EC ec;
// CHECK: call void @_ZN2ECC1Ev(%struct.EC* %ec)
ED ed;
// CHECK-NOT: call
ECD ecd;
// CHECK: call void @_ZN3ECDC1Ev(%struct.ECD* %ecd)
ETC etc;
// CHECK: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* %etc)
UC uc;
// undefined constructor -- not allowed
// CHECK: call void @_ZN2UCC1Ev(%struct.UC* %uc)
UD ud;
// undefined destructor -- not allowed
// CHECK-NOT: call
ECI eci;
// empty constructor w/ initializer list -- not allowed
// CHECK: call void @_ZN3ECIC1Ev(%struct.ECI* %eci)
NEC nec;
// non-empty constructor -- not allowed
// CHECK: call void @_ZN3NECC1Ev(%struct.NEC* %nec)
// non-empty destructor -- not allowed
NED ned;
// no-constructor, virtual method -- not allowed
// CHECK: call void @_ZN3NCVC1Ev(%struct.NCV* %ncv)
NCV ncv;
// CHECK-NOT: call
VD vd;
// CHECK: call void @_ZN2VDC1Ev(%struct.VD* %vd)
NCF ncf;
// CHECK: call void @_ZN3NCFC1Ev(%struct.NCF* %ncf)
NCFS ncfs;
// CHECK: call void @_ZN4NCFSC1Ev(%struct.NCFS* %ncfs)
UTC utc;
// CHECK: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* %utc)
NETC netc;
// CHECK: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* %netc)
T_B_T t_b_t;
// CHECK-NOT: call
T_F_T t_f_t;
// CHECK-NOT: call
T_FA_T t_fa_t;
// CHECK-NOT: call
EC_I_EC ec_i_ec;
// CHECK: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* %ec_i_ec)
EC_I_EC1 ec_i_ec1;
// CHECK: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* %ec_i_ec1)
T_V_T t_v_t;
// CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t)
T_B_NEC t_b_nec;
// CHECK: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* %t_b_nec)
T_F_NEC t_f_nec;
// CHECK: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* %t_f_nec)
T_FA_NEC t_fa_nec;
// CHECK: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* %t_fa_nec)
T_B_NED t_b_ned;
// CHECK-NOT: call
T_F_NED t_f_ned;
// CHECK-NOT: call
T_FA_NED t_fa_ned;
// CHECK-NOT: call
static __shared__ EC s_ec;
// CHECK-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
static __shared__ ETC s_etc;
// CHECK-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
// anchor point separating constructors and destructors
df(); // CHECK: call void @_Z2dfv()
// Verify that we only call non-empty destructors
// CHECK-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* %t_fa_ned) #6
// CHECK-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* %t_f_ned) #6
// CHECK-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* %t_b_ned) #6
// CHECK-NEXT: call void @_ZN2VDD1Ev(%struct.VD* %vd)
// CHECK-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* %ned)
// CHECK-NEXT: call void @_ZN2UDD1Ev(%struct.UD* %ud)
// CHECK-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* %ecd)
// CHECK-NEXT: call void @_ZN2EDD1Ev(%struct.ED* %ed)
// CHECK-NEXT: ret void
}
// We should not emit global init function.
// CHECK-NOT: @_GLOBAL__sub_I
| device-var-init.cu | // REQUIRES: nvptx-registered-target
// Make sure we don't allow dynamic initialization for device
// variables, but accept empty constructors allowed by CUDA.
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fcuda-is-device -std=c++11 \
// RUN: -fno-threadsafe-statics -emit-llvm -o - %s | FileCheck %s
#ifdef __clang__
#include "Inputs/cuda.h"
#endif
// Use the types we share with Sema tests.
#include "Inputs/cuda-initializers.h"
__device__ int d_v;
// CHECK: @d_v = addrspace(1) externally_initialized global i32 0,
__shared__ int s_v;
// CHECK: @s_v = addrspace(3) global i32 undef,
__constant__ int c_v;
// CHECK: addrspace(4) externally_initialized global i32 0,
__device__ int d_v_i = 1;
// CHECK: @d_v_i = addrspace(1) externally_initialized global i32 1,
// trivial constructor -- allowed
__device__ T d_t;
// CHECK: @d_t = addrspace(1) externally_initialized global %struct.T zeroinitializer
__shared__ T s_t;
// CHECK: @s_t = addrspace(3) global %struct.T undef,
__constant__ T c_t;
// CHECK: @c_t = addrspace(4) externally_initialized global %struct.T zeroinitializer,
__device__ T d_t_i = {2};
// CHECK: @d_t_i = addrspace(1) externally_initialized global %struct.T { i32 2 },
__constant__ T c_t_i = {2};
// CHECK: @c_t_i = addrspace(4) externally_initialized global %struct.T { i32 2 },
// empty constructor
__device__ EC d_ec;
// CHECK: @d_ec = addrspace(1) externally_initialized global %struct.EC zeroinitializer,
__shared__ EC s_ec;
// CHECK: @s_ec = addrspace(3) global %struct.EC undef,
__constant__ EC c_ec;
// CHECK: @c_ec = addrspace(4) externally_initialized global %struct.EC zeroinitializer,
// empty destructor
__device__ ED d_ed;
// CHECK: @d_ed = addrspace(1) externally_initialized global %struct.ED zeroinitializer,
__shared__ ED s_ed;
// CHECK: @s_ed = addrspace(3) global %struct.ED undef,
__constant__ ED c_ed;
// CHECK: @c_ed = addrspace(4) externally_initialized global %struct.ED zeroinitializer,
__device__ ECD d_ecd;
// CHECK: @d_ecd = addrspace(1) externally_initialized global %struct.ECD zeroinitializer,
__shared__ ECD s_ecd;
// CHECK: @s_ecd = addrspace(3) global %struct.ECD undef,
__constant__ ECD c_ecd;
// CHECK: @c_ecd = addrspace(4) externally_initialized global %struct.ECD zeroinitializer,
// empty templated constructor -- allowed with no arguments
__device__ ETC d_etc;
// CHECK: @d_etc = addrspace(1) externally_initialized global %struct.ETC zeroinitializer,
__shared__ ETC s_etc;
// CHECK: @s_etc = addrspace(3) global %struct.ETC undef,
__constant__ ETC c_etc;
// CHECK: @c_etc = addrspace(4) externally_initialized global %struct.ETC zeroinitializer,
__device__ NCFS d_ncfs;
// CHECK: @d_ncfs = addrspace(1) externally_initialized global %struct.NCFS { i32 3 }
__constant__ NCFS c_ncfs;
// CHECK: @c_ncfs = addrspace(4) externally_initialized global %struct.NCFS { i32 3 }
// Regular base class -- allowed
__device__ T_B_T d_t_b_t;
// CHECK: @d_t_b_t = addrspace(1) externally_initialized global %struct.T_B_T zeroinitializer,
__shared__ T_B_T s_t_b_t;
// CHECK: @s_t_b_t = addrspace(3) global %struct.T_B_T undef,
__constant__ T_B_T c_t_b_t;
// CHECK: @c_t_b_t = addrspace(4) externally_initialized global %struct.T_B_T zeroinitializer,
// Incapsulated object of allowed class -- allowed
__device__ T_F_T d_t_f_t;
// CHECK: @d_t_f_t = addrspace(1) externally_initialized global %struct.T_F_T zeroinitializer,
__shared__ T_F_T s_t_f_t;
// CHECK: @s_t_f_t = addrspace(3) global %struct.T_F_T undef,
__constant__ T_F_T c_t_f_t;
// CHECK: @c_t_f_t = addrspace(4) externally_initialized global %struct.T_F_T zeroinitializer,
// array of allowed objects -- allowed
__device__ T_FA_T d_t_fa_t;
// CHECK: @d_t_fa_t = addrspace(1) externally_initialized global %struct.T_FA_T zeroinitializer,
__shared__ T_FA_T s_t_fa_t;
// CHECK: @s_t_fa_t = addrspace(3) global %struct.T_FA_T undef,
__constant__ T_FA_T c_t_fa_t;
// CHECK: @c_t_fa_t = addrspace(4) externally_initialized global %struct.T_FA_T zeroinitializer,
// Calling empty base class initializer is OK
__device__ EC_I_EC d_ec_i_ec;
// CHECK: @d_ec_i_ec = addrspace(1) externally_initialized global %struct.EC_I_EC zeroinitializer,
__shared__ EC_I_EC s_ec_i_ec;
// CHECK: @s_ec_i_ec = addrspace(3) global %struct.EC_I_EC undef,
__constant__ EC_I_EC c_ec_i_ec;
// CHECK: @c_ec_i_ec = addrspace(4) externally_initialized global %struct.EC_I_EC zeroinitializer,
// We should not emit global initializers for device-side variables.
// CHECK-NOT: @__cxx_global_var_init
// Make sure that initialization restrictions do not apply to local
// variables.
// Exercises every constructor/destructor flavour on function-local variables
// inside a device function. Unlike the device-side globals above, locals may
// use arbitrary constructors; FileCheck verifies exactly which ctor/dtor
// calls the compiler emits (trivial/empty ones must be elided, non-trivial
// ones must appear, and dtors run in reverse declaration order after the
// recursive df() call that anchors the ctor/dtor boundary).
__device__ void df() {
  T t;
  // CHECK-NOT: call
  EC ec;
  // CHECK: call void @_ZN2ECC1Ev(%struct.EC* %ec)
  ED ed;
  // CHECK-NOT: call
  ECD ecd;
  // CHECK: call void @_ZN3ECDC1Ev(%struct.ECD* %ecd)
  ETC etc;
  // CHECK: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* %etc)
  UC uc;
  // undefined constructor -- not allowed
  // CHECK: call void @_ZN2UCC1Ev(%struct.UC* %uc)
  UD ud;
  // undefined destructor -- not allowed
  // CHECK-NOT: call
  ECI eci;
  // empty constructor w/ initializer list -- not allowed
  // CHECK: call void @_ZN3ECIC1Ev(%struct.ECI* %eci)
  NEC nec;
  // non-empty constructor -- not allowed
  // CHECK: call void @_ZN3NECC1Ev(%struct.NEC* %nec)
  // non-empty destructor -- not allowed
  NED ned;
  // no-constructor, virtual method -- not allowed
  // CHECK: call void @_ZN3NCVC1Ev(%struct.NCV* %ncv)
  NCV ncv;
  // CHECK-NOT: call
  VD vd;
  // CHECK: call void @_ZN2VDC1Ev(%struct.VD* %vd)
  NCF ncf;
  // CHECK: call void @_ZN3NCFC1Ev(%struct.NCF* %ncf)
  NCFS ncfs;
  // CHECK: call void @_ZN4NCFSC1Ev(%struct.NCFS* %ncfs)
  UTC utc;
  // CHECK: call void @_ZN3UTCC1IJEEEDpT_(%struct.UTC* %utc)
  NETC netc;
  // CHECK: call void @_ZN4NETCC1IJEEEDpT_(%struct.NETC* %netc)
  T_B_T t_b_t;
  // CHECK-NOT: call
  T_F_T t_f_t;
  // CHECK-NOT: call
  T_FA_T t_fa_t;
  // CHECK-NOT: call
  EC_I_EC ec_i_ec;
  // CHECK: call void @_ZN7EC_I_ECC1Ev(%struct.EC_I_EC* %ec_i_ec)
  EC_I_EC1 ec_i_ec1;
  // CHECK: call void @_ZN8EC_I_EC1C1Ev(%struct.EC_I_EC1* %ec_i_ec1)
  T_V_T t_v_t;
  // CHECK: call void @_ZN5T_V_TC1Ev(%struct.T_V_T* %t_v_t)
  T_B_NEC t_b_nec;
  // CHECK: call void @_ZN7T_B_NECC1Ev(%struct.T_B_NEC* %t_b_nec)
  T_F_NEC t_f_nec;
  // CHECK: call void @_ZN7T_F_NECC1Ev(%struct.T_F_NEC* %t_f_nec)
  T_FA_NEC t_fa_nec;
  // CHECK: call void @_ZN8T_FA_NECC1Ev(%struct.T_FA_NEC* %t_fa_nec)
  T_B_NED t_b_ned;
  // CHECK-NOT: call
  T_F_NED t_f_ned;
  // CHECK-NOT: call
  T_FA_NED t_fa_ned;
  // CHECK-NOT: call
  static __shared__ EC s_ec;
  // CHECK-NOT: call void @_ZN2ECC1Ev(%struct.EC* addrspacecast (%struct.EC addrspace(3)* @_ZZ2dfvE4s_ec to %struct.EC*))
  static __shared__ ETC s_etc;
  // CHECK-NOT: call void @_ZN3ETCC1IJEEEDpT_(%struct.ETC* addrspacecast (%struct.ETC addrspace(3)* @_ZZ2dfvE5s_etc to %struct.ETC*))
  // anchor point separating constructors and destructors
  df(); // CHECK: call void @_Z2dfv()
  // Verify that we only call non-empty destructors
  // CHECK-NEXT: call void @_ZN8T_FA_NEDD1Ev(%struct.T_FA_NED* %t_fa_ned) #6
  // CHECK-NEXT: call void @_ZN7T_F_NEDD1Ev(%struct.T_F_NED* %t_f_ned) #6
  // CHECK-NEXT: call void @_ZN7T_B_NEDD1Ev(%struct.T_B_NED* %t_b_ned) #6
  // CHECK-NEXT: call void @_ZN2VDD1Ev(%struct.VD* %vd)
  // CHECK-NEXT: call void @_ZN3NEDD1Ev(%struct.NED* %ned)
  // CHECK-NEXT: call void @_ZN2UDD1Ev(%struct.UD* %ud)
  // CHECK-NEXT: call void @_ZN3ECDD1Ev(%struct.ECD* %ecd)
  // CHECK-NEXT: call void @_ZN2EDD1Ev(%struct.ED* %ed)
  // CHECK-NEXT: ret void
}
// We should not emit global init function.
// CHECK-NOT: @_GLOBAL__sub_I
|
c6fbdf1d68e9c3ce61fb0547d819138209fbcde8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <torch/torch.h>
#include <torch/extension.h>
//#include <torch/serialize/tensor.h>
//#include <ATen/ATen.h>
//#include <ATen/hip/HIPContext.h>
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
// Element-wise running maximum with provenance tracking.
// For each of the n elements: if the candidate value top_temp[i] beats the
// current best top_data[i], overwrite the best and record mask_index in
// mask[i] so the backward pass knows which directional pass won.
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask,
                     const int mask_index){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
    {
        const float candidate = top_temp[i];
        if (candidate > top_data[i])
        {
            top_data[i] = candidate;
            mask[i] = mask_index;   // implicit int -> float; decoded with (int) later
        }
    }
}
// Routes the upstream gradient to one directional pass.
// top_grad[i] receives gradOutput[i] only where mask[i] (stored as float,
// decoded by truncation) equals mask_index; other entries are left untouched,
// so the caller is expected to zero top_grad beforehand.
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask,
                               float *top_grad, const int mask_index){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    const bool selected = ((int) mask[i]) == mask_index;
    if (selected)
        top_grad[i] = gradOutput[i];
}
// Per-pixel argmax over the depth dimension.
// One thread per (batch*channel, pixel) pair; `step` is the size of one depth
// slice (height*width). The winning depth index (first maximum on ties) is
// written into idx[i] as a float.
__global__ void MaxDepth (const int n, const float *bottom_data, const int step,
                          const int depth, float *idx){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n)
        return;
    // Offset of depth slice 0 for this pixel in the (.., depth, step) layout.
    const int base = (i / step) * step * depth + i % step;
    int best = 0;
    for (int d = 1; d < depth; d++)
    {
        if (bottom_data[base + best * step] < bottom_data[base + d * step])
            best = d;
    }
    idx[i] = best;
}
// SGA forward sweep, top-to-bottom ("down"), computed in place on top_data.
// One thread per (batch*channel, column); n = N*C*width. top_data must be
// pre-filled with the input volume (the host wrapper memcpy's it in first).
// Filter layout mirrors the data: fbase indexes a wsize-tap filter per pixel
// (taps 0..4 are read, so presumably wsize == 5 — confirm against caller).
// Recurrence per column, row = 0..height-1, for every depth slice d:
//   out(row,d) = in(row,d)*w0 + out(row-1,d)*w1 + out(row-1,d-1)*w2
//              + out(row-1,d+1)*w3 + out(row-1,kmax)*w4
// where kmax is the running depth-argmax of the previous row (tracked in
// kp/k without re-scanning) and any unavailable neighbour (row 0, depth
// boundary) falls back to the current value in(row,d).
__global__ void sga_down_forward (const int n, const float *filters, const int height,
                                  const int width, const int depth, const int wsize,
                                  float *top_data){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;    // size of one depth slice
    // int wsize=radius+1;
    int base = index / width * step * depth + index % width; //up->down
    int fbase = index / width * step * wsize + index % width;
    int kp = 0;   // depth-argmax of the row just written; becomes k next row
    for (int row = 0; row < height; row++)
    {
        int shift = fbase + row * width;   // filter offset for this pixel
        int base0 = base + row * width;    // data offset of (row, d == 0)
        int k = kp;                        // previous row's depth-argmax
        kp = 0;
        /* if(row-1>=0)
           for(int i = 1; i < depth; i++){
           if(top_data[base0-width+k*step]<top_data[base0-width+i*step])
           k = i;
         */
        for (int d = 0; d < depth; d++)
        {
            float temp = 0;
            int location = base0 + d * step;
            // top_data[location] is still the raw input here (overwritten below).
            temp += top_data[location] * filters[shift];
            if (row - 1 >= 0)
                temp += top_data[location - width] * filters[shift + step];
            else
                temp += top_data[location] * filters[shift + step];
            if (row - 1 >= 0 && d - 1 >= 0)
                temp +=
                    top_data[location - width - step] * filters[shift + 2 * step];
            else
                temp += top_data[location] * filters[shift + 2 * step];
            if (row - 1 >= 0 && d + 1 < depth)
                temp +=
                    top_data[location - width + step] * filters[shift + 3 * step];
            else
                temp += top_data[location] * filters[shift + 3 * step];
            if (row - 1 >= 0)
                temp +=
                    top_data[base0 - width + k * step] * filters[shift + 4 * step];
            else
                temp += top_data[location] * filters[shift + 4 * step];
            top_data[location] = temp;
            // Track the argmax of the freshly-written row for the next iteration.
            if (top_data[base0 + kp * step] < temp)
                kp = d;
        }
    }
}
// Backward of sga_down_forward w.r.t. the input data, sweeping rows in
// reverse (bottom-to-top). top_diff arrives holding dL/d(out) and is updated
// in place to propagate gradients through the row recurrence; each element's
// accumulated gradient times tap w0 is added into bottom_diff. The idx array
// (from MaxDepth on the re-computed forward result) supplies the per-pixel
// depth-argmax used by tap w4. The final loop adds the depth-boundary
// fallback terms (w2 at d==0, w3 at d==depth-1).
// NOTE(review): the forward's boundary fallbacks also fire at row==0 (taps
// w1..w4) but only the d-boundary terms are accumulated here — the row-0
// fallback gradients appear to be dropped; confirm this approximation is
// intended before relying on exact gradients at the top edge.
__global__ void sga_down_data_backward (const int n, const float *filters, float *top_diff,
                                        const float *idx, const int height, const int width,
                                        const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / width * step * depth + index % width; //up->down
    int fbase = index / width * step * wsize + index % width;
    //1
    int base_idx = index / width * step + index % width;
    //
    for (int row = height - 1; row >= 0; row--)
    {
        int shift = fbase + row * width;
        for (int d = 0; d < depth; d++)
        {
            int location = base + d * step + row * width;
            // Gradient flowing into out(row,d): direct term plus the three
            // taps of row+1 that consumed out(row,d).
            float temp = top_diff[location];
            if (row + 1 < height)
                temp +=
                    top_diff[location + width] * filters[shift + width + step];
            if (row + 1 < height && d + 1 < depth)
                temp +=
                    top_diff[location + width + step] * filters[shift + width +
                                                                2 * step];
            if (row + 1 < height && d - 1 >= 0)
                temp +=
                    top_diff[location + width - step] * filters[shift + width +
                                                                3 * step];
            top_diff[location] = temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
        // Tap w4: every depth slice of row+1 consumed out(row, argmax).
        if (row + 1 < height)
        {
            int k = idx[base_idx + row * width];
            int location = base + k * step + row * width;
            float temp = 0;
            for (int d = 0; d < depth; d++)
                temp +=
                    top_diff[base + row * width + width +
                             d * step] * filters[shift + width + 4 * step];
            top_diff[location] += temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
    }
    /* for(int d = 0; d < depth; d ++){
       int shift = fbase;
       int location = base + d * step;
       bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
       // bottom_diff[location] += top_diff[location];
       shift += width;
       location += width;
       bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
       }
       for(int row=1;row<height;row++){
       int location = base + row * width;
       int shift = fbase + row * width;
       bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
       location += (depth - 1)*step;
       bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
       }
     */
    // Depth-boundary fallback gradients (forward used the current input value
    // when d-1 or d+1 was out of range).
    for (int row = 0; row < height; row++)
    {
        int location = base + row * width;
        int shift = fbase + row * width;
        bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
        location += (depth - 1) * step;
        bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward of sga_down_forward w.r.t. the 5 filter taps. One thread per
// (batch*channel, pixel); n = N*C*height*width, so each thread owns one
// filter location (fbase) and no atomics are needed. temp_diff is the
// recurrence-propagated gradient (top_diff after sga_down_data_backward),
// top_data the re-computed forward output, bottom_data the raw input, and
// idx the per-pixel depth-argmax. For each tap, dL/dw = sum over depth of
// temp_diff * (the value the forward multiplied by that tap): the raw input
// for w0, the previous row's output (shifted in depth for w2/w3, argmax for
// w4) elsewhere; depth-boundary terms use bottom_data because the forward
// fell back to the current value there.
// NOTE(review): rows with row==0 only accumulate tap w0 — the forward's
// row-0 fallbacks (the commented-out else branch) are not accumulated;
// confirm this matches the data-backward approximation.
__global__ void sga_down_weight_backward (const int n, const float *bottom_data,
                                          const float *top_data, const float *temp_diff,
                                          const float *idx, const int height, const int width,
                                          const int depth, const int wsize,
                                          float *filters_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / step * step * depth + index % step; //up->down
    int fbase = index / step * step * wsize + index % step;
    int row = index % step / width;
    // Tap w0: multiplied the raw input at every depth.
    for (int i = 0; i < depth; i++)
        filters_diff[fbase] +=
            temp_diff[base + i * step] * bottom_data[base + i * step];
    if (row - 1 >= 0)
    {
        // Tap w1: previous row, same depth.
        int location = fbase + step;
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + i * step - width];
        // Tap w2: previous row, depth-1 (d==0 fell back to the raw input).
        location = fbase + 2 * step;
        filters_diff[location] += temp_diff[base] * bottom_data[base];
        for (int i = 1; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i - 1) * step -
                                                     width];
        // Tap w3: previous row, depth+1 (d==depth-1 fell back to the raw input).
        location = fbase + 3 * step;
        filters_diff[location] +=
            temp_diff[base + (depth - 1) * step] * bottom_data[base +
                                                               (depth -
                                                                1) * step];
        for (int i = 0; i < depth - 1; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i + 1) * step -
                                                      width];
    }
    /*
       else{
       for(int i=0; i<depth; i++){
       float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       }
       }
     */
    //1
    // Tap w4: previous row at its depth-argmax (float idx truncated to int).
    if (row - 1 >= 0)
    {
        int location = fbase + 4 * step;
        int k = idx[index - width];
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + k * step - width];
    }
    //
    /*
       else{
       int location = fbase + 2*step;
       for(int i=0; i<depth; i++)
       filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }
     */
}
// SGA forward sweep, bottom-to-top ("up"), in place on top_data.
// Mirror image of sga_down_forward: rows are visited height-1..0 and every
// neighbour reference comes from row+1 instead of row-1. One thread per
// (batch*channel, column); n = N*C*width; top_data pre-filled with the input.
// Taps: w0 current input, w1 next-row same d, w2 next-row d-1, w3 next-row
// d+1, w4 next-row depth-argmax (tracked incrementally in kp/k); out-of-range
// neighbours fall back to the current value.
__global__ void sga_up_forward (const int n, const float *filters, const int height,
                                const int width, const int depth, const int wsize,
                                float *top_data){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    // int wsize=radius+1;
    int base = index / width * step * depth + index % width; //up->down
    int fbase = index / width * step * wsize + index % width;
    int kp = 0; //1
    for (int row = height - 1; row >= 0; row--)
    {
        int shift = fbase + row * width;
        //2
        int base0 = base + row * width;
        int k = kp;   // depth-argmax of the row processed just before (row+1)
        kp = 0;
        //2
        for (int d = 0; d < depth; d++)
        {
            float temp = 0;
            int location = base + d * step + row * width;
            temp += top_data[location] * filters[shift];
            if (row + 1 < height)
                temp += top_data[location + width] * filters[shift + step];
            else
                temp += top_data[location] * filters[shift + step];
            if (row + 1 < height && d - 1 >= 0)
                temp +=
                    top_data[location + width - step] * filters[shift + 2 * step];
            else
                temp += top_data[location] * filters[shift + 2 * step];
            if (row + 1 < height && d + 1 < depth)
                temp +=
                    top_data[location + width + step] * filters[shift + 3 * step];
            else
                temp += top_data[location] * filters[shift + 3 * step];
            //3
            if (row + 1 < height)
                temp +=
                    top_data[base0 + width + k * step] * filters[shift + 4 * step];
            else
                temp += top_data[location] * filters[shift + 4 * step];
            top_data[location] = temp;
            if (top_data[base0 + kp * step] < temp)
                kp = d;
            //3
        }
    }
}
// Backward of sga_up_forward w.r.t. the input data: rows swept 0..height-1
// (reverse of the forward's height-1..0 order). top_diff is propagated in
// place through the recurrence; each accumulated gradient times tap w0 goes
// into bottom_diff. idx supplies the depth-argmax for tap w4. The final loop
// adds the depth-boundary fallback terms (w2 at d==0, w3 at d==depth-1).
// NOTE(review): as in the down pass, the forward's last-row fallback
// gradients (taps w1..w4 at row==height-1) are not accumulated here.
__global__ void sga_up_data_backward (const int n, const float *filters, float *top_diff,
                                      const float *idx, const int height, const int width,
                                      const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / width * step * depth + index % width; //up->down
    int fbase = index / width * step * wsize + index % width;
    //1
    int base_idx = index / width * step + index % width;
    //
    for (int row = 0; row < height; row++)
    {
        int shift = fbase + row * width;
        for (int d = 0; d < depth; d++)
        {
            int location = base + d * step + row * width;
            // Direct gradient plus the taps of row-1 that consumed out(row,d).
            float temp = top_diff[location];
            if (row - 1 >= 0)
                temp +=
                    top_diff[location - width] * filters[shift - width + step];
            if (row - 1 >= 0 && d + 1 < depth)
                temp +=
                    top_diff[location - width + step] * filters[shift - width +
                                                                2 * step];
            if (row - 1 >= 0 && d - 1 >= 0)
                temp +=
                    top_diff[location - width - step] * filters[shift - width +
                                                                3 * step];
            top_diff[location] = temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
        // Tap w4: row-1 consumed out(row, argmax) at every depth.
        if (row - 1 >= 0)
        {
            int k = idx[base_idx + row * width];
            int location = base + k * step + row * width;
            float temp = 0;
            for (int d = 0; d < depth; d++)
                temp +=
                    top_diff[base + row * width - width +
                             d * step] * filters[shift - width + 4 * step];
            top_diff[location] += temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
    }
    /* for(int d = 0; d < depth; d ++){
       int shift = fbase + width*(height-1);
       int location = base + width*(height-1) + d * step;
       bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
       // bottom_diff[location] += top_diff[location];
       shift -= width;
       location -= width;
       bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
       }
       for(int row=0;row<height-1;row++){
       int shift = fbase + row * width;
       int location = base + row * width;
       bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
       location += (depth - 1)*step;
       bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
       }*/
    // Depth-boundary fallback gradients.
    for (int row = 0; row < height; row++)
    {
        int shift = fbase + row * width;
        int location = base + row * width;
        bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
        location += (depth - 1) * step;
        bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward of sga_up_forward w.r.t. the 5 filter taps; mirror of
// sga_down_weight_backward with the neighbour row at +width instead of
// -width. One thread per (batch*channel, pixel) owns one filter location, so
// writes to filters_diff are race-free. temp_diff is the recurrence-
// propagated gradient, top_data the re-computed forward output, bottom_data
// the raw input, idx the per-pixel depth-argmax (float, truncated to int).
__global__ void sga_up_weight_backward (const int n, const float *bottom_data,
                                        const float *top_data, const float *temp_diff,
                                        const float *idx, const int height, const int width,
                                        const int depth, const int wsize, float *filters_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / step * step * depth + index % step; //up->down
    int fbase = index / step * step * wsize + index % step;
    int row = index % step / width;
    // Tap w0: multiplied the raw input at every depth.
    for (int i = 0; i < depth; i++)
        filters_diff[fbase] +=
            temp_diff[base + i * step] * bottom_data[base + i * step];
    if (row + 1 < height)
    {
        // Tap w1: next row, same depth.
        int location = fbase + step;
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + i * step + width];
        // Tap w2: next row, depth-1 (d==0 fell back to the raw input).
        location = fbase + 2 * step;
        filters_diff[location] += temp_diff[base] * bottom_data[base];
        for (int i = 1; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i - 1) * step +
                                                     width];
        // Tap w3: next row, depth+1 (d==depth-1 fell back to the raw input).
        location = fbase + 3 * step;
        filters_diff[location] +=
            temp_diff[base + (depth - 1) * step] * bottom_data[base +
                                                               (depth -
                                                                1) * step];
        for (int i = 0; i < depth - 1; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i + 1) * step +
                                                      width];
    }
    /*
       else{
       //int location = fbase + step;
       for(int i=0; i<depth; i++){
       float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       }
       //      filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //      location = fbase + 3*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //
       //      location = fbase + 4*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }*/
    //1
    // Tap w4: next row at its depth-argmax.
    if (row + 1 < height)
    {
        int location = fbase + 4 * step;
        int k = idx[index + width];
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + k * step + width];
    }
    //
    /*
       else{
       int location = fbase + 2*step;
       for(int i=0; i<depth; i++)
       filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }*/
}
// SGA forward sweep, left-to-right ("right"), in place on top_data.
// Same recurrence as sga_down_forward but along columns: one thread per
// (batch*channel, row); n = N*C*height. Taps: w0 current input, w1 previous
// column same d, w2 previous column d-1, w3 previous column d+1, w4 previous
// column depth-argmax (tracked in kp/k); out-of-range neighbours fall back
// to the current value. top_data must be pre-filled with the input.
__global__ void sga_right_forward (const int n, const float *filters, const int height,
                                   const int width, const int depth, const int wsize,
                                   float *top_data){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    // int wsize=radius+1;
    int base = index / height * step * depth + (index % height) * width; //up->down
    int fbase = index / height * step * wsize + (index % height) * width;
    int kp = 0;
    for (int col = 0; col < width; col++)
    {
        int shift = fbase + col;
        //2
        int base0 = base + col;
        int k = kp;   // depth-argmax of the previous column
        kp = 0;
        //2
        for (int d = 0; d < depth; d++)
        {
            float temp = 0;
            int location = base + d * step + col;
            temp += top_data[location] * filters[shift];
            if (col - 1 >= 0)
                temp += top_data[location - 1] * filters[shift + step];
            else
                temp += top_data[location] * filters[shift + step];
            if (col - 1 >= 0 && d - 1 >= 0)
                temp += top_data[location - 1 - step] * filters[shift + 2 * step];
            else
                temp += top_data[location] * filters[shift + 2 * step];
            if (col - 1 >= 0 && d + 1 < depth)
                temp += top_data[location - 1 + step] * filters[shift + 3 * step];
            else
                temp += top_data[location] * filters[shift + 3 * step];
            //3
            if (col - 1 >= 0)
                temp +=
                    top_data[base0 - 1 + k * step] * filters[shift + 4 * step];
            else
                temp += top_data[location] * filters[shift + 4 * step];
            top_data[location] = temp;
            if (top_data[base0 + kp * step] < temp)
                kp = d;
            //3
        }
    }
}
// Backward of sga_right_forward w.r.t. the input data: columns swept
// width-1..0 (reverse of the forward). top_diff is propagated in place
// through the column recurrence; each accumulated gradient times tap w0 is
// added to bottom_diff. idx supplies the per-pixel depth-argmax for tap w4.
// The final loop adds the depth-boundary fallback terms (w2 at d==0, w3 at
// d==depth-1). NOTE(review): col-0 fallback gradients (taps w1..w4) are not
// accumulated, matching the approximation in the other directions.
__global__ void sga_right_data_backward (const int n, const float *filters, float *top_diff,
                                         const float *idx, const int height, const int width,
                                         const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / height * step * depth + (index % height) * width; //up->down
    int fbase = index / height * step * wsize + (index % height) * width;
    //1
    int base_idx = index / height * step + (index % height) * width;
    //
    for (int col = width - 1; col >= 0; col--)
    {
        int shift = fbase + col;
        for (int d = 0; d < depth; d++)
        {
            int location = base + d * step + col;
            // Direct gradient plus the taps of col+1 that consumed out(col,d).
            float temp = top_diff[location];
            if (col + 1 < width)
                temp += top_diff[location + 1] * filters[shift + 1 + step];
            if (col + 1 < width && d + 1 < depth)
                temp +=
                    top_diff[location + 1 + step] * filters[shift + 1 + 2 * step];
            if (col + 1 < width && d - 1 >= 0)
                temp +=
                    top_diff[location + 1 - step] * filters[shift + 1 + 3 * step];
            top_diff[location] = temp;
            bottom_diff[location] += (temp * filters[shift]);
        }
        //2
        // Tap w4: every depth slice of col+1 consumed out(col, argmax).
        if (col + 1 < width)
        {
            int k = idx[base_idx + col];
            int location = base + k * step + col;
            float temp = 0;
            for (int d = 0; d < depth; d++)
                temp +=
                    top_diff[base + col + 1 + d * step] * filters[shift + 1 +
                                                                  4 * step];
            top_diff[location] += temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
    }
    /*
       for(int d = 0; d < depth; d ++){
       int shift = fbase;// + width*(height-1);
       int location = base;// + width*(height-1) + d * step;
       bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
       // bottom_diff[location] += top_diff[location];
       shift += 1;
       location += 1;
       bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
       }
       for(int col=1;col<width;col++){
       int shift = fbase + col;
       int location = base + col;
       bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
       location += (depth - 1)*step;
       bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
       }*/
    // Depth-boundary fallback gradients.
    for (int col = 0; col < width; col++)
    {
        int shift = fbase + col;
        int location = base + col;
        bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
        location += (depth - 1) * step;
        bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward of sga_right_forward w.r.t. the 5 filter taps; column-direction
// analogue of sga_down_weight_backward (neighbour at -1 instead of -width).
// One thread per (batch*channel, pixel) owns one filter location, so writes
// to filters_diff are race-free. Depth-boundary terms use bottom_data since
// the forward fell back to the raw current value there.
__global__ void sga_right_weight_backward (const int n, const float *bottom_data,
                                           const float *top_data, const float *temp_diff,
                                           const float *idx, const int height,
                                           const int width, const int depth, const int wsize,
                                           float *filters_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / step * step * depth + index % step; //up->down
    int fbase = index / step * step * wsize + index % step;
    //  int row = index%step/width;
    int col = index % step % width;
    // Tap w0: multiplied the raw input at every depth.
    for (int i = 0; i < depth; i++)
        filters_diff[fbase] +=
            temp_diff[base + i * step] * bottom_data[base + i * step];
    if (col - 1 >= 0)
    {
        // Tap w1: previous column, same depth.
        int location = fbase + step;
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + i * step - 1];
        // Tap w2: previous column, depth-1 (d==0 fell back to the raw input).
        location = fbase + 2 * step;
        filters_diff[location] += temp_diff[base] * bottom_data[base];
        for (int i = 1; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1];
        // Tap w3: previous column, depth+1 (d==depth-1 fell back to the raw input).
        location = fbase + 3 * step;
        filters_diff[location] +=
            temp_diff[base + (depth - 1) * step] * bottom_data[base +
                                                               (depth -
                                                                1) * step];
        for (int i = 0; i < depth - 1; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1];
    }
    /*
       else{
       //int location = fbase + step;
       for(int i=0; i<depth; i++){
       float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       }
       //      filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //      location = fbase + 3*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //
       //      location = fbase + 4*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }*/
    //1
    // Tap w4: previous column at its depth-argmax (float idx truncated to int).
    if (col - 1 >= 0)
    {
        int location = fbase + 4 * step;
        int k = idx[index - 1];
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + k * step - 1];
    }
    //
    /*
       else{
       int location = fbase + 2*step;
       for(int i=0; i<depth; i++)
       filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }*/
}
// SGA forward sweep, right-to-left ("left"), in place on top_data.
// Mirror of sga_right_forward: columns visited width-1..0, neighbours taken
// from col+1. One thread per (batch*channel, row); n = N*C*height. Taps:
// w0 current input, w1 next column same d, w2 next column d-1, w3 next
// column d+1, w4 next column depth-argmax (tracked in kp/k); out-of-range
// neighbours fall back to the current value.
__global__ void sga_left_forward (const int n, const float *filters, const int height,
                                  const int width, const int depth, const int wsize,
                                  float *top_data){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    // int wsize=radius+1;
    int base = index / height * step * depth + (index % height) * width; //up->down
    int fbase = index / height * step * wsize + (index % height) * width;
    int kp = 0;
    for (int col = width - 1; col >= 0; col--)
    {
        int shift = fbase + col;
        //2
        int base0 = base + col;
        int k = kp;   // depth-argmax of the column processed just before (col+1)
        kp = 0;
        //2
        for (int d = 0; d < depth; d++)
        {
            float temp = 0;
            int location = base + d * step + col;
            temp += top_data[location] * filters[shift];
            if (col + 1 < width)
                temp += top_data[location + 1] * filters[shift + step];
            else
                temp += top_data[location] * filters[shift + step];
            if (col + 1 < width && d - 1 >= 0)
                temp += top_data[location + 1 - step] * filters[shift + 2 * step];
            else
                temp += top_data[location] * filters[shift + 2 * step];
            if (col + 1 < width && d + 1 < depth)
                temp += top_data[location + 1 + step] * filters[shift + 3 * step];
            else
                temp += top_data[location] * filters[shift + 3 * step];
            //3
            if (col + 1 < width)
                temp +=
                    top_data[base0 + 1 + k * step] * filters[shift + 4 * step];
            else
                temp += top_data[location] * filters[shift + 4 * step];
            top_data[location] = temp;
            if (top_data[base0 + kp * step] < temp)
                kp = d;
            //3
        }
    }
}
// Backward of sga_left_forward w.r.t. the input data: columns swept
// 0..width-1 (reverse of the forward's width-1..0 order). top_diff is
// propagated in place; each accumulated gradient times tap w0 is added to
// bottom_diff. idx supplies the per-pixel depth-argmax for tap w4. The final
// loop adds the depth-boundary fallback terms (w2 at d==0, w3 at
// d==depth-1). NOTE(review): last-column fallback gradients (taps w1..w4 at
// col==width-1) are not accumulated, matching the other directions.
__global__ void sga_left_data_backward (const int n, const float *filters, float *top_diff,
                                        const float *idx, const int height, const int width,
                                        const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / height * step * depth + (index % height) * width; //up->down
    int fbase = index / height * step * wsize + (index % height) * width;
    //1
    int base_idx = index / height * step + (index % height) * width;
    //
    for (int col = 0; col < width; col++)
    {
        int shift = fbase + col;
        for (int d = 0; d < depth; d++)
        {
            int location = base + d * step + col;
            // Direct gradient plus the taps of col-1 that consumed out(col,d).
            float temp = top_diff[location];
            if (col - 1 >= 0)
                temp += top_diff[location - 1] * filters[shift - 1 + step];
            if (col - 1 >= 0 && d + 1 < depth)
                temp +=
                    top_diff[location - 1 + step] * filters[shift - 1 + 2 * step];
            if (col - 1 >= 0 && d - 1 >= 0)
                temp +=
                    top_diff[location - 1 - step] * filters[shift - 1 + 3 * step];
            top_diff[location] = temp;
            bottom_diff[location] += temp * filters[shift];
        }
        //2
        // Tap w4: every depth slice of col-1 consumed out(col, argmax).
        if (col - 1 >= 0)
        {
            int k = idx[base_idx + col];
            int location = base + k * step + col;
            float temp = 0;
            for (int d = 0; d < depth; d++)
                temp +=
                    top_diff[base + col - 1 + d * step] * filters[shift - 1 +
                                                                  4 * step];
            top_diff[location] += temp;
            //top_diff[base + col - 1 + d*step] * filters[shift - 1 + 4*step];
            bottom_diff[location] += temp * filters[shift];
        }
        //2
    }
    /*
       for(int d = 0; d < depth; d ++){
       int shift = fbase + width-1;// + width*(height-1);
       int location = base + width-1;// + width*(height-1) + d * step;
       bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
       // bottom_diff[location] += top_diff[location];
       shift -= 1;
       location -= 1;
       bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
       }
       for(int col=0;col<width-1;col++){
       int shift = fbase + col;
       int location = base + col;
       bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
       location += (depth - 1)*step;
       bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
       }*/
    // Depth-boundary fallback gradients.
    for (int col = 0; col < width; col++)
    {
        int shift = fbase + col;
        int location = base + col;
        bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
        location += (depth - 1) * step;
        bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward of sga_left_forward w.r.t. the 5 filter taps; mirror of
// sga_right_weight_backward with the neighbour column at +1 instead of -1.
// One thread per (batch*channel, pixel) owns one filter location, so writes
// to filters_diff are race-free. Depth-boundary terms use bottom_data since
// the forward fell back to the raw current value there.
__global__ void sga_left_weight_backward (const int n, const float *bottom_data,
                                          const float *top_data, const float *temp_diff,
                                          const float *idx, const int height, const int width,
                                          const int depth, const int wsize,
                                          float *filters_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n)
    {
        return;
    }
    int step = height * width;
    int base = index / step * step * depth + index % step; //up->down
    int fbase = index / step * step * wsize + index % step;
    //  int row = index%step/width;
    int col = index % step % width;
    // Tap w0: multiplied the raw input at every depth.
    for (int i = 0; i < depth; i++)
        filters_diff[fbase] +=
            temp_diff[base + i * step] * bottom_data[base + i * step];
    if (col + 1 < width)
    {
        // Tap w1: next column, same depth.
        int location = fbase + step;
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + i * step + 1];
        // Tap w2: next column, depth-1 (d==0 fell back to the raw input).
        location = fbase + 2 * step;
        filters_diff[location] += temp_diff[base] * bottom_data[base];
        for (int i = 1; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i - 1) * step + 1];
        // Tap w3: next column, depth+1 (d==depth-1 fell back to the raw input).
        location = fbase + 3 * step;
        filters_diff[location] +=
            temp_diff[base + (depth - 1) * step] * bottom_data[base +
                                                               (depth -
                                                                1) * step];
        for (int i = 0; i < depth - 1; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + (i + 1) * step + 1];
    }
    /*
       else{
       //int location = fbase + step;
       for(int i=0; i<depth; i++){
       float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
       }
       //      filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //      location = fbase + 3*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       //
       //      location = fbase + 4*step;
       //      for(int i=0; i<depth; i++)
       //        filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }*/
    //1
    // Tap w4: next column at its depth-argmax (float idx truncated to int).
    if (col + 1 < width)
    {
        int location = fbase + 4 * step;
        int k = idx[index + 1];
        for (int i = 0; i < depth; i++)
            filters_diff[location] +=
                temp_diff[base + i * step] * top_data[base + k * step + 1];
    }
    //
    /*
       else{
       int location = fbase + 2*step;
       for(int i=0; i<depth; i++)
       filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
       }
     */
}
// Host wrapper for the SGA forward pass.
// input is 5-D (num, channel, depth, height, width); each guidance tensor is
// (num, channel, wsize, height, width) with wsize taken from size(2).
// Runs the four directional sweeps (down, up, right, left) on a fresh copy
// of the input in temp_out, and fuses them with an element-wise max into
// output; mask records which direction won at each element (1 = up,
// 2 = right, 3 = left; the down result is the baseline copied into output).
// Launch config: down/up use one thread per (n*c, column), right/left one
// thread per (n*c, row); the Max kernel is launched over all N elements.
// NOTE(review): mask is never written for elements where the down pass wins
// (the hipMemset below is commented out) — presumably the caller
// zero-initializes mask; confirm.
// NOTE(review): Tensor::data<T>() is deprecated in newer PyTorch in favor of
// data_ptr<T>(); no hip* return codes are checked, and all work is issued on
// the default stream (the current-stream lookup is commented out).
void sga_kernel_forward (at::Tensor input, at::Tensor guidance_down,
                         at::Tensor guidance_up, at::Tensor guidance_right,
                         at::Tensor guidance_left, at::Tensor temp_out,
                         at::Tensor output, at::Tensor mask){
    int num = input.size(0);
    int channel = input.size(1);
    int depth = input.size(2);
    int height = input.size(3);
    int width = input.size(4);
    int wsize = guidance_down.size(2);
    //THCudaTensor_nElement(state, input);
    float *top_data = output.data<float>();
    float *top_temp = temp_out.data<float>();
    float *top_mask = mask.data<float>();
    const float *bottom_data = input.data<float>();
    const float *g0 = guidance_down.data<float>();
    const float *g1 = guidance_up.data<float>();
    const float *g2 = guidance_right.data<float>();
    const float *g3 = guidance_left.data<float>();
    int n = num * channel * width;   // threads for the vertical sweeps
    int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
    int N = input.numel ();
    //  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    // Down sweep: result becomes the baseline in output.
    hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
               hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( sga_down_forward) , dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, n, g0, height, width,
                       depth, wsize, top_temp);
    //  hipMemset( top_mask, 0, sizeof(float)*N);
    hipMemcpy (top_data, top_temp, sizeof (float) * N,
               hipMemcpyDeviceToDevice);
    // Up sweep, fused in with element-wise max (mask id 1).
    hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
               hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( sga_up_forward) , dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, n, g1, height, width,
                       depth, wsize, top_temp);
    hipLaunchKernelGGL(( Max) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
                       dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, top_data, top_mask, 1);
    // Horizontal sweeps use one thread per (n*c, row).
    n = num * channel * height;
    threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
    // Right sweep (mask id 2).
    hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
               hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( sga_right_forward) , dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, n, g2, height, width,
                       depth, wsize,
                       top_temp);
    hipLaunchKernelGGL(( Max) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
                       dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, top_data, top_mask, 2);
    // Left sweep (mask id 3). Note temp_out exits holding the left result,
    // which the backward wrapper relies on.
    hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
               hipMemcpyDeviceToDevice);
    hipLaunchKernelGGL(( sga_left_forward) , dim3(threads), dim3(CUDA_NUM_THREADS) , 0, 0, n, g3, height, width,
                       depth, wsize, top_temp);
    hipLaunchKernelGGL(( Max) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
                       dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, top_data, top_mask, 3);
    //  hipMemset( top_temp, 0, sizeof(float)*THCudaTensor_nElement(state, top_temp));
}
// Backward pass of the four-direction SGA aggregation (left, down, up, right).
// For each direction it:
//   1. rebuilds that direction's forward sweep into top_temp (the left sweep
//      reuses the buffer left over from the forward call, hence the commented
//      memcpy/launch below),
//   2. routes gradOutput through the per-element direction mask into top_grad,
//   3. recomputes the per-pixel arg-max depth (idx) of the sweep output, which
//      the weight-4 ("best disparity") gradient path needs,
//   4. accumulates data gradients into gradInput and per-direction weight
//      gradients into grad_down/grad_up/grad_right/grad_left.
// NOTE(review): gradInput and the grad_* tensors are accumulated into (+=) by
// the kernels and are assumed pre-zeroed by the caller -- confirm at call sites.
// Fix: the "backward for up" section previously read `n = * channel * width;`
// (dereference of an int / dropped `num`); corrected to match the other
// directions.
void sga_kernel_backward (at::Tensor input, at::Tensor guidance_down,
			  at::Tensor guidance_up, at::Tensor guidance_right,
			  at::Tensor guidance_left, at::Tensor temp_out,
			  at::Tensor mask, at::Tensor max_idx,
			  at::Tensor gradOutput, at::Tensor temp_grad,
			  at::Tensor gradInput, at::Tensor grad_down,
			  at::Tensor grad_up, at::Tensor grad_right,
			  at::Tensor grad_left){
  int num = input.size(0);
  int channel = input.size(1);
  int depth = input.size(2);
  int height = input.size(3);
  int width = input.size(4);
  int wsize = guidance_down.size(2);	// number of aggregation weights per pixel
  float *top_grad = temp_grad.data<float>();
  float *top_temp = temp_out.data<float>();
  const float *top_mask = mask.data<float>();
  const float *bottom_data = input.data<float>();
  const float *grad_out = gradOutput.data<float>();
  const float *g0 = guidance_down.data<float>();
  const float *g1 = guidance_up.data<float>();
  const float *g2 = guidance_right.data<float>();
  const float *g3 = guidance_left.data<float>();
  float *grad0 = grad_down.data<float>();
  float *grad1 = grad_up.data<float>();
  float *grad2 = grad_right.data<float>();
  float *grad3 = grad_left.data<float>();
  float *grad_input = gradInput.data<float>();
  float *idx = max_idx.data<float>();
  int N = input.numel ();
  // backward for left: top_temp still holds the left sweep from the forward
  // pass, so it is not recomputed here.
  int n = num * channel * height;
  // hipMemcpy(top_temp, bottom_data, sizeof(float)*N, hipMemcpyDeviceToDevice);
  // sga_left_forward<<<(n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>
  //   (n,g3,height,width,depth,wsize,top_temp);
  hipMemset (top_grad, 0, sizeof (float) * N);
  hipLaunchKernelGGL(( get_temp_grad) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	       dim3(CUDA_NUM_THREADS) , 0, 0, N, grad_out, top_mask, top_grad, 3);
  N = num * channel * width * height;
  hipLaunchKernelGGL(( MaxDepth) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	     dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, height * width, depth, idx);
  hipLaunchKernelGGL(( sga_left_data_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			   dim3(CUDA_NUM_THREADS) , 0, 0, n, g3, top_grad, idx, height, width, depth, wsize,
			   grad_input);
  n = num * channel * width * height;
  hipLaunchKernelGGL(( sga_left_weight_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			     dim3(CUDA_NUM_THREADS) , 0, 0, n, bottom_data, top_temp, top_grad, idx, height,
			     width, depth, wsize, grad3);
  // backward for down: re-run the down sweep to rebuild top_temp.
  N = input.numel ();
  n = num * channel * width;
  hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
	      hipMemcpyDeviceToDevice);
  hipLaunchKernelGGL(( sga_down_forward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
		       dim3(CUDA_NUM_THREADS) , 0, 0, n, g0, height, width, depth, wsize, top_temp);
  hipMemset (top_grad, 0, sizeof (float) * N);
  hipLaunchKernelGGL(( get_temp_grad) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	       dim3(CUDA_NUM_THREADS) , 0, 0, N, grad_out, top_mask, top_grad, 0);
  N = num * channel * width * height;
  hipLaunchKernelGGL(( MaxDepth) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	     dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, height * width, depth, idx);
  hipLaunchKernelGGL(( sga_down_data_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			   dim3(CUDA_NUM_THREADS) , 0, 0, n, g0, top_grad, idx, height, width, depth, wsize,
			   grad_input);
  n = num * channel * width * height;
  hipLaunchKernelGGL(( sga_down_weight_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			     dim3(CUDA_NUM_THREADS) , 0, 0, n, bottom_data, top_temp, top_grad, idx, height,
			     width, depth, wsize, grad0);
  // backward for up
  N = input.numel ();
  n = num * channel * width;	// fixed: was `n = * channel * width;`
  hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
	      hipMemcpyDeviceToDevice);
  hipLaunchKernelGGL(( sga_up_forward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
		     dim3(CUDA_NUM_THREADS) , 0, 0, n, g1, height, width, depth, wsize, top_temp);
  hipMemset (top_grad, 0, sizeof (float) * N);
  hipLaunchKernelGGL(( get_temp_grad) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	       dim3(CUDA_NUM_THREADS) , 0, 0, N, grad_out, top_mask, top_grad, 1);
  N = num * channel * width * height;
  hipLaunchKernelGGL(( MaxDepth) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	     dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, height * width, depth, idx);
  hipLaunchKernelGGL(( sga_up_data_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			 dim3(CUDA_NUM_THREADS) , 0, 0, n, g1, top_grad, idx, height, width, depth, wsize,
			 grad_input);
  n = num * channel * width * height;
  hipLaunchKernelGGL(( sga_up_weight_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			   dim3(CUDA_NUM_THREADS) , 0, 0, n, bottom_data, top_temp, top_grad, idx, height,
			   width, depth, wsize, grad1);
  // backward for right
  N = input.numel ();
  n = num * channel * height;
  hipMemcpy (top_temp, bottom_data, sizeof (float) * N,
	      hipMemcpyDeviceToDevice);
  hipLaunchKernelGGL(( sga_right_forward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			dim3(CUDA_NUM_THREADS) , 0, 0, n, g2, height, width, depth, wsize, top_temp);
  hipMemset (top_grad, 0, sizeof (float) * N);
  hipLaunchKernelGGL(( get_temp_grad) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	       dim3(CUDA_NUM_THREADS) , 0, 0, N, grad_out, top_mask, top_grad, 2);
  N = num * channel * width * height;
  hipLaunchKernelGGL(( MaxDepth) , dim3((N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
	     dim3(CUDA_NUM_THREADS) , 0, 0, N, top_temp, height * width, depth, idx);
  hipLaunchKernelGGL(( sga_right_data_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			    dim3(CUDA_NUM_THREADS) , 0, 0, n, g2, top_grad, idx, height, width, depth, wsize,
			    grad_input);
  n = num * channel * width * height;
  hipLaunchKernelGGL(( sga_right_weight_backward) , dim3((n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS),
			      dim3(CUDA_NUM_THREADS) , 0, 0, n, bottom_data, top_temp, top_grad, idx, height,
			      width, depth, wsize, grad2);
}
// LGA (local guided aggregation) forward filtering.
// One thread per element of bottom_data; `channel` here is the depth of the
// cost volume, `step` the size of one depth slice. Each output element
// accumulates a weighted sum over a (2*radius+1)^2 spatial window across 3
// neighbouring depth slices, using a per-pixel filter bank of wsize*wsize*3
// weights. Out-of-bounds neighbours fall back to the centre element (shift
// stays 0).
// NOTE(review): top_data is accumulated into (+=); it is assumed pre-zeroed
// by the caller -- confirm at call sites.
__global__ void lga_filtering_forward (const int n, const float *bottom_data,
				       const float *filters, const int height,
				       const int width, const int channel, const int radius,
				       float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // printf("OK\n");
  // printf("%d, %.2f, %.2f\n", index, bottom_data[index], top_data[index]);
  if (index >= n)
    {
      return;
    }
  // top_data[index]=1.0;
  // assert(0);
  int step = height * width;	// elements in one depth slice
  int wsize = 2 * radius + 1;	// spatial window diameter
  // int fsize=wsize*wsize*3;
  // Base offset of this spatial site's filter bank (banks are per-pixel,
  // shared across depth).
  int fbase =
    index / (step * channel) * (step * wsize * wsize * 3) + index % step;
  int row = index % step / width;
  int col = index % width;
  int depth = index / step % channel;
  for (int d = -1; d <= 1; d++)
    {
      for (int r = -radius; r <= radius; r++)
	{
	  for (int c = -radius; c <= radius; c++)
	    {
	      int rr = r + row;
	      int cc = c + col;
	      int dd = d + depth;
	      int shift = 0;	// 0 => clamp to the centre element when out of bounds
	      if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
		  && dd < channel)
		shift = r * width + c + d * step;
	      // Linear index of this tap inside the wsize*wsize*3 bank.
	      int location =
		(d + 1) * (wsize * wsize) + (r + radius) * wsize + c + radius;
	      top_data[index] +=
		bottom_data[index + shift] * filters[fbase + location * step];
	    }
	}
    }
  // top_data[index]=1.0;
  // printf("%d, %d, %d, %.2f, %.2f\n", index, row, col, bottom_data[index], top_data[index]);
}
// LGA backward pass w.r.t. the filter weights.
// One thread per filter element; accumulates over all `channel` depth slices:
// dL/dW[tap] = sum_i top_diff[i] * bottom_data[neighbour tapped in forward].
// The else branch mirrors the forward's boundary clamp: when the tapped
// neighbour was out of bounds, the forward read the centre element, so the
// gradient multiplies the centre element instead.
// NOTE(review): filter_diff is accumulated into (+=); assumed pre-zeroed by
// the caller -- confirm.
__global__ void lga_filter_backward (const int n, const float *bottom_data,
				     const float *top_diff, const int height, const int width,
				     const int channel, const int radius, float *filter_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int wsize = 2 * radius + 1;
  // Base offset of the data element this filter bank belongs to.
  int base =
    index / (step * wsize * wsize * 3) * (step * channel) + index % step;
  // Decompose the flat tap index back into its (d, r, c) window offsets.
  int location = index / step % (wsize * wsize * 3);
  int d = location / (wsize * wsize) - 1;
  int r = (location / wsize) % wsize - radius;
  int c = location % wsize - radius;
  int rr = index % step / width + r;
  int cc = index % width + c;
  for (int i = 0; i < channel; i++)
    {
      int dd = i + d;
      if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
	  && dd < channel)
	{
	  int shift = r * width + c + d * step;
	  filter_diff[index] +=
	    top_diff[base + i * step] * bottom_data[base + shift + i * step];
	}
      else
	filter_diff[index] +=
	  top_diff[base + i * step] * bottom_data[base + i * step];
    }
}
// LGA backward pass w.r.t. the input data.
// One thread per input element. For each window tap there are two cases:
//  - in bounds: gather the contribution routed *to* this element from the
//    neighbour at (rr,cc,dd) -- the neighbour's filter bank (fbase shifted by
//    r*width+c) evaluated at the mirrored tap index `location`;
//  - out of bounds: the forward clamped that tap onto this element itself, so
//    the local weight at the unmirrored tap is applied to top_diff[index].
// NOTE(review): bottom_diff is accumulated into (+=); assumed pre-zeroed by
// the caller -- confirm.
__global__ void lga_data_backward (const int n, const float *filters, const float *top_diff,
				   const int height, const int width, const int channel,
				   const int radius, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int wsize = 2 * radius + 1;
  // int fsize=wsize*wsize*3;
  int fbase =
    index / (step * channel) * (step * wsize * wsize * 3) + index % step;
  int row = index % step / width;
  int col = index % width;
  int depth = index / step % channel;
  for (int d = -1; d <= 1; d++)
    {
      for (int r = -radius; r <= radius; r++)
	{
	  for (int c = -radius; c <= radius; c++)
	    {
	      int rr = r + row;
	      int cc = c + col;
	      int dd = d + depth;
	      // int shift = 0;
	      if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
		  && dd < channel)
		{
		  int shift = r * width + c + d * step;
		  // int fshift= r*width+c;
		  // Mirrored tap: the weight the neighbour applied to us.
		  int location =
		    (-d + 1) * (wsize * wsize) + (-r + radius) * wsize - c +
		    radius;
		  bottom_diff[index] +=
		    top_diff[index + shift] * filters[fbase + r * width + c +
						      location * step];
		}
	      else
		{
		  int location =
		    (d + 1) * (wsize * wsize) + (r + radius) * wsize + c +
		    radius;
		  bottom_diff[index] +=
		    top_diff[index] * filters[fbase + location * step];
		}
	    }
	}
    }
}
// Host wrapper for the LGA forward filtering kernel (4-D input: N x C x H x W).
// One thread per input element; the kernel accumulates into `output`, which
// the caller is expected to have zeroed.
void lga_forward (at::Tensor input, at::Tensor filters, at::Tensor output,
		  const int radius){
  const int channel = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);
  const int count = input.numel ();
  const int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  hipLaunchKernelGGL(( lga_filtering_forward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, input.data<float>(), filters.data<float>(),
		     height, width, channel, radius, output.data<float>());
}
// Host wrapper for the LGA backward pass (4-D input: N x C x H x W).
// Launches the filter-gradient kernel (one thread per filter element,
// gradFilters assumed pre-zeroed by the caller), then zeroes gradInput and
// launches the data-gradient kernel (one thread per input element).
void lga_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
		   at::Tensor gradInput, at::Tensor gradFilters, const int radius){
  const int channel = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);
  int count = filters.numel ();
  int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  hipLaunchKernelGGL(( lga_filter_backward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, input.data<float>(), gradOutput.data<float>(),
		     height, width, channel, radius, gradFilters.data<float>());
  count = input.numel ();
  blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  float *grad = gradInput.data<float>();
  hipMemset (grad, 0, sizeof (float) * count);	// data kernel accumulates
  hipLaunchKernelGGL(( lga_data_backward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, filters.data<float>(), gradOutput.data<float>(),
		     height, width, channel, radius, grad);
}
// Host wrapper for LGA filtering on a 5-D input (N x ? x C x H x W); only the
// dimension offsets differ from lga_forward, the kernel is the same.
void lga3d_forward (at::Tensor input, at::Tensor filters, at::Tensor output,
		    const int radius){
  const int channel = input.size(2);
  const int height = input.size(3);
  const int width = input.size(4);
  const int count = input.numel ();
  const int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  hipLaunchKernelGGL(( lga_filtering_forward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, input.data<float>(), filters.data<float>(),
		     height, width, channel, radius, output.data<float>());
}
// Host wrapper for the LGA backward pass on a 5-D input (N x ? x C x H x W);
// same kernels as lga_backward, only the dimension offsets differ.
void lga3d_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
		     at::Tensor gradInput, at::Tensor gradFilters,
		     const int radius){
  const int channel = input.size(2);
  const int height = input.size(3);
  const int width = input.size(4);
  int count = filters.numel ();
  int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  hipLaunchKernelGGL(( lga_filter_backward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, input.data<float>(), gradOutput.data<float>(),
		     height, width, channel, radius, gradFilters.data<float>());
  count = input.numel ();
  blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  float *grad = gradInput.data<float>();
  hipMemset (grad, 0, sizeof (float) * count);	// data kernel accumulates
  hipLaunchKernelGGL(( lga_data_backward) , dim3(blocks), dim3(CUDA_NUM_THREADS) , 0, 0,
		     count, filters.data<float>(), gradOutput.data<float>(),
		     height, width, channel, radius, grad);
}
#ifdef __cplusplus
}
#endif
| c6fbdf1d68e9c3ce61fb0547d819138209fbcde8.cu | //#include <torch/torch.h>
#include <torch/extension.h>
//#include <torch/serialize/tensor.h>
//#include <ATen/ATen.h>
//#include <ATen/cuda/CUDAContext.h>
#define CUDA_NUM_THREADS 256
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
// Element-wise running maximum: where top_temp beats the value already in
// top_data, take it and record which direction produced it in `mask`.
// One thread per element; `mask_index` tags the candidate's source.
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask,
		     const int mask_index){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n)
    return;
  float candidate = top_temp[i];
  if (candidate > top_data[i])
    {
      top_data[i] = candidate;
      mask[i] = mask_index;
    }
}
// Route the upstream gradient to one direction: copy gradOutput into
// top_grad only where the max-selection mask equals mask_index. Other
// entries of top_grad are left untouched (caller zeroes the buffer first).
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask,
			       float *top_grad, const int mask_index){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && ((int) mask[i]) == mask_index)
    top_grad[i] = gradOutput[i];
}
// Arg-max over the depth axis. One thread per spatial site (n = N*C*H*W);
// `step` is the stride between consecutive depth slices. Ties keep the
// lowest depth index; the result is stored as a float in `idx`.
__global__ void MaxDepth (const int n, const float *bottom_data, const int step,
			  const int depth, float *idx){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n)
    return;
  const float *col = bottom_data + i / step * step * depth + i % step;
  int best = 0;
  for (int d = 1; d < depth; d++)
    if (col[best * step] < col[d * step])
      best = d;
  idx[i] = best;
}
// SGA (semi-global aggregation), top-to-bottom sweep.
// One thread per (batch*channel, column) line; rows are processed
// sequentially and top_data is updated in place, so it serves as both input
// and running aggregate. Per pixel the filter holds `wsize` weights:
//   [0] current element              [1] same depth, previous row
//   [2] depth-1, previous row        [3] depth+1, previous row
//   [4] previous row at its arg-max depth `k`
// Out-of-range neighbours fall back to the current element. `kp` tracks the
// arg-max over depth of the row just written and becomes `k` for the next row.
__global__ void sga_down_forward (const int n, const float *filters, const int height,
				  const int width, const int depth, const int wsize,
				  float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  // int wsize=radius+1;
  int base = index / width * step * depth + index % width;	//up->down
  int fbase = index / width * step * wsize + index % width;
  int kp = 0;
  for (int row = 0; row < height; row++)
    {
      int shift = fbase + row * width;
      int base0 = base + row * width;
      int k = kp;
      kp = 0;
      /* if(row-1>=0)
         for(int i = 1; i < depth; i++){
         if(top_data[base0-width+k*step]<top_data[base0-width+i*step])
         k = i;
       */
      for (int d = 0; d < depth; d++)
	{
	  float temp = 0;
	  int location = base0 + d * step;
	  temp += top_data[location] * filters[shift];
	  if (row - 1 >= 0)
	    temp += top_data[location - width] * filters[shift + step];
	  else
	    temp += top_data[location] * filters[shift + step];
	  if (row - 1 >= 0 && d - 1 >= 0)
	    temp +=
	      top_data[location - width - step] * filters[shift + 2 * step];
	  else
	    temp += top_data[location] * filters[shift + 2 * step];
	  if (row - 1 >= 0 && d + 1 < depth)
	    temp +=
	      top_data[location - width + step] * filters[shift + 3 * step];
	  else
	    temp += top_data[location] * filters[shift + 3 * step];
	  if (row - 1 >= 0)
	    temp +=
	      top_data[base0 - width + k * step] * filters[shift + 4 * step];
	  else
	    temp += top_data[location] * filters[shift + 4 * step];
	  top_data[location] = temp;
	  // Track the arg-max of the freshly written row for the next row's
	  // weight-4 path.
	  if (top_data[base0 + kp * step] < temp)
	    kp = d;
	}
    }
}
// Backward (w.r.t. data) of sga_down_forward. One thread per column line,
// sweeping rows bottom-to-top (reverse of the forward). top_diff is rewritten
// in place into the gradient of the recurrence state at each row (gathering
// the weight-1/2/3 paths from row+1 and the weight-4 path via the arg-max
// index `idx`), then accumulated into bottom_diff through the centre weight
// filters[shift]. The trailing loop adds the depth-boundary fallback terms
// (weights 2 and 3 clamp onto the current element at depth 0 / depth-1).
// NOTE(review): row-0 / boundary fallback terms for weights 1 and 4 are not
// added here (see the large commented-out block) -- confirm this matches the
// intended gradient.
__global__ void sga_down_data_backward (const int n, const float *filters, float *top_diff,
					const float *idx, const int height, const int width,
					const int depth, const int wsize, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / width * step * depth + index % width;	//up->down
  int fbase = index / width * step * wsize + index % width;
  //1
  int base_idx = index / width * step + index % width;
  //
  for (int row = height - 1; row >= 0; row--)
    {
      int shift = fbase + row * width;
      for (int d = 0; d < depth; d++)
	{
	  int location = base + d * step + row * width;
	  float temp = top_diff[location];
	  if (row + 1 < height)
	    temp +=
	      top_diff[location + width] * filters[shift + width + step];
	  if (row + 1 < height && d + 1 < depth)
	    temp +=
	      top_diff[location + width + step] * filters[shift + width +
							  2 * step];
	  if (row + 1 < height && d - 1 >= 0)
	    temp +=
	      top_diff[location + width - step] * filters[shift + width +
							  3 * step];
	  top_diff[location] = temp;
	  bottom_diff[location] += temp * filters[shift];
	}
      //2
      if (row + 1 < height)
	{
	  // Weight-4 path: row+1 read this row at its arg-max depth k.
	  int k = idx[base_idx + row * width];
	  int location = base + k * step + row * width;
	  float temp = 0;
	  for (int d = 0; d < depth; d++)
	    temp +=
	      top_diff[base + row * width + width +
		       d * step] * filters[shift + width + 4 * step];
	  top_diff[location] += temp;
	  bottom_diff[location] += temp * filters[shift];
	}
      //2
    }
  /* for(int d = 0; d < depth; d ++){
     int shift = fbase;
     int location = base + d * step;
     bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
     //    bottom_diff[location] += top_diff[location];
     shift += width;
     location += width;
     bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
     }
     for(int row=1;row<height;row++){
     int location = base + row * width;
     int shift = fbase + row * width;
     bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
     location += (depth - 1)*step;
     bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
     }
   */
  for (int row = 0; row < height; row++)
    {
      int location = base + row * width;
      int shift = fbase + row * width;
      bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
      location += (depth - 1) * step;
      bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward (w.r.t. the wsize aggregation weights) of sga_down_forward.
// One thread per spatial site. For each weight, sums over depth the product
// of the recurrence gradient (temp_diff, produced by the data-backward pass)
// with the signal that weight multiplied in the forward: the raw input for
// weight 0, and previous-row aggregated values (top_data) for weights 1-4.
// Depth-edge fallbacks (weights 2 and 3) multiply the raw input, matching
// the forward's clamping.
// NOTE(review): the row==0 fallback contributions are commented out below --
// confirm whether they were intentionally dropped.
__global__ void sga_down_weight_backward (const int n, const float *bottom_data,
					  const float *top_data, const float *temp_diff,
					  const float *idx, const int height, const int width,
					  const int depth, const int wsize,
					  float *filters_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / step * step * depth + index % step;	//up->down
  int fbase = index / step * step * wsize + index % step;
  int row = index % step / width;
  for (int i = 0; i < depth; i++)
    filters_diff[fbase] +=
      temp_diff[base + i * step] * bottom_data[base + i * step];
  if (row - 1 >= 0)
    {
      int location = fbase + step;
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + i * step - width];
      location = fbase + 2 * step;
      filters_diff[location] += temp_diff[base] * bottom_data[base];
      for (int i = 1; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i - 1) * step -
						width];
      location = fbase + 3 * step;
      filters_diff[location] +=
	temp_diff[base + (depth - 1) * step] * bottom_data[base +
							   (depth -
							    1) * step];
      for (int i = 0; i < depth - 1; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i + 1) * step -
						width];
    }
  /*
     else{
     for(int i=0; i<depth; i++){
     float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     }
     }
   */
  //1
  if (row - 1 >= 0)
    {
      // Weight 4: the previous row was read at its arg-max depth k.
      int location = fbase + 4 * step;
      int k = idx[index - width];
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + k * step - width];
    }
  //
  /*
     else{
     int location = fbase + 2*step;
     for(int i=0; i<depth; i++)
     filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }
   */
}
// SGA bottom-to-top sweep: identical structure to sga_down_forward but rows
// are visited from height-1 down to 0 and the recurrence reads row+1 instead
// of row-1. One thread per (batch*channel, column) line; top_data is updated
// in place. Weight layout: [0] current, [1] same depth next row, [2] depth-1
// next row, [3] depth+1 next row, [4] next row at its arg-max depth.
__global__ void sga_up_forward (const int n, const float *filters, const int height,
				const int width, const int depth, const int wsize,
				float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  // int wsize=radius+1;
  int base = index / width * step * depth + index % width;	//up->down
  int fbase = index / width * step * wsize + index % width;
  int kp = 0;			//1
  for (int row = height - 1; row >= 0; row--)
    {
      int shift = fbase + row * width;
      //2
      int base0 = base + row * width;
      int k = kp;
      kp = 0;
      //2
      for (int d = 0; d < depth; d++)
	{
	  float temp = 0;
	  int location = base + d * step + row * width;
	  temp += top_data[location] * filters[shift];
	  if (row + 1 < height)
	    temp += top_data[location + width] * filters[shift + step];
	  else
	    temp += top_data[location] * filters[shift + step];
	  if (row + 1 < height && d - 1 >= 0)
	    temp +=
	      top_data[location + width - step] * filters[shift + 2 * step];
	  else
	    temp += top_data[location] * filters[shift + 2 * step];
	  if (row + 1 < height && d + 1 < depth)
	    temp +=
	      top_data[location + width + step] * filters[shift + 3 * step];
	  else
	    temp += top_data[location] * filters[shift + 3 * step];
	  //3
	  if (row + 1 < height)
	    temp +=
	      top_data[base0 + width + k * step] * filters[shift + 4 * step];
	  else
	    temp += top_data[location] * filters[shift + 4 * step];
	  top_data[location] = temp;
	  if (top_data[base0 + kp * step] < temp)
	    kp = d;
	  //3
	}
    }
}
// Backward (w.r.t. data) of sga_up_forward: sweeps rows top-to-bottom (the
// reverse of that forward), rewriting top_diff in place into the recurrence
// gradient at each row (gathering weight-1/2/3 paths from row-1 plus the
// weight-4 arg-max path via `idx`), then accumulating into bottom_diff
// through the centre weight. The trailing loop adds the depth-edge fallback
// terms for weights 2 and 3, mirroring sga_down_data_backward.
__global__ void sga_up_data_backward (const int n, const float *filters, float *top_diff,
				      const float *idx, const int height, const int width,
				      const int depth, const int wsize, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / width * step * depth + index % width;	//up->down
  int fbase = index / width * step * wsize + index % width;
  //1
  int base_idx = index / width * step + index % width;
  //
  for (int row = 0; row < height; row++)
    {
      int shift = fbase + row * width;
      for (int d = 0; d < depth; d++)
	{
	  int location = base + d * step + row * width;
	  float temp = top_diff[location];
	  if (row - 1 >= 0)
	    temp +=
	      top_diff[location - width] * filters[shift - width + step];
	  if (row - 1 >= 0 && d + 1 < depth)
	    temp +=
	      top_diff[location - width + step] * filters[shift - width +
							  2 * step];
	  if (row - 1 >= 0 && d - 1 >= 0)
	    temp +=
	      top_diff[location - width - step] * filters[shift - width +
							  3 * step];
	  top_diff[location] = temp;
	  bottom_diff[location] += temp * filters[shift];
	}
      //2
      if (row - 1 >= 0)
	{
	  // Weight-4 path: row-1 read this row at its arg-max depth k.
	  int k = idx[base_idx + row * width];
	  int location = base + k * step + row * width;
	  float temp = 0;
	  for (int d = 0; d < depth; d++)
	    temp +=
	      top_diff[base + row * width - width +
		       d * step] * filters[shift - width + 4 * step];
	  top_diff[location] += temp;
	  bottom_diff[location] += temp * filters[shift];
	}
      //2
    }
  /* for(int d = 0; d < depth; d ++){
     int shift = fbase + width*(height-1);
     int location = base + width*(height-1) + d * step;
     bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
     //    bottom_diff[location] += top_diff[location];
     shift -= width;
     location -= width;
     bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
     }
     for(int row=0;row<height-1;row++){
     int shift = fbase + row * width;
     int location = base + row * width;
     bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
     location += (depth - 1)*step;
     bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
     }*/
  for (int row = 0; row < height; row++)
    {
      int shift = fbase + row * width;
      int location = base + row * width;
      bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
      location += (depth - 1) * step;
      bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward (w.r.t. the wsize aggregation weights) of sga_up_forward.
// Same structure as sga_down_weight_backward with the row neighbour flipped
// to row+1: one thread per spatial site, summing over depth the recurrence
// gradient (temp_diff) times the signal each weight multiplied in the
// forward (raw input for weight 0, next-row aggregated values for 1-4, with
// depth-edge fallbacks onto the raw input for weights 2 and 3).
__global__ void sga_up_weight_backward (const int n, const float *bottom_data,
					const float *top_data, const float *temp_diff,
					const float *idx, const int height, const int width,
					const int depth, const int wsize, float *filters_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / step * step * depth + index % step;	//up->down
  int fbase = index / step * step * wsize + index % step;
  int row = index % step / width;
  for (int i = 0; i < depth; i++)
    filters_diff[fbase] +=
      temp_diff[base + i * step] * bottom_data[base + i * step];
  if (row + 1 < height)
    {
      int location = fbase + step;
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + i * step + width];
      location = fbase + 2 * step;
      filters_diff[location] += temp_diff[base] * bottom_data[base];
      for (int i = 1; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i - 1) * step +
						width];
      location = fbase + 3 * step;
      filters_diff[location] +=
	temp_diff[base + (depth - 1) * step] * bottom_data[base +
							   (depth -
							    1) * step];
      for (int i = 0; i < depth - 1; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i + 1) * step +
						width];
    }
  /*
     else{
     //int location = fbase + step;
     for(int i=0; i<depth; i++){
     float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     }
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     //  location = fbase + 3*step;
     //  for(int i=0; i<depth; i++)
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     //
     //  location = fbase + 4*step;
     //  for(int i=0; i<depth; i++)
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }*/
  //1
  if (row + 1 < height)
    {
      // Weight 4: the next row was read at its arg-max depth k.
      int location = fbase + 4 * step;
      int k = idx[index + width];
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + k * step + width];
    }
  //
  /*
     else{
     int location = fbase + 2*step;
     for(int i=0; i<depth; i++)
     filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }*/
}
// SGA left-to-right sweep: same recurrence as sga_down_forward but along
// columns. One thread per (batch*channel, row) line; columns are processed
// sequentially and top_data is updated in place. Weight layout: [0] current,
// [1] same depth previous column, [2] depth-1 previous column, [3] depth+1
// previous column, [4] previous column at its arg-max depth.
__global__ void sga_right_forward (const int n, const float *filters, const int height,
				   const int width, const int depth, const int wsize,
				   float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  // int wsize=radius+1;
  int base = index / height * step * depth + (index % height) * width;	//up->down
  int fbase = index / height * step * wsize + (index % height) * width;
  int kp = 0;
  for (int col = 0; col < width; col++)
    {
      int shift = fbase + col;
      //2
      int base0 = base + col;
      int k = kp;
      kp = 0;
      //2
      for (int d = 0; d < depth; d++)
	{
	  float temp = 0;
	  int location = base + d * step + col;
	  temp += top_data[location] * filters[shift];
	  if (col - 1 >= 0)
	    temp += top_data[location - 1] * filters[shift + step];
	  else
	    temp += top_data[location] * filters[shift + step];
	  if (col - 1 >= 0 && d - 1 >= 0)
	    temp += top_data[location - 1 - step] * filters[shift + 2 * step];
	  else
	    temp += top_data[location] * filters[shift + 2 * step];
	  if (col - 1 >= 0 && d + 1 < depth)
	    temp += top_data[location - 1 + step] * filters[shift + 3 * step];
	  else
	    temp += top_data[location] * filters[shift + 3 * step];
	  //3
	  if (col - 1 >= 0)
	    temp +=
	      top_data[base0 - 1 + k * step] * filters[shift + 4 * step];
	  else
	    temp += top_data[location] * filters[shift + 4 * step];
	  top_data[location] = temp;
	  if (top_data[base0 + kp * step] < temp)
	    kp = d;
	  //3
	}
    }
}
// Backward (w.r.t. data) of sga_right_forward: sweeps columns right-to-left,
// rewriting top_diff in place into the recurrence gradient at each column
// (gathering weight-1/2/3 paths from col+1 plus the weight-4 arg-max path via
// `idx`), then accumulating into bottom_diff through the centre weight. The
// trailing loop adds the depth-edge fallback terms for weights 2 and 3,
// mirroring the row-direction backward kernels.
__global__ void sga_right_data_backward (const int n, const float *filters, float *top_diff,
					 const float *idx, const int height, const int width,
					 const int depth, const int wsize, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / height * step * depth + (index % height) * width;	//up->down
  int fbase = index / height * step * wsize + (index % height) * width;
  //1
  int base_idx = index / height * step + (index % height) * width;
  //
  for (int col = width - 1; col >= 0; col--)
    {
      int shift = fbase + col;
      for (int d = 0; d < depth; d++)
	{
	  int location = base + d * step + col;
	  float temp = top_diff[location];
	  if (col + 1 < width)
	    temp += top_diff[location + 1] * filters[shift + 1 + step];
	  if (col + 1 < width && d + 1 < depth)
	    temp +=
	      top_diff[location + 1 + step] * filters[shift + 1 + 2 * step];
	  if (col + 1 < width && d - 1 >= 0)
	    temp +=
	      top_diff[location + 1 - step] * filters[shift + 1 + 3 * step];
	  top_diff[location] = temp;
	  bottom_diff[location] += (temp * filters[shift]);
	}
      //2
      if (col + 1 < width)
	{
	  // Weight-4 path: col+1 read this column at its arg-max depth k.
	  int k = idx[base_idx + col];
	  int location = base + k * step + col;
	  float temp = 0;
	  for (int d = 0; d < depth; d++)
	    temp +=
	      top_diff[base + col + 1 + d * step] * filters[shift + 1 +
							    4 * step];
	  top_diff[location] += temp;
	  bottom_diff[location] += temp * filters[shift];
	}
      //2
    }
  /*
     for(int d = 0; d < depth; d ++){
     int shift = fbase;// + width*(height-1);
     int location = base;// + width*(height-1) + d * step;
     bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
     //    bottom_diff[location] += top_diff[location];
     shift += 1;
     location += 1;
     bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
     }
     for(int col=1;col<width;col++){
     int shift = fbase + col;
     int location = base + col;
     bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
     location += (depth - 1)*step;
     bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
     }*/
  for (int col = 0; col < width; col++)
    {
      int shift = fbase + col;
      int location = base + col;
      bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
      location += (depth - 1) * step;
      bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
    }
}
// Backward (w.r.t. the wsize aggregation weights) of sga_right_forward.
// Same structure as the row-direction weight backwards with the neighbour
// flipped to col-1: one thread per spatial site, summing over depth the
// recurrence gradient (temp_diff) times the signal each weight multiplied in
// the forward (raw input for weight 0, previous-column aggregated values for
// 1-4, with depth-edge fallbacks onto the raw input for weights 2 and 3).
__global__ void sga_right_weight_backward (const int n, const float *bottom_data,
					   const float *top_data, const float *temp_diff,
					   const float *idx, const int height,
					   const int width, const int depth, const int wsize,
					   float *filters_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  int base = index / step * step * depth + index % step;	//up->down
  int fbase = index / step * step * wsize + index % step;
  //  int row = index%step/width;
  int col = index % step % width;
  for (int i = 0; i < depth; i++)
    filters_diff[fbase] +=
      temp_diff[base + i * step] * bottom_data[base + i * step];
  if (col - 1 >= 0)
    {
      int location = fbase + step;
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + i * step - 1];
      location = fbase + 2 * step;
      filters_diff[location] += temp_diff[base] * bottom_data[base];
      for (int i = 1; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i - 1) * step - 1];
      location = fbase + 3 * step;
      filters_diff[location] +=
	temp_diff[base + (depth - 1) * step] * bottom_data[base +
							   (depth -
							    1) * step];
      for (int i = 0; i < depth - 1; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + (i + 1) * step - 1];
    }
  /*
     else{
     //int location = fbase + step;
     for(int i=0; i<depth; i++){
     float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     }
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     //  location = fbase + 3*step;
     //  for(int i=0; i<depth; i++)
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     //
     //  location = fbase + 4*step;
     //  for(int i=0; i<depth; i++)
     //  filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }*/
  //1
  if (col - 1 >= 0)
    {
      // Weight 4: the previous column was read at its arg-max depth k.
      int location = fbase + 4 * step;
      int k = idx[index - 1];
      for (int i = 0; i < depth; i++)
	filters_diff[location] +=
	  temp_diff[base + i * step] * top_data[base + k * step - 1];
    }
  //
  /*
     else{
     int location = fbase + 2*step;
     for(int i=0; i<depth; i++)
     filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }*/
}
// SGA right-to-left sweep: mirror of sga_right_forward; columns are visited
// from width-1 down to 0 and the recurrence reads col+1. One thread per
// (batch*channel, row) line; top_data is updated in place. Weight layout:
// [0] current, [1] same depth next column, [2] depth-1 next column,
// [3] depth+1 next column, [4] next column at its arg-max depth.
__global__ void sga_left_forward (const int n, const float *filters, const int height,
				  const int width, const int depth, const int wsize,
				  float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
    {
      return;
    }
  int step = height * width;
  // int wsize=radius+1;
  int base = index / height * step * depth + (index % height) * width;	//up->down
  int fbase = index / height * step * wsize + (index % height) * width;
  int kp = 0;
  for (int col = width - 1; col >= 0; col--)
    {
      int shift = fbase + col;
      //2
      int base0 = base + col;
      int k = kp;
      kp = 0;
      //2
      for (int d = 0; d < depth; d++)
	{
	  float temp = 0;
	  int location = base + d * step + col;
	  temp += top_data[location] * filters[shift];
	  if (col + 1 < width)
	    temp += top_data[location + 1] * filters[shift + step];
	  else
	    temp += top_data[location] * filters[shift + step];
	  if (col + 1 < width && d - 1 >= 0)
	    temp += top_data[location + 1 - step] * filters[shift + 2 * step];
	  else
	    temp += top_data[location] * filters[shift + 2 * step];
	  if (col + 1 < width && d + 1 < depth)
	    temp += top_data[location + 1 + step] * filters[shift + 3 * step];
	  else
	    temp += top_data[location] * filters[shift + 3 * step];
	  //3
	  if (col + 1 < width)
	    temp +=
	      top_data[base0 + 1 + k * step] * filters[shift + 4 * step];
	  else
	    temp += top_data[location] * filters[shift + 4 * step];
	  top_data[location] = temp;
	  if (top_data[base0 + kp * step] < temp)
	    kp = d;
	  //3
	}
    }
}
// Backward of sga_left_forward w.r.t. the input data. Mirrors the forward
// recursion: columns are visited left-to-right (the reverse of the forward's
// right-to-left scan), and top_diff is updated in place so each column picks
// up the already back-propagated gradient of column col-1.
// idx holds the per-pixel argmax depth (from MaxDepth) used by filter
// weight 4; bottom_diff is accumulated into (caller zero-initialises it).
__global__ void sga_left_data_backward (const int n, const float *filters, float *top_diff,
          const float *idx, const int height, const int width,
          const int depth, const int wsize, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
  {
    return;
  }
  int step = height * width;
  int base = index / height * step * depth + (index % height) * width; //up->down
  int fbase = index / height * step * wsize + (index % height) * width;
  //1
  // base_idx: position of this row inside the single-plane argmax tensor.
  int base_idx = index / height * step + (index % height) * width;
  //
  for (int col = 0; col < width; col++)
  {
    int shift = fbase + col;
    for (int d = 0; d < depth; d++)
    {
      int location = base + d * step + col;
      // Accumulate gradient flowing back from column col-1 through
      // filter weights 1..3 (same / upper / lower depth neighbours).
      float temp = top_diff[location];
      if (col - 1 >= 0)
        temp += top_diff[location - 1] * filters[shift - 1 + step];
      if (col - 1 >= 0 && d + 1 < depth)
        temp +=
          top_diff[location - 1 + step] * filters[shift - 1 + 2 * step];
      if (col - 1 >= 0 && d - 1 >= 0)
        temp +=
          top_diff[location - 1 - step] * filters[shift - 1 + 3 * step];
      top_diff[location] = temp;   // in-place: feeds the next column
      bottom_diff[location] += temp * filters[shift];   // weight 0 path
    }
    //2
    // Weight-4 path: all depths of column col-1 route gradient to this
    // column's argmax depth k.
    if (col - 1 >= 0)
    {
      int k = idx[base_idx + col];
      int location = base + k * step + col;
      float temp = 0;
      for (int d = 0; d < depth; d++)
        temp +=
          top_diff[base + col - 1 + d * step] * filters[shift - 1 +
                                                        4 * step];
      top_diff[location] += temp;
      //top_diff[base + col - 1 + d*step] * filters[shift - 1 + 4*step];
      bottom_diff[location] += temp * filters[shift];
    }
    //2
  }
  /*
     for(int d = 0; d < depth; d ++){
        int shift = fbase + width-1;// + width*(height-1);
        int location = base + width-1;// + width*(height-1) + d * step;
        bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]);
        // bottom_diff[location] += top_diff[location];
        shift -= 1;
        location -= 1;
        bottom_diff[location] += top_diff[location] * filters[shift + 2*step];
     }
     for(int col=0;col<width-1;col++){
        int shift = fbase + col;
        int location = base + col;
        bottom_diff[location] += top_diff[location] * filters[shift + 3*step];
        location += (depth - 1)*step;
        bottom_diff[location] += top_diff[location] * filters[shift + 4*step];
     }*/
  // Depth-border corrections: the forward's fallback for d-1 < 0 (weight 2)
  // and d+1 >= depth (weight 3) read the centre element, so route those
  // gradients back to the first and last depth slices respectively.
  for (int col = 0; col < width; col++)
  {
    int shift = fbase + col;
    int location = base + col;
    bottom_diff[location] += top_diff[location] * filters[shift + 2 * step];
    location += (depth - 1) * step;
    bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
  }
}
// Backward of sga_left_forward w.r.t. the 5 filter weights. One thread per
// pixel of one (batch, channel) slab (n == num*channel*height*width).
// bottom_data is the raw input, top_data the re-run forward (aggregated)
// result, temp_diff the in-place-propagated gradient produced by
// sga_left_data_backward, idx the per-pixel argmax depth. Each weight's
// gradient is temp_diff times the value that weight multiplied in the
// forward pass, summed over depth; filters_diff is accumulated into.
__global__ void sga_left_weight_backward (const int n, const float *bottom_data,
            const float *top_data, const float *temp_diff,
            const float *idx, const int height, const int width,
            const int depth, const int wsize,
            float *filters_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
  {
    return;
  }
  int step = height * width;
  int base = index / step * step * depth + index % step; //up->down
  int fbase = index / step * step * wsize + index % step;
  // int row = index%step/width;
  int col = index % step % width;
  // Weight 0: forward multiplied the (pre-aggregation) input value.
  for (int i = 0; i < depth; i++)
    filters_diff[fbase] +=
      temp_diff[base + i * step] * bottom_data[base + i * step];
  // Weights 1-3 only have an aggregated neighbour when col+1 is in range
  // (the forward's out-of-range fallback terms are not accumulated here --
  // see the disabled else-branch below).
  if (col + 1 < width)
  {
    // Weight 1: same-depth neighbour at col+1.
    int location = fbase + step;
    for (int i = 0; i < depth; i++)
      filters_diff[location] +=
        temp_diff[base + i * step] * top_data[base + i * step + 1];
    // Weight 2: depth-1 neighbour; the d == 0 border used the centre
    // (input) value in the forward pass.
    location = fbase + 2 * step;
    filters_diff[location] += temp_diff[base] * bottom_data[base];
    for (int i = 1; i < depth; i++)
      filters_diff[location] +=
        temp_diff[base + i * step] * top_data[base + (i - 1) * step + 1];
    // Weight 3: depth+1 neighbour; the d == depth-1 border used the
    // centre (input) value.
    location = fbase + 3 * step;
    filters_diff[location] +=
      temp_diff[base + (depth - 1) * step] * bottom_data[base +
                                                         (depth -
                                                          1) * step];
    for (int i = 0; i < depth - 1; i++)
      filters_diff[location] +=
        temp_diff[base + i * step] * top_data[base + (i + 1) * step + 1];
  }
  /*
     else{
     //int location = fbase + step;
     for(int i=0; i<depth; i++){
     float temp = temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 3*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     filters_diff[fbase + 4*step] += temp; //temp_diff[base+i*step]*bottom_data[base+i*step];
     }
     // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     // location = fbase + 3*step;
     // for(int i=0; i<depth; i++)
     // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     //
     // location = fbase + 4*step;
     // for(int i=0; i<depth; i++)
     // filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }*/
  //1
  // Weight 4: neighbour at the next column's argmax depth k.
  if (col + 1 < width)
  {
    int location = fbase + 4 * step;
    int k = idx[index + 1];
    for (int i = 0; i < depth; i++)
      filters_diff[location] +=
        temp_diff[base + i * step] * top_data[base + k * step + 1];
  }
  //
  /*
     else{
     int location = fbase + 2*step;
     for(int i=0; i<depth; i++)
     filters_diff[location] += temp_diff[base+i*step]*bottom_data[base+i*step];
     }
   */
}
// Fused SGA forward: aggregate the cost volume along each of the four
// directions (down, up, right, left) and keep the element-wise maximum,
// recording in `mask` which direction won at every element. `temp_out` is a
// scratch buffer that holds each direction's aggregation result in turn;
// after this call it still contains the left-direction result, which the
// backward pass relies on.
void sga_kernel_forward (at::Tensor input, at::Tensor guidance_down,
       at::Tensor guidance_up, at::Tensor guidance_right,
       at::Tensor guidance_left, at::Tensor temp_out,
       at::Tensor output, at::Tensor mask){
  const int num = input.size(0);
  const int channel = input.size(1);
  const int depth = input.size(2);
  const int height = input.size(3);
  const int width = input.size(4);
  const int wsize = guidance_down.size(2);

  float *out_ptr = output.data<float>();
  float *scratch = temp_out.data<float>();
  float *mask_ptr = mask.data<float>();
  const float *in_ptr = input.data<float>();
  const float *gd = guidance_down.data<float>();
  const float *gu = guidance_up.data<float>();
  const float *gr = guidance_right.data<float>();
  const float *gl = guidance_left.data<float>();

  const int total = input.numel ();
  // Number of blocks needed for `count` threads at CUDA_NUM_THREADS each.
  auto grid_for = [](int count) {
    return (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  };

  // Vertical passes run one thread per (batch, channel, column).
  const int vthreads = num * channel * width;
  cudaMemcpy (scratch, in_ptr, sizeof (float) * total,
              cudaMemcpyDeviceToDevice);
  sga_down_forward <<< grid_for (vthreads), CUDA_NUM_THREADS >>> (
      vthreads, gd, height, width, depth, wsize, scratch);
  // The first direction seeds the running maximum directly.
  cudaMemcpy (out_ptr, scratch, sizeof (float) * total,
              cudaMemcpyDeviceToDevice);

  cudaMemcpy (scratch, in_ptr, sizeof (float) * total,
              cudaMemcpyDeviceToDevice);
  sga_up_forward <<< grid_for (vthreads), CUDA_NUM_THREADS >>> (
      vthreads, gu, height, width, depth, wsize, scratch);
  Max <<< grid_for (total), CUDA_NUM_THREADS >>> (total, scratch, out_ptr,
                                                  mask_ptr, 1);

  // Horizontal passes run one thread per (batch, channel, row).
  const int hthreads = num * channel * height;
  cudaMemcpy (scratch, in_ptr, sizeof (float) * total,
              cudaMemcpyDeviceToDevice);
  sga_right_forward <<< grid_for (hthreads), CUDA_NUM_THREADS >>> (
      hthreads, gr, height, width, depth, wsize, scratch);
  Max <<< grid_for (total), CUDA_NUM_THREADS >>> (total, scratch, out_ptr,
                                                  mask_ptr, 2);

  cudaMemcpy (scratch, in_ptr, sizeof (float) * total,
              cudaMemcpyDeviceToDevice);
  sga_left_forward <<< grid_for (hthreads), CUDA_NUM_THREADS >>> (
      hthreads, gl, height, width, depth, wsize, scratch);
  Max <<< grid_for (total), CUDA_NUM_THREADS >>> (total, scratch, out_ptr,
                                                  mask_ptr, 3);
}
// Fused SGA backward pass. gradOutput is first routed through the
// element-wise max recorded in `mask` (get_temp_grad), then propagated
// through each directional aggregation (left, down, up, right). For every
// direction the forward pass is re-run into temp_out so that MaxDepth can
// recover the per-pixel argmax depth (max_idx) consumed by the
// *_data_backward / *_weight_backward kernels. The "left" direction runs
// first because temp_out still holds the left-forward result from
// sga_kernel_forward, so its forward pass is not re-run.
// Gradients are accumulated into gradInput and grad_down/up/right/left;
// temp_grad is a scratch gradient buffer that is zeroed per direction.
void sga_kernel_backward (at::Tensor input, at::Tensor guidance_down,
        at::Tensor guidance_up, at::Tensor guidance_right,
        at::Tensor guidance_left, at::Tensor temp_out,
        at::Tensor mask, at::Tensor max_idx,
        at::Tensor gradOutput, at::Tensor temp_grad,
        at::Tensor gradInput, at::Tensor grad_down,
        at::Tensor grad_up, at::Tensor grad_right,
        at::Tensor grad_left){
  int num = input.size(0);
  int channel = input.size(1);
  int depth = input.size(2);
  int height = input.size(3);
  int width = input.size(4);
  int wsize = guidance_down.size(2);
  float *top_grad = temp_grad.data<float>();
  float *top_temp = temp_out.data<float>();
  const float *top_mask = mask.data<float>();
  const float *bottom_data = input.data<float>();
  const float *grad_out = gradOutput.data<float>();
  const float *g0 = guidance_down.data<float>();
  const float *g1 = guidance_up.data<float>();
  const float *g2 = guidance_right.data<float>();
  const float *g3 = guidance_left.data<float>();
  float *grad0 = grad_down.data<float>();
  float *grad1 = grad_up.data<float>();
  float *grad2 = grad_right.data<float>();
  float *grad3 = grad_left.data<float>();
  float *grad_input = gradInput.data<float>();
  float *idx = max_idx.data<float>();
  int N = input.numel ();
  // backward for left (temp_out already holds the left-forward result)
  int n = num * channel * height;
  cudaMemset (top_grad, 0, sizeof (float) * N);
  get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 3);
  N = num * channel * width * height;
  MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
  sga_left_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g3, top_grad, idx, height, width, depth, wsize,
                          grad_input);
  n = num * channel * width * height;
  sga_left_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, height,
                          width, depth, wsize, grad3);
  // backward for down
  N = input.numel ();
  n = num * channel * width;
  cudaMemcpy (top_temp, bottom_data, sizeof (float) * N,
              cudaMemcpyDeviceToDevice);
  sga_down_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g0, height, width, depth, wsize, top_temp);
  cudaMemset (top_grad, 0, sizeof (float) * N);
  get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 0);
  N = num * channel * width * height;
  MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
  sga_down_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g0, top_grad, idx, height, width, depth, wsize,
                          grad_input);
  n = num * channel * width * height;
  sga_down_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, height,
                          width, depth, wsize, grad0);
  // backward for up
  N = input.numel ();
  // BUG FIX: was `n = * channel * width;` (missing `num`), which dereferenced
  // an int and never set the correct launch size for the vertical pass.
  n = num * channel * width;
  cudaMemcpy (top_temp, bottom_data, sizeof (float) * N,
              cudaMemcpyDeviceToDevice);
  sga_up_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g1, height, width, depth, wsize, top_temp);
  cudaMemset (top_grad, 0, sizeof (float) * N);
  get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 1);
  N = num * channel * width * height;
  MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
  sga_up_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g1, top_grad, idx, height, width, depth, wsize,
                          grad_input);
  n = num * channel * width * height;
  sga_up_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, height,
                          width, depth, wsize, grad1);
  // backward for right
  N = input.numel ();
  n = num * channel * height;
  cudaMemcpy (top_temp, bottom_data, sizeof (float) * N,
              cudaMemcpyDeviceToDevice);
  sga_right_forward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g2, height, width, depth, wsize, top_temp);
  cudaMemset (top_grad, 0, sizeof (float) * N);
  get_temp_grad <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, grad_out, top_mask, top_grad, 2);
  N = num * channel * width * height;
  MaxDepth <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (N, top_temp, height * width, depth, idx);
  sga_right_data_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, g2, top_grad, idx, height, width, depth, wsize,
                          grad_input);
  n = num * channel * width * height;
  sga_right_weight_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
    CUDA_NUM_THREADS >>> (n, bottom_data, top_temp, top_grad, idx, height,
                          width, depth, wsize, grad2);
}
// LGA (local guided aggregation) forward filtering. One thread per output
// element (n == total element count). Each output is a weighted sum of its
// (2*radius+1)^2 spatial neighbours across depth offsets d in {-1, 0, 1},
// with per-pixel weights taken from `filters` (3*wsize*wsize weight planes
// per batch). Out-of-range taps fall back to the centre element.
// NOTE(review): accumulates into top_data (+=) -- assumes the caller
// zero-initialised the output buffer; confirm at call sites.
__global__ void lga_filtering_forward (const int n, const float *bottom_data,
           const float *filters, const int height,
           const int width, const int channel, const int radius,
           float *top_data){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // printf("OK\n");
  // printf("%d, %.2f, %.2f\n", index, bottom_data[index], top_data[index]);
  if (index >= n)
  {
    return;
  }
  // top_data[index]=1.0;
  // assert(0);
  int step = height * width;
  int wsize = 2 * radius + 1;
  // int fsize=wsize*wsize*3;
  // fbase: this pixel's position within the filter stack of its batch.
  int fbase =
    index / (step * channel) * (step * wsize * wsize * 3) + index % step;
  int row = index % step / width;
  int col = index % width;
  int depth = index / step % channel;   // channel axis acts as depth here
  for (int d = -1; d <= 1; d++)
  {
    for (int r = -radius; r <= radius; r++)
    {
      for (int c = -radius; c <= radius; c++)
      {
        int rr = r + row;
        int cc = c + col;
        int dd = d + depth;
        int shift = 0;   // default: out-of-range taps read the centre
        if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
            && dd < channel)
          shift = r * width + c + d * step;
        // Plane index of this tap's weight within the 3*wsize*wsize stack.
        int location =
          (d + 1) * (wsize * wsize) + (r + radius) * wsize + c + radius;
        top_data[index] +=
          bottom_data[index + shift] * filters[fbase + location * step];
      }
    }
  }
  // top_data[index]=1.0;
  // printf("%d, %d, %d, %.2f, %.2f\n", index, row, col, bottom_data[index], top_data[index]);
}
// LGA backward w.r.t. the filter weights. One thread per filter element
// (n == filters.numel()). Decodes the tap offset (d, r, c) from this
// element's plane index, then accumulates top_diff * bottom_data over all
// channels. The else-branch mirrors the forward's border fallback, where
// an out-of-range tap multiplied the centre element instead.
__global__ void lga_filter_backward (const int n, const float *bottom_data,
         const float *top_diff, const int height, const int width,
         const int channel, const int radius, float *filter_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
  {
    return;
  }
  int step = height * width;
  int wsize = 2 * radius + 1;
  // base: the matching pixel in the (channel-stacked) data tensors.
  int base =
    index / (step * wsize * wsize * 3) * (step * channel) + index % step;
  // Decode which tap (d, r, c) this filter element represents.
  int location = index / step % (wsize * wsize * 3);
  int d = location / (wsize * wsize) - 1;
  int r = (location / wsize) % wsize - radius;
  int c = location % wsize - radius;
  int rr = index % step / width + r;
  int cc = index % width + c;
  for (int i = 0; i < channel; i++)
  {
    int dd = i + d;
    if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
        && dd < channel)
    {
      int shift = r * width + c + d * step;
      filter_diff[index] +=
        top_diff[base + i * step] * bottom_data[base + shift + i * step];
    }
    else
      // Border fallback: forward used the centre element for this tap.
      filter_diff[index] +=
        top_diff[base + i * step] * bottom_data[base + i * step];
  }
}
// LGA backward w.r.t. the input data. One thread per input element. For
// each tap, the in-range contribution comes from the *shifted* output
// position that consumed this input: the filter is re-based to that pixel
// (`fbase + r*width + c`) and the mirrored tap (-d, -r, -c) is selected.
// Out-of-range taps mirror the forward's border fallback (the output at
// `index` used its own centre element with the local weight).
// bottom_diff is accumulated into (callers zero it first).
__global__ void lga_data_backward (const int n, const float *filters, const float *top_diff,
       const int height, const int width, const int channel,
       const int radius, float *bottom_diff){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= n)
  {
    return;
  }
  int step = height * width;
  int wsize = 2 * radius + 1;
  // int fsize=wsize*wsize*3;
  int fbase =
    index / (step * channel) * (step * wsize * wsize * 3) + index % step;
  int row = index % step / width;
  int col = index % width;
  int depth = index / step % channel;
  for (int d = -1; d <= 1; d++)
  {
    for (int r = -radius; r <= radius; r++)
    {
      for (int c = -radius; c <= radius; c++)
      {
        int rr = r + row;
        int cc = c + col;
        int dd = d + depth;
        // int shift = 0;
        if (rr >= 0 && cc >= 0 && dd >= 0 && rr < height && cc < width
            && dd < channel)
        {
          int shift = r * width + c + d * step;
          // int fshift= r*width+c;
          // Mirrored tap of the shifted output position.
          int location =
            (-d + 1) * (wsize * wsize) + (-r + radius) * wsize - c +
            radius;
          // `fbase + r*width + c` re-bases the filter to pixel (rr, cc).
          bottom_diff[index] +=
            top_diff[index + shift] * filters[fbase + r * width + c +
                                              location * step];
        }
        else
        {
          // Border fallback: this tap of the output at `index` read the
          // centre element in the forward pass.
          int location =
            (d + 1) * (wsize * wsize) + (r + radius) * wsize + c +
            radius;
          bottom_diff[index] +=
            top_diff[index] * filters[fbase + location * step];
        }
      }
    }
  }
}
// Apply the LGA filter to a 4-D input (batch, channel, height, width) with
// per-pixel 3*(2*radius+1)^2 weight planes; one thread per output element.
void lga_forward (at::Tensor input, at::Tensor filters, at::Tensor output,
      const int radius){
  const int channel = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);
  const int count = input.numel ();
  const int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  lga_filtering_forward <<< blocks, CUDA_NUM_THREADS >>> (
      count, input.data<float>(), filters.data<float>(), height, width,
      channel, radius, output.data<float>());
}
// Backward of lga_forward: fills gradFilters (filter-weight gradient) and
// gradInput (data gradient, zeroed here before accumulation).
void lga_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
       at::Tensor gradInput, at::Tensor gradFilters, const int radius){
  const int channel = input.size(1);
  const int height = input.size(2);
  const int width = input.size(3);

  // Gradient w.r.t. the filter weights: one thread per filter element.
  const int fcount = filters.numel ();
  lga_filter_backward <<< (fcount + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
      CUDA_NUM_THREADS >>> (fcount, input.data<float>(),
                            gradOutput.data<float>(), height, width, channel,
                            radius, gradFilters.data<float>());

  // Gradient w.r.t. the input: accumulated, so clear the buffer first.
  const int icount = input.numel ();
  float *in_grad = gradInput.data<float>();
  cudaMemset (in_grad, 0, sizeof (float) * icount);
  lga_data_backward <<< (icount + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
      CUDA_NUM_THREADS >>> (icount, filters.data<float>(),
                            gradOutput.data<float>(), height, width, channel,
                            radius, in_grad);
}
// 5-D variant of lga_forward: dims 2/3/4 are (channel, height, width); the
// same element-wise kernel is reused since the indexing is flat.
void lga3d_forward (at::Tensor input, at::Tensor filters, at::Tensor output,
        const int radius){
  const int channel = input.size(2);
  const int height = input.size(3);
  const int width = input.size(4);
  const int count = input.numel ();
  const int blocks = (count + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  lga_filtering_forward <<< blocks, CUDA_NUM_THREADS >>> (
      count, input.data<float>(), filters.data<float>(), height, width,
      channel, radius, output.data<float>());
}
// 5-D variant of lga_backward: dims 2/3/4 are (channel, height, width).
void lga3d_backward (at::Tensor input, at::Tensor filters, at::Tensor gradOutput,
         at::Tensor gradInput, at::Tensor gradFilters,
         const int radius){
  const int channel = input.size(2);
  const int height = input.size(3);
  const int width = input.size(4);

  // Filter-weight gradient: one thread per filter element.
  const int fcount = filters.numel ();
  lga_filter_backward <<< (fcount + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
      CUDA_NUM_THREADS >>> (fcount, input.data<float>(),
                            gradOutput.data<float>(), height, width, channel,
                            radius, gradFilters.data<float>());

  // Data gradient: accumulated, so zero the buffer first.
  const int icount = input.numel ();
  float *in_grad = gradInput.data<float>();
  cudaMemset (in_grad, 0, sizeof (float) * icount);
  lga_data_backward <<< (icount + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS,
      CUDA_NUM_THREADS >>> (icount, filters.data<float>(),
                            gradOutput.data<float>(), height, width, channel,
                            radius, in_grad);
}
#ifdef __cplusplus
}
#endif
|
d5acebf6ed5c2719eaa3e1eaad9468fe1fc32348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <multiply.h>
#include <basic_types.h>
#include <texture.h>
#include <util.h>
#include <cutil.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#include <cusp/multiply.h>
#include <matrix.h>
#include <matrix_cusp.h>
#include <amgx_cusparse.h>
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <logger.h>
#include <sm_utils.inl>
#include <amgx_types/math.h>
#include <amgx_types/util.h>
namespace amgx
{
#define USE_EXPERIMENTAL_4x4
template <class Matrix, class Vector>
class Multiply_1x1;
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction;
template <class Matrix, class Vector>
class Multiply_3x3;
template <class Matrix, class Vector>
class Multiply_4x4;
template <class Matrix, class Vector>
class Multiply_bxb;
// Dispatch C = A * B on the matrix block shape: specialized paths exist for
// scalar (1x1), 3x3 and 4x4 blocks; everything else takes the generic
// bxb implementation.
template <typename TConfig>
void multiply_block_size(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    const int bx = A.get_block_dimx();
    const int by = A.get_block_dimy();

    if (A.get_block_size() == 1)
    {
        Multiply_1x1<TMatrix, TVector>::multiply_1x1(A, B, C, view);
        return;
    }

    if (by == 3 && bx == 3)
    {
        Multiply_3x3<TMatrix, TVector>::multiply_3x3(A, B, C, view);
        return;
    }

    if (by == 4 && bx == 4)
    {
        Multiply_4x4<TMatrix, TVector>::multiply_4x4(A, B, C, view);
        return;
    }

    Multiply_bxb<TMatrix, TVector>::multiply_bxb(A, B, C, view);
}
// Sparse matrix-vector multiply C = A * B with optional latency hiding:
// when A is distributed across GPUs and B's halo is stale, the halo
// exchange is overlapped with the multiply of the interior rows, after
// which the boundary rows are processed.
// NOTE(review): the `view` parameter is currently unused -- the views are
// derived from A itself; kept for interface compatibility.
// Fix: removed two duplicated, entirely unused TMatrix/TVector typedef
// pairs that were declared in the original body.
template <typename TConfig>
void multiply(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    // Latency hiding only applies when interior/exterior views differ,
    // the matrix is distributed, and B's halo needs refreshing.
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && B.dirtybit != 0);

    if (latencyHiding)
    {
        A.manager->exchange_halo_split_gather(B, B.tag);
        // Multiply interior rows while the halo exchange is in flight
        multiply_block_size(A, B, C, A.getViewInterior());
        // Finish halo exchange
        A.manager->exchange_halo_split_finish(B, B.tag);
        // Multiply rows with halo dependencies
        ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
        multiply_block_size(A, B, C, bnd_view);
    }
    else
    {
        if (!A.is_matrix_singleGPU() && B.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(B, B.tag);
        }

        multiply_block_size(A, B, C, A.getViewExterior());
    }

    C.dirtybit = 1;
    C.set_block_dimy(A.get_block_dimx());
}
// Masked sparse matrix-vector multiply; only the scalar (1x1) block path
// is implemented. Validates initialization, block size and dimension
// compatibility before dispatching.
template <class TConfig>
void multiply_masked(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, typename Matrix<TConfig>::IVector &mask, ViewType view)
{
    typedef Matrix<TConfig> MatType;
    typedef Vector<TConfig> VecType;
    typedef typename Matrix<TConfig>::IVector IdxVecType;

    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_size() != 1)
    {
        FatalError("Unsupported blocksize for multiply_masked()", AMGX_ERR_BAD_PARAMETERS);
    }

    const int a_cols = A.get_block_dimx();

    if (a_cols != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << a_cols << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    Multiply_1x1_masked<MatType, VecType, IdxVecType>::multiply_1x1_masked(A, B, C, mask, view);
    C.set_block_dimy(a_cols);
}
// Masked SpMV wrapper: validates the inputs, dispatches the scalar-block
// implementation, and marks C dirty for subsequent halo exchanges.
template<class Matrix, class Vector>
void multiply_with_mask(Matrix &A, Vector &B, Vector &C)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    const int a_cols = A.get_block_dimx();

    if (a_cols != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << a_cols << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    // Only the scalar (1x1) block path is implemented.
    if (A.get_block_size() != 1)
    {
        FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
    }

    Multiply_1x1_with_mask<Matrix, Vector>::multiply_1x1(A, B, C);
    C.set_block_dimy(a_cols);
    C.dirtybit = 1;
    //if (!A.is_matrix_singleGPU() && C.size() == B.size() && C.delayed_send==0)
    //    A.manager->exchange_halo_async(C, C.tag);
}
// Masked SpMV used during restriction: like multiply_with_mask, but the
// scalar-block implementation also receives the prolongation matrix P.
template<class Matrix, class Vector>
void multiply_with_mask_restriction(Matrix &A, Vector &B, Vector &C, Matrix &P)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    const int a_cols = A.get_block_dimx();

    if (a_cols != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << a_cols << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    // Only the scalar (1x1) block path is implemented.
    if (A.get_block_size() != 1)
    {
        FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
    }

    Multiply_1x1_with_mask_restriction<Matrix, Vector>::multiply_1x1(A, B, C, P);
    C.set_block_dimy(a_cols);
    C.dirtybit = 1;
}
// Host-side sparse matrix-matrix product C = A * B in CSR format, using the
// classic two-pass Gustavson SpGEMM: pass 1 counts the nonzeros of C, pass 2
// computes them with a linked-list workspace. Only supported for host memory
// and scalar (1x1) blocks. Explicit zeros produced by cancellation are kept
// (see the disabled filter in pass 2); column indices within a row are
// emitted unsorted.
template<class TConfig>
void multiplyMM(const Matrix<TConfig> &A, const Matrix<TConfig> &B, Matrix<TConfig> &C)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimx() || A.get_block_dimy() != B.get_block_dimy())
    {
        FatalError("Matrices dimensions do not match", AMGX_ERR_BAD_PARAMETERS);
    }

    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Error, multiplyMM not implemented on device", AMGX_ERR_BAD_PARAMETERS);
    }
    else
    {
        if (A.get_block_size() != 1)
        {
            FatalError("multiplyMM only works for block_size ==1", AMGX_ERR_NOT_IMPLEMENTED);
        }
        else
        {
            typedef typename TConfig::IndPrec IndexType;
            typedef typename TConfig::MatPrec ValueType;
            typedef typename Matrix<TConfig>::IVector IVector;
            typedef typename Matrix<TConfig>::MVector MVector;
            C.set_initialized(0);
            IndexType num_nonzeros = 0;
            // Pass 1 workspace: mask[k] == i means column k was already
            // counted for row i.
            IVector mask(B.get_num_cols(), IndexType (-1));

            // Compute nnz in C (including explicit zeros)
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                for (IndexType jj = A.row_offsets[i]; jj < A.row_offsets[i + 1]; jj++)
                {
                    IndexType j = A.col_indices[jj];

                    for (IndexType kk = B.row_offsets[j]; kk < B.row_offsets[j + 1]; kk++)
                    {
                        IndexType k = B.col_indices[kk];

                        if (mask[k] != i)
                        {
                            mask[k] = i;
                            num_nonzeros++;
                        }
                    }
                }
            }

            // Resize output
            C.resize(A.get_num_rows(), B.get_num_cols(), num_nonzeros);
            const IndexType unseen = static_cast<IndexType>(-1);
            const IndexType init = static_cast<IndexType>(-2);
            // Pass 2 workspace: `next` threads the occupied columns of the
            // current row into a singly linked list headed by `head`, while
            // `sums` holds the running dot products for those columns.
            // Compute entries of C
            IVector next(B.get_num_cols(), unseen);
            MVector sums(B.get_num_cols(), types::util<ValueType>::get_zero());
            num_nonzeros = 0;
            C.row_offsets[0] = 0;

            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                IndexType head = init;
                IndexType length = 0;
                IndexType jj_start = A.row_offsets[i];
                IndexType jj_end = A.row_offsets[i + 1];

                for (IndexType jj = jj_start; jj < jj_end; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    ValueType v = A.values[jj];
                    IndexType kk_start = B.row_offsets[j];
                    IndexType kk_end = B.row_offsets[j + 1];

                    for (IndexType kk = kk_start; kk < kk_end; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        sums[k] = sums[k] + v * B.values[kk];

                        // First touch of column k in this row: push onto list.
                        if (next[k] == unseen)
                        {
                            next[k] = head;
                            head = k;
                            length++;
                        }
                    }
                }

                // Drain the list: emit the row's entries (reverse insertion
                // order) and reset the workspace for the next row.
                for (IndexType jj = 0; jj < length; jj++)
                {
                    //if(sums[head] != ValueType(0))
                    //{
                    C.col_indices[num_nonzeros] = head;
                    C.values[num_nonzeros] = sums[head];
                    num_nonzeros++;
                    //}
                    IndexType temp = head;
                    head = next[head];
                    // clear arrays
                    next[temp] = unseen;
                    sums[temp] = types::util<ValueType>::get_zero();
                }

                C.row_offsets[i + 1] = num_nonzeros;
            }

            // Resize output again since pass2 omits explict zeros
            //C.resize(A.num_rows, B.num_cols, num_nonzeros);
            C.set_initialized(1);
        }
    }
}
// --------------------------------
// KERNELS
// --------------------------------
// Block CSR SpMV kernel: C = A * B for bsize x bsize blocks
// (bsize == 2^log_bsize). Each "eighth warp" (a group of bsize consecutive
// threads) processes one block row; lane `vec_entry_index` owns one entry of
// the output block vector. The input block vector B is staged through shared
// memory so every lane of the group can read all bsize entries. Rows are
// consumed in a grid-stride fashion starting at `row_offset`.
// NOTE(review): s_xtemp is accessed without any __syncthreads()/__syncwarp();
// this relies on the bsize lanes of a group residing in the same warp and
// executing warp-synchronously (pre-Volta idiom) -- confirm for new targets.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernel(const IndexType *row_offsets,
                               const IndexType *column_indices,
                               const ValueTypeA *nonzero_values,
                               const ValueTypeB *B,
                               ValueTypeB *C,
                               const IndexType num_block_rows,
                               const IndexType row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int eighthwarp_id = row_offset + (tid >> log_bsize);   // block row handled by this group
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    const int vec_entry_index = threadIdx.x & (bsize - 1); // lane within the group
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;
    ValueTypeB temp[bsize];   // one row (or column) of the current A block

    while (eighthwarp_id < num_block_rows)
    {
        //i = eighthwarp_id;
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from each nonzero column
        int jmax = row_offsets[eighthwarp_id + 1];

        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            // Stage this column's block of B in shared memory (one entry
            // per lane).
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values
            s_offset = block_eighthwarp_id * bsize;

            if (ROW_MAJOR)
            {
                // Row-major block: this lane's output row is contiguous,
                // load it as one vector.
                offset = jind * bsize * bsize + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                // Column-major block: gather this lane's row with stride
                // bsize.
                offset = jind * bsize * bsize + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }

            // Dot product of the A-block row with the staged B block.
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }

        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        // Grid-stride step: advance by the total number of groups launched.
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#ifdef USE_EXPERIMENTAL_4x4
// SpMV C = A*B for 4x4-block matrices with DIAG property (diagonal blocks stored
// separately, addressed through dia_ptr). One half-warp (16 threads) owns one
// block row; each thread holds one of the 16 entries of a 4x4 block.
// Grid-stride over block rows, so any grid size is correct.
// NOTE(review): the pre-Kepler (__CUDA_ARCH__ < 300) paths exchange data through
// shared memory instead of warp shuffles.
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 8 )
#endif
void blockDiaCsrMultiplyKernelDiaProps_4x4( const IndexType *row_offsets,
                                            const IndexType *column_indices,
                                            const IndexType *dia_ptr,
                                            const ValueTypeA *nonzero_values,
                                            const ValueTypeB *B,
                                            ValueTypeB *C,
                                            const int num_block_rows,
                                            const int row_offset )
{
    const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
    const int laneId = threadIdx.x % 32;
    const int halfWarpId = threadIdx.x / 16;
    const int halfLaneId = threadIdx.x % 16;
    const int halfLaneId_div_4 = halfLaneId / 4;   // row index inside the 4x4 block (if ROW_MAJOR)
    const int halfLaneId_mod_4 = halfLaneId % 4;   // column index inside the 4x4 block (if ROW_MAJOR)
    const int laneId_div_16 = laneId / 16;         // which half of the full warp this thread sits in
    const int upperHalf = 16 * laneId_div_16;
    // Ballot-mask selecting the 16 bits that belong to this half warp.
    const int upperMask = 0xffff << upperHalf;
#if __CUDA_ARCH__ < 300
    const int nWarps = CTA_SIZE / 32; // Number of warps per CTA.
    const int warpId = threadIdx.x / 32;
    // Pre-Kepler fallback: exchange column ids through shared memory instead of shuffles.
    volatile __shared__ IndexType s_aColIds[nWarps][32];
#endif
    // Shared memory needed to exchange X and delta.
    __shared__ volatile ValueTypeB s_mem[CTA_SIZE];
    // Each thread keeps its own pointer to shared memory to avoid some extra computations.
    volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];

    // Iterate over the rows of the matrix. One half warp per block row.
    for ( int aRowId = blockIdx.x * nHalfWarps + halfWarpId ; aRowId < num_block_rows ; aRowId += gridDim.x * nHalfWarps )
    {
        // Running per-thread partial product for this block row.
        ValueTypeB my_Ax = types::util<ValueTypeB>::get_zero();

        // Load one block of B (4 entries) into shared memory; the first 4 lanes of the half warp do the loads.
        if ( halfLaneId_div_4 == 0 )
        {
            types::util<ValueTypeB>::volcast( B[4 * aRowId + halfLaneId_mod_4], my_s_mem + halfLaneId);
        }

        // Load the diagonal.
        int diagId = dia_ptr[aRowId];
        // Each thread multiplies its element of the diagonal block with the matching x entry.
        ValueTypeA my_val = nonzero_values[16 * diagId + halfLaneId];

        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_mod_4]);
        }
        else
        {
            my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_div_4]);
        }

        // The range of the rows.
        int aColBegin = row_offsets[aRowId + 0];
        int aColEnd = row_offsets[aRowId + 1];

        // Each half warp loads column indices of up to 16 nonzero blocks per outer iteration.
        for ( ; utils::any( aColBegin < aColEnd ) ; aColBegin += 16 )
        {
            int aColIt = aColBegin + halfLaneId;
            // Get the ID of the column; -1 marks an out-of-range lane.
            int aColId = -1;

            if ( aColIt < aColEnd )
            {
                aColId = column_indices[aColIt];
            }

#if __CUDA_ARCH__ < 300
            s_aColIds[warpId][laneId] = aColId;
#endif

            // Loop over columns. We compute 4 columns per iteration.
            for ( int k = 0, nCols = __popc( utils::ballot(aColId != -1) & upperMask ) ; k < nCols ; k += 4 )
            {
                int my_k = k + halfLaneId_div_4;
                // Exchange column indices (each group of 4 lanes grabs a different column).
#if __CUDA_ARCH__ >= 300
                int waColId = utils::shfl( aColId, upperHalf + my_k );
#else
                int waColId = s_aColIds[warpId][upperHalf + my_k];
#endif
                // Load 4 blocks of X if needed.
                ValueTypeB my_x = types::util<ValueTypeB>::get_zero();

                if ( waColId != -1 )
                {
                    my_x = B[4 * waColId + halfLaneId_mod_4];
                }

                types::util<ValueTypeB>::volcast( my_x, my_s_mem + halfLaneId);

                // Load 4 blocks of A and accumulate the products.
#pragma unroll
                for ( int i = 0 ; i < 4 ; ++i )
                {
                    int w_aColTmp = aColBegin + k + i, w_aColIt = -1;

                    if ( w_aColTmp < aColEnd )
                    {
                        w_aColIt = w_aColTmp;
                    }

                    ValueTypeA my_val = types::util<ValueTypeA>::get_zero();

                    if ( w_aColIt != -1 )
                    {
                        my_val = nonzero_values[16 * w_aColIt + halfLaneId];
                    }

                    if ( ROW_MAJOR )
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_mod_4]);
                    }
                    else
                    {
                        my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_div_4]);
                    }
                }
            } // Loop over k
        } // Loop over aColIt

        // Reduce the 4 partial products of each block row/column into one value.
        // ROW_MAJOR: sum across lanes differing in halfLaneId_mod_4 (xor 1, 2);
        // column-major: sum across lanes differing in halfLaneId_div_4 (xor 4, 8).
#if __CUDA_ARCH__ >= 300
        if ( ROW_MAJOR )
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 1 );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 2 );
        }
        else
        {
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 4 );
            my_Ax = my_Ax + utils::shfl_xor( my_Ax, 8 );
        }
#else
        // Shared-memory fallback reduction; only the lanes that later store a
        // result need a correct sum.
        types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
        {
            if ( ROW_MAJOR )
            {
                if ( laneId < 31 )
                {
                    my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 1]);
                    types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
                }

                if ( laneId < 30 )
                {
                    my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 2]);
                    types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
                }
            }
            else
            {
                if ( laneId < 31 )
                {
                    my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 4]);
                    types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
                }

                if ( laneId < 30 )
                {
                    my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 8]);
                    types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
                }
            }
        }
#endif

        // Store the results: 4 lanes per half warp write the 4 entries of the C block.
        if ( ROW_MAJOR )
        {
            if ( halfLaneId_mod_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_div_4] = my_Ax;
            }
        }
        else
        {
            if ( halfLaneId_div_4 == 0 )
            {
                C[4 * aRowId + halfLaneId_mod_4] = my_Ax;
            }
        }
    }
}
#else
// Fallback 4x4 DIA-prop SpMV used when USE_EXPERIMENTAL_4x4 is not defined.
// One "eighth warp" (bsize threads) owns one block row; thread t of the group
// owns vector entry t of the row block. Grid-stride over block rows.
//
// Fix: value handling now goes through types::util (get_zero / volcast /
// to_uptype) like every other kernel in this file, so the kernel also builds
// for complex ValueTypeA/ValueTypeB; the previous version used plain
// assignments and `C_temp = 0.;`, which only compiled for scalar types.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps_4x4(const IndexType *row_offsets,
                                           const IndexType *column_indices,
                                           const IndexType *dia_ptr,
                                           const ValueTypeA *nonzero_values,
                                           const ValueTypeB *B,
                                           ValueTypeB *C,
                                           const int num_block_rows,
                                           const int row_offset)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int eighthwarp_id = row_offset + (tid >> log_bsize);
    const int block_eighthwarp_id = threadIdx.x >> log_bsize;
    const int vec_entry_index = threadIdx.x & (bsize - 1);
    // One slot per thread: the bsize entries of the x-block currently being multiplied.
    volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
    ValueTypeB C_temp;
    int offset, s_offset;

    while (eighthwarp_id < num_block_rows)
    {
        C_temp = types::util<ValueTypeB>::get_zero();
        // Contribution from the separately stored diagonal block.
        offset = eighthwarp_id * bsize + vec_entry_index;
        types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
        // Load dia_values and do the block-times-vector product.
        s_offset = block_eighthwarp_id * bsize;
        ValueTypeB temp[bsize];

        if (ROW_MAJOR)
        {
            loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
        }
        else
        {
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                types::util<ValueTypeA>::to_uptype(nonzero_values[bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index + bsize * m], temp[m]);
            }
        }

#pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
        }

        // Contribution from each nonzero off-diagonal column block.
        int jmax = row_offsets[eighthwarp_id + 1];

        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
            // Load nonzero_values for this block.
            s_offset = block_eighthwarp_id * bsize;

            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }

#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }

        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
    }
}
#endif
// implementation for arbitrary block size
// General block-size DIA-prop SpMV. One group of `bsize` threads owns one block
// row; warps are padded so a block row never straddles a warp boundary
// (blockrows_per_warp * bsize <= 32, extra lanes exit early). `diag` selects
// whether the separately stored diagonal block (via dia_ptr) is included.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, int diag, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps(const IndexType *row_offsets,
                                       const IndexType *column_indices,
                                       const IndexType *dia_ptr,
                                       const ValueTypeA *nonzero_values,
                                       const ValueTypeB *B,
                                       ValueTypeB *C,
                                       const int num_block_rows,
                                       const int row_offset)
{
    int warp_id = threadIdx.x / 32;
    int warp_thread_id = threadIdx.x & 31;

    // padding row blocks to fit in a single warp: lanes beyond the last whole
    // block row in this warp do no work.
    if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; }

    // new thread id with padding removed
    int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id;
    int eighthwarp_id = row_offset + blockIdx.x * blockrows_per_cta + tid / bsize;
    const int block_eighthwarp_id = tid / bsize;
    const int vec_entry_index = tid % bsize;   // which entry of the block row this thread owns
    const int bsize_sq = bsize * bsize;
    // One slot per (active) thread: the bsize entries of the x-block being multiplied.
    volatile __shared__ ValueTypeB s_xtemp[ bsize * blockrows_per_cta ];
    ValueTypeB C_temp;
    int offset, s_offset;

    // Grid-stride loop over block rows.
    while (eighthwarp_id < num_block_rows)
    {
        C_temp = types::util<ValueTypeB>::get_zero();
        ValueTypeB temp[bsize];

        if ( diag )
        {
            // Contribution from the separately stored diagonal block.
            offset = eighthwarp_id * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load dia_values and do the block-times-vector product.
            s_offset = block_eighthwarp_id * bsize;

            if (ROW_MAJOR)
            {
                loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
            }
            else
            {
                offset = dia_ptr[eighthwarp_id] * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }

#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }

        // Contribution from each nonzero column block in the row.
        int jmax = row_offsets[eighthwarp_id + 1];

        for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
        {
            IndexType jcol = column_indices[jind];
            offset = jcol * bsize + vec_entry_index;
            types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
            // Load nonzero_values for this block.
            s_offset = block_eighthwarp_id * bsize;

            if (ROW_MAJOR)
            {
                offset = jind * bsize_sq + vec_entry_index * bsize;
                loadAsVector<bsize>(nonzero_values + offset, temp);
            }
            else
            {
                offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
                for (int m = 0; m < bsize; m++)
                {
                    types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
                }
            }

#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
            }
        }

        C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
        eighthwarp_id += gridDim.x * blockrows_per_cta;
    }
}
// --------------------------------------
// Methods
// -------------------------------------
// Method to perform BSPmV on host using block_dia_csr_matrix format
template <class Matrix, class Vector>
void multiply_common_sqblock_host_diag(const Matrix &A, const Vector &B, Vector &C)
{
typedef typename Matrix::TConfig TConfig;
if (TConfig::memSpace == AMGX_device)
{
FatalError("Executrion path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
//TODO:: This implementation is very inneficient, Use BLAS
typedef typename TConfig::IndPrec IndexType;
typedef typename TConfig::MatPrec ValueType;
typedef typename Vector::value_type ValueTypeB;
IndexType bsize = A.get_block_dimy();
ValueTypeB temp;
for (int i = 0; i < A.get_num_rows(); i++)
{
// Initialize RHS to 0
for (int m = 0; m < bsize; m++)
{
C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
}
// Contribution from diagonal blocks
for (int n = 0; n < bsize; n++)
{
temp = B[i * bsize + n];
for (int m = 0; m < bsize; m++)
{
C[i * bsize + m] = C[i * bsize + m] + A.values[A.diag[i] * bsize * bsize + m * bsize + n] * temp;
}
}
// Contribution from nonzero off-diagonal blocks
for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
{
IndexType jcol = A.col_indices[j];
for (int n = 0; n < bsize; n++)
{
temp = B[jcol * bsize + n];
for (int m = 0; m < bsize; m++)
{
C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize * bsize + m * bsize + n] * temp;
}
}
}
}
}
}
template <class Matrix, class Vector>
void multiply_common_sqblock_host_nodiag(const Matrix &A, const Vector &B, Vector &C)
{
typedef typename Matrix::TConfig TConfig;
if (TConfig::memSpace == AMGX_device)
{
FatalError("Executrion path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
//TODO:: This implementation is very inneficient, Use BLAS
typedef typename TConfig::IndPrec IndexType;
typedef typename TConfig::MatPrec ValueType;
typedef typename Vector::value_type ValueTypeB;
IndexType bsize = A.get_block_dimy();
ValueTypeB temp;
for (int i = 0; i < A.get_num_rows(); i++)
{
// Initialize RHS to 0
for (int m = 0; m < bsize; m++)
{
C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
}
// Contribution from nonzero blocks
for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
{
IndexType jcol = A.col_indices[j];
for (int n = 0; n < bsize; n++)
{
temp = B[jcol * bsize + n];
for (int m = 0; m < bsize; m++)
{
C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize * bsize + m * bsize + n] * temp;
}
}
}
}
}
}
// Scalar (1x1-block) SpMV dispatcher: reference loops on the host,
// cuSPARSE bsrmv on the device.
template <class Matrix, class Vector>
class Multiply_1x1
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            typedef typename TConfig::VecPrec ValueTypeB;

            if (TConfig::memSpace != AMGX_host)
            {
                // Device path: C = 1*A*B + 0*C restricted to the requested view.
                Cusparse::bsrmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, view);
                cudaCheckError();
                return;
            }

            // Host path: pick the reference loop that matches the diagonal storage.
            if (A.hasProps(DIAG))
            {
                multiply_common_sqblock_host_diag(A, B, C);
            }
            else
            {
                multiply_common_sqblock_host_nodiag(A, B, C);
            }
        }
};
// Masked scalar SpMV dispatcher: computes C only for the rows selected by
// 'mask' via cuSPARSE bsrxmv. Device only.
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1_masked(Matrix &A, Vector &B, Vector &C, IVector mask, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("Masked multiply is not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }

            typedef typename TConfig::VecPrec ValueTypeB;
            // C = 1*A*B + 0*C for the masked rows only.
            Cusparse::bsrxmv<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, mask, view);
            cudaCheckError();
        }
};
// Scalar SpMV dispatcher using the matrix's internal halo mask. Device only.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }

            typedef typename TConfig::VecPrec ValueTypeB;
            // C = 1*A*B + 0*C with the mask handled inside the cuSPARSE wrapper.
            Cusparse::bsrmv_with_mask<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C );
            cudaCheckError();
        }
};
// Scalar SpMV dispatcher for the restriction product, using the matrix's
// internal mask and the prolongation matrix P. Device only.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, Matrix &P)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
            }

            typedef typename TConfig::VecPrec ValueTypeB;
            // C = 1*A*B + 0*C with masking/restriction handled in the wrapper.
            Cusparse::bsrmv_with_mask_restriction<TConfig>(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, P);
            cudaCheckError();
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// 4x4-block SpMV dispatcher. Host: reference loops. Device: custom kernels
// specialized on DIAG property and block storage order (row/column major).
template <class Matrix, class Vector>
class Multiply_4x4
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_4x4(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                // Restrict the multiply to the rows covered by 'view'.
                int num_rows, offset;
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                // Launch configuration; the kernels grid-stride, so capping the
                // grid at AMGX_GRID_MAX_SIZE stays correct.
                const unsigned int threads_per_block = 128;
                const int eightwarps_per_block = threads_per_block / 4;
                const int num_warps_per_cta = threads_per_block / 32;
                const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (num_rows + num_warps_per_cta - 1) / num_warps_per_cta); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;

                if (!A.hasProps(DIAG))
                {
                    // Diagonal stored inline with the other nonzeros.
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    // Diagonal stored separately (DIAG property).
#ifdef USE_EXPERIMENTAL_4x4
                    if ( A.getBlockFormat() == ROW_MAJOR )
                    {
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, true >) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, false >) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#else
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
#endif
                }

                cudaCheckError();
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// General bxb-block SpMV dispatcher: host reference loops, or cuSPARSE bsrmv
// on the device.
template <class Matrix, class Vector>
class Multiply_bxb
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_bxb(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                // Device path: C = 1*A*B + 0*C restricted to the requested view.
                Cusparse::bsrmv(types::util<ValueTypeB>::get_one(), A, B, types::util<ValueTypeB>::get_zero(), C, view);
                cudaCheckError();
                return;
            }

            // Host path: pick the reference loop that matches the diagonal storage.
            if (A.hasProps(DIAG))
            {
                multiply_common_sqblock_host_diag(A, B, C);
            }
            else
            {
                multiply_common_sqblock_host_nodiag(A, B, C);
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// 3x3-block SpMV dispatcher. Host: reference loops. Device: the general
// blockDiaCsrMultiplyKernelDiaProps kernel specialized for bsize == 3,
// with/without the separately stored diagonal (template param `diag`).
template <class Matrix, class Vector>
class Multiply_3x3
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_3x3(const Matrix &A, const Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace == AMGX_host)
            {
                if (A.hasProps(DIAG))
                {
                    multiply_common_sqblock_host_diag(A, B, C);
                }
                else
                {
                    multiply_common_sqblock_host_nodiag(A, B, C);
                }
            }
            else
            {
                //TODO: compare with cublas
                typedef typename TConfig::IndPrec IndexType;
                typedef typename TConfig::MatPrec ValueTypeA;
                typedef typename TConfig::VecPrec ValueTypeB;
                // Restrict the multiply to the rows covered by 'view'.
                int num_rows, offset;
                A.getOffsetAndSizeForView(view, &offset, &num_rows);
                const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
                const IndexType *A_column_indices_ptr = A.col_indices.raw();
                const IndexType *A_dia_ind_ptr = A.diag.raw();
                const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
                const ValueTypeB *B_ptr = B.raw();
                ValueTypeB *C_ptr = C.raw();
                cudaCheckError();
                // 10 block rows per warp (32/3, integer division); the kernel's
                // padding check idles the 2 leftover lanes of each warp.
                const int threads_per_block = 64 * 3;
                const int blockrows_per_warp = 32 / 3;
                const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
                const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / blockrows_per_cta + 1); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;

                if (!A.hasProps(DIAG))
                {
                    // diag == 0: diagonal blocks stored inline with the other nonzeros.
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }
                else
                {
                    // diag == 1: add the separately stored diagonal block.
                    if (A.getBlockFormat() == ROW_MAJOR)
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                    else
                    {
                        hipFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>, hipFuncCachePreferL1);
                        hipLaunchKernelGGL(( blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
                    }
                }

                cudaCheckError();
            }
        }
};
// -------------------------------
// Explicit instantiations
// -------------------------------
// Instantiate matrix-matrix multiply for every real and complex build config.
#define AMGX_CASE_LINE(CASE) template void multiplyMM(const Matrix<TemplateMode<CASE>::Type>&, const Matrix<TemplateMode<CASE>::Type>&, Matrix<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the view-restricted matrix-vector multiply.
#define AMGX_CASE_LINE(CASE) template void multiply(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the row-masked matrix-vector multiply.
#define AMGX_CASE_LINE(CASE) template void multiply_masked(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, typename Matrix<TemplateMode<CASE>::Type>::IVector &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the multiply that uses the matrix's internal mask.
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the masked restriction multiply (takes the prolongation matrix P).
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask_restriction(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type> & );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
| d5acebf6ed5c2719eaa3e1eaad9468fe1fc32348.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <multiply.h>
#include <basic_types.h>
#include <texture.h>
#include <util.h>
#include <cutil.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#include <cusp/multiply.h>
#include <matrix.h>
#include <matrix_cusp.h>
#include <amgx_cusparse.h>
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <logger.h>
#include <sm_utils.inl>
#include <amgx_types/math.h>
#include <amgx_types/util.h>
namespace amgx
{
#define USE_EXPERIMENTAL_4x4
template <class Matrix, class Vector>
class Multiply_1x1;
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask;
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction;
template <class Matrix, class Vector>
class Multiply_3x3;
template <class Matrix, class Vector>
class Multiply_4x4;
template <class Matrix, class Vector>
class Multiply_bxb;
// Dispatch C = A*B to the SpMV implementation specialized for A's block shape.
template <typename TConfig>
void multiply_block_size(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;

    if (A.get_block_size() == 1)
    {
        Multiply_1x1<TMatrix, TVector>::multiply_1x1(A, B, C, view);
        return;
    }

    if (A.get_block_dimy() == 3 && A.get_block_dimx() == 3)
    {
        Multiply_3x3<TMatrix, TVector>::multiply_3x3(A, B, C, view);
        return;
    }

    if (A.get_block_dimy() == 4 && A.get_block_dimx() == 4)
    {
        Multiply_4x4<TMatrix, TVector>::multiply_4x4(A, B, C, view);
        return;
    }

    // Any other square block size goes through the generic path.
    Multiply_bxb<TMatrix, TVector>::multiply_bxb(A, B, C, view);
}
// Top-level SpMV C = A*B. In the multi-GPU case, overlaps the halo exchange of
// B with the multiply of interior rows (latency hiding) when the interior and
// exterior views differ; otherwise exchanges the halo up front (if B is dirty)
// and multiplies the whole exterior view.
template <typename TConfig>
void multiply(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, ViewType view)
{
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;

    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream ss;
        ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    // NOTE(review): duplicate of the typedefs above; harmless.
    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    // Latency hiding only makes sense when there is a distinct interior view,
    // more than one GPU, and B actually needs a halo exchange.
    bool latencyHiding = (A.getViewInterior() != A.getViewExterior() && !A.is_matrix_singleGPU() && B.dirtybit != 0);

    if (latencyHiding)
    {
        // Start the halo exchange asynchronously...
        A.manager->exchange_halo_split_gather(B, B.tag);
        // ...multiply interior rows while it is in flight...
        multiply_block_size(A, B, C, A.getViewInterior());
        // ...then finish the exchange...
        A.manager->exchange_halo_split_finish(B, B.tag);
        // ...and multiply only the rows that depend on halo data.
        ViewType bnd_view = (ViewType)(~(A.getViewInterior()) & A.getViewExterior());
        multiply_block_size(A, B, C, bnd_view);
    }
    else
    {
        if (!A.is_matrix_singleGPU() && B.dirtybit != 0)
        {
            A.manager->exchange_halo_v2(B, B.tag);
        }

        multiply_block_size(A, B, C, A.getViewExterior());
    }

    // C now holds fresh data and inherits A's block height.
    C.dirtybit = 1;
    C.set_block_dimy(A.get_block_dimx());
}
// Masked SpMV entry point: computes C = A*B only for the rows selected by
// 'mask'. Requires an initialized, scalar (1x1-block) matrix whose block
// dimensions match B.
template <class TConfig>
void multiply_masked(Matrix<TConfig> &A, Vector<TConfig> &B, Vector<TConfig> &C, typename Matrix<TConfig>::IVector &mask, ViewType view)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if(A.get_block_size() != 1)
    {
        FatalError("Unsupported blocksize for multiply_masked()", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimy())
    {
        std::stringstream msg;
        msg << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
        FatalError(msg.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
    }

    typedef Matrix<TConfig> TMatrix;
    typedef Vector<TConfig> TVector;
    typedef typename Matrix<TConfig>::IVector TIVector;
    Multiply_1x1_masked<TMatrix, TVector, TIVector>::multiply_1x1_masked(A, B, C, mask, view);
    C.set_block_dimy(A.get_block_dimx());
}
template<class Matrix, class Vector>
void multiply_with_mask(Matrix &A, Vector &B, Vector &C)
{
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_size() == 1)
{
Multiply_1x1_with_mask<Matrix, Vector>::multiply_1x1(A, B, C);
}
else
{
FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
C.set_block_dimy(A.get_block_dimx());
C.dirtybit = 1;
//if (!A.is_matrix_singleGPU() && C.size() == B.size() && C.delayed_send==0)
// A.manager->exchange_halo_async(C, C.tag);
}
template<class Matrix, class Vector>
void multiply_with_mask_restriction(Matrix &A, Vector &B, Vector &C, Matrix &P)
{
if (!A.is_initialized())
{
FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_dimx() != B.get_block_dimy())
{
std::stringstream ss;
ss << "Matrix and vector dimensions don't match: A.dimx == " << A.get_block_dimx() << ", B.dimy == " << B.get_block_dimy();
FatalError(ss.str().c_str(), AMGX_ERR_BAD_PARAMETERS);
}
if (A.get_block_size() == 1)
{
Multiply_1x1_with_mask_restriction<Matrix, Vector>::multiply_1x1(A, B, C, P);
}
else
{
FatalError("multiply with mask not supported for bsize != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
C.set_block_dimy(A.get_block_dimx());
C.dirtybit = 1;
}
// Host-only sparse matrix-matrix product C = A*B for scalar (1x1-block)
// matrices. Two-pass Gustavson-style algorithm: pass 1 counts the nonzeros of
// C with a per-row marker array; pass 2 accumulates each output row using a
// dense value array (`sums`) plus a singly linked list of touched columns
// threaded through `next`.
template<class TConfig>
void multiplyMM(const Matrix<TConfig> &A, const Matrix<TConfig> &B, Matrix<TConfig> &C)
{
    if (!A.is_initialized())
    {
        FatalError("Trying to multiply uninitialized matrix", AMGX_ERR_BAD_PARAMETERS);
    }

    if (A.get_block_dimx() != B.get_block_dimx() || A.get_block_dimy() != B.get_block_dimy())
    {
        FatalError("Matrices dimensions do not match", AMGX_ERR_BAD_PARAMETERS);
    }

    if (TConfig::memSpace == AMGX_device)
    {
        FatalError("Error, multiplyMM not implemented on device", AMGX_ERR_BAD_PARAMETERS);
    }
    else
    {
        if (A.get_block_size() != 1)
        {
            FatalError("multiplyMM only works for block_size ==1", AMGX_ERR_NOT_IMPLEMENTED);
        }
        else
        {
            typedef typename TConfig::IndPrec IndexType;
            typedef typename TConfig::MatPrec ValueType;
            typedef typename Matrix<TConfig>::IVector IVector;
            typedef typename Matrix<TConfig>::MVector MVector;
            C.set_initialized(0);
            IndexType num_nonzeros = 0;
            // mask[k] == i  <=>  column k has already been counted for row i.
            IVector mask(B.get_num_cols(), IndexType (-1));

            // Pass 1: compute nnz of C (explicit zeros included).
            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                for (IndexType jj = A.row_offsets[i]; jj < A.row_offsets[i + 1]; jj++)
                {
                    IndexType j = A.col_indices[jj];

                    for (IndexType kk = B.row_offsets[j]; kk < B.row_offsets[j + 1]; kk++)
                    {
                        IndexType k = B.col_indices[kk];

                        if (mask[k] != i)
                        {
                            mask[k] = i;
                            num_nonzeros++;
                        }
                    }
                }
            }

            // Allocate the output with the counted size.
            C.resize(A.get_num_rows(), B.get_num_cols(), num_nonzeros);
            // Sentinels for the linked list of touched columns.
            const IndexType unseen = static_cast<IndexType>(-1);
            const IndexType init = static_cast<IndexType>(-2);
            // Pass 2: compute the entries of C.
            IVector next(B.get_num_cols(), unseen);
            MVector sums(B.get_num_cols(), types::util<ValueType>::get_zero());
            num_nonzeros = 0;
            C.row_offsets[0] = 0;

            for (size_t i = 0; i < A.get_num_rows(); i++)
            {
                // head/length describe the list of columns touched in row i.
                IndexType head = init;
                IndexType length = 0;
                IndexType jj_start = A.row_offsets[i];
                IndexType jj_end = A.row_offsets[i + 1];

                for (IndexType jj = jj_start; jj < jj_end; jj++)
                {
                    IndexType j = A.col_indices[jj];
                    ValueType v = A.values[jj];
                    IndexType kk_start = B.row_offsets[j];
                    IndexType kk_end = B.row_offsets[j + 1];

                    for (IndexType kk = kk_start; kk < kk_end; kk++)
                    {
                        IndexType k = B.col_indices[kk];
                        sums[k] = sums[k] + v * B.values[kk];

                        // First touch of column k in this row: push onto the list.
                        if (next[k] == unseen)
                        {
                            next[k] = head;
                            head = k;
                            length++;
                        }
                    }
                }

                // Drain the list into C (explicit zeros are kept) and reset the
                // scratch arrays for the next row.
                for (IndexType jj = 0; jj < length; jj++)
                {
                    //if(sums[head] != ValueType(0))
                    //{
                    C.col_indices[num_nonzeros] = head;
                    C.values[num_nonzeros] = sums[head];
                    num_nonzeros++;
                    //}
                    IndexType temp = head;
                    head = next[head];
                    // clear arrays
                    next[temp] = unseen;
                    sums[temp] = types::util<ValueType>::get_zero();
                }

                C.row_offsets[i + 1] = num_nonzeros;
            }

            // Resize output again since pass2 omits explict zeros
            //C.resize(A.num_rows, B.num_cols, num_nonzeros);
            C.set_initialized(1);
        }
    }
}
// --------------------------------
// KERNELS
// --------------------------------
// Block-CSR sparse matrix-vector multiply, C = A * B, for square
// bsize x bsize blocks stored inline in the CSR values array (no separately
// stored diagonal).  Each group of `bsize` consecutive threads (an
// "eighth warp" when bsize == 4 and warpSize == 32) cooperates on one block
// row; lane `vec_entry_index` of the group owns one entry of the output
// block.  Block rows are processed grid-stride starting at `row_offset`.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernel(const IndexType *row_offsets,
const IndexType *column_indices,
const ValueTypeA *nonzero_values,
const ValueTypeB *B,
ValueTypeB *C,
const IndexType num_block_rows,
const IndexType row_offset)
{
// Global thread id; 2^log_bsize == bsize threads form one working group.
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int eighthwarp_id = row_offset + (tid >> log_bsize);
const int block_eighthwarp_id = threadIdx.x >> log_bsize;
// Which entry of the bsize-long output block this thread produces.
const int vec_entry_index = threadIdx.x & (bsize - 1);
// Per-group staging area for the bsize entries of a B block.
volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
ValueTypeB C_temp;
int offset, s_offset;
ValueTypeB temp[bsize];
while (eighthwarp_id < num_block_rows)
{
//i = eighthwarp_id;
C_temp = types::util<ValueTypeB>::get_zero();
// Accumulate the contribution of every nonzero block in this block row.
int jmax = row_offsets[eighthwarp_id + 1];
for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
offset = jcol * bsize + vec_entry_index;
// Each lane stages one entry of the B block into shared memory.
// NOTE(review): lanes read each other's s_xtemp entries below without an
// explicit barrier; this presumably relies on the group living within a
// single warp (bsize <= warpSize) — confirm for independent-thread-
// scheduling architectures.
types::util<ValueTypeB>::volcast(__cachingLoad(&B[offset]), s_xtemp + threadIdx.x);
// Load nonzero_values
s_offset = block_eighthwarp_id * bsize;
if (ROW_MAJOR)
{
// Row-major block: this lane's matrix row is contiguous -> vector load.
offset = jind * bsize * bsize + vec_entry_index * bsize;
loadAsVector<bsize>(nonzero_values + offset, temp);
}
else
{
// Column-major block: this lane's row entries are strided by bsize.
offset = jind * bsize * bsize + vec_entry_index;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
}
}
// Dot this lane's matrix row with the staged B block.
#pragma unroll
for (int m = 0; m < bsize; m++)
{
C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
}
}
C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
// Grid-stride: advance by the total number of bsize-thread groups.
eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
}
}
#ifdef USE_EXPERIMENTAL_4x4
// Experimental 4x4 block-CSR SpMV (C = A * B) for matrices with an external
// diagonal (DIAG property): the diagonal block of row i lives at
// nonzero_values[16 * dia_ptr[i]].  One half warp (16 threads) handles one
// 4x4 block row; on SM30+ column indices and partial sums are exchanged with
// warp shuffles, with a shared-memory fallback for older architectures.
// NOTE(review): the `row_offset` parameter is accepted but never used — rows
// always start at 0 here; confirm this is intended for the views that the
// caller passes an offset for.
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 8 )
#endif
void blockDiaCsrMultiplyKernelDiaProps_4x4( const IndexType *row_offsets,
const IndexType *column_indices,
const IndexType *dia_ptr,
const ValueTypeA *nonzero_values,
const ValueTypeB *B,
ValueTypeB *C,
const int num_block_rows,
const int row_offset )
{
const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
const int laneId = threadIdx.x % 32;
const int halfWarpId = threadIdx.x / 16;
const int halfLaneId = threadIdx.x % 16;
const int halfLaneId_div_4 = halfLaneId / 4;
const int halfLaneId_mod_4 = halfLaneId % 4;
const int laneId_div_16 = laneId / 16;
const int upperHalf = 16 * laneId_div_16;
// Ballot mask covering this thread's half of the warp.
const int upperMask = 0xffff << upperHalf;
#if __CUDA_ARCH__ < 300
const int nWarps = CTA_SIZE / 32; // Number of warps per CTA.
const int warpId = threadIdx.x / 32;
volatile __shared__ IndexType s_aColIds[nWarps][32];
#endif
// Shared memory needed to exchange X and delta.
__shared__ volatile ValueTypeB s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];
// Iterate over the rows of the matrix. One warp per two rows.
for ( int aRowId = blockIdx.x * nHalfWarps + halfWarpId ; aRowId < num_block_rows ; aRowId += gridDim.x * nHalfWarps )
{
// Load one block of B.
ValueTypeB my_Ax = types::util<ValueTypeB>::get_zero();
// The diagonal.
if ( halfLaneId_div_4 == 0 )
{
types::util<ValueTypeB>::volcast( B[4 * aRowId + halfLaneId_mod_4], my_s_mem + halfLaneId);
}
// Load the diagonal.
int diagId = dia_ptr[aRowId];
// Update my values.
ValueTypeA my_val = nonzero_values[16 * diagId + halfLaneId];
if ( ROW_MAJOR )
{
my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_mod_4]);
}
else
{
my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[halfLaneId_div_4]);
}
// The range of the rows.
int aColBegin = row_offsets[aRowId + 0];
int aColEnd = row_offsets[aRowId + 1];
// Each warp load column indices of 16 nonzero blocks
for ( ; utils::any( aColBegin < aColEnd ) ; aColBegin += 16 )
{
int aColIt = aColBegin + halfLaneId;
// Get the ID of the column.
int aColId = -1;
if ( aColIt < aColEnd )
{
aColId = column_indices[aColIt];
}
#if __CUDA_ARCH__ < 300
s_aColIds[warpId][laneId] = aColId;
#endif
// Loop over columns. We compute 8 columns per iteration.
for ( int k = 0, nCols = __popc( utils::ballot(aColId != -1) & upperMask ) ; k < nCols ; k += 4 )
{
int my_k = k + halfLaneId_div_4;
// Exchange column indices.
#if __CUDA_ARCH__ >= 300
int waColId = utils::shfl( aColId, upperHalf + my_k );
#else
int waColId = s_aColIds[warpId][upperHalf + my_k];
#endif
// Load 8 blocks of X if needed.
ValueTypeB my_x = types::util<ValueTypeB>::get_zero();
if ( waColId != -1 )
{
my_x = B[4 * waColId + halfLaneId_mod_4];
}
types::util<ValueTypeB>::volcast( my_x, my_s_mem + halfLaneId);
// Load 8 blocks of A.
#pragma unroll
for ( int i = 0 ; i < 4 ; ++i )
{
int w_aColTmp = aColBegin + k + i, w_aColIt = -1;
if ( w_aColTmp < aColEnd )
{
w_aColIt = w_aColTmp;
}
ValueTypeA my_val = types::util<ValueTypeA>::get_zero();
if ( w_aColIt != -1 )
{
my_val = nonzero_values[16 * w_aColIt + halfLaneId];
}
if ( ROW_MAJOR )
{
my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_mod_4]);
}
else
{
my_Ax = my_Ax + my_val * types::util<ValueTypeB>::volcast(my_s_mem[4 * i + halfLaneId_div_4]);
}
}
} // Loop over k
} // Loop over aColIt
// Reduce bmAx terms.
#if __CUDA_ARCH__ >= 300
// Shuffle-based reduction over the 4 lanes that share one output entry.
if ( ROW_MAJOR )
{
my_Ax = my_Ax + utils::shfl_xor( my_Ax, 1 );
my_Ax = my_Ax + utils::shfl_xor( my_Ax, 2 );
}
else
{
my_Ax = my_Ax + utils::shfl_xor( my_Ax, 4 );
my_Ax = my_Ax + utils::shfl_xor( my_Ax, 8 );
}
#else
// Pre-SM30 fallback: neighbor sums through shared memory.
types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
{
if ( ROW_MAJOR )
{
if ( laneId < 31 )
{
my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 1]);
types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
}
if ( laneId < 30 )
{
my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 2]);
types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
}
}
else
{
if ( laneId < 31 )
{
my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 4]);
types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
}
if ( laneId < 30 )
{
my_Ax = my_Ax + types::util<ValueTypeB>::volcast(s_mem[threadIdx.x + 8]);
types::util<ValueTypeB>::volcast(my_Ax, s_mem + threadIdx.x);
}
}
}
#endif
// Store the results.  One lane per output entry holds the reduced sum.
if ( ROW_MAJOR )
{
if ( halfLaneId_mod_4 == 0 )
{
C[4 * aRowId + halfLaneId_div_4] = my_Ax;
}
}
else
{
if ( halfLaneId_div_4 == 0 )
{
C[4 * aRowId + halfLaneId_mod_4] = my_Ax;
}
}
}
}
#else
// Fallback (non-experimental) 4x4 block-CSR SpMV with external diagonal:
// C = A * B where the diagonal block of row i is stored at
// nonzero_values[bsize_sq * dia_ptr[i]].  Same eighth-warp decomposition as
// blockDiaCsrMultiplyKernel above, plus the diagonal contribution.
// NOTE(review): `C_temp = 0.` and the raw shared-memory assignments below
// assume an arithmetic ValueTypeB, unlike the types::util-based kernels in
// this file — presumably this path is only compiled for scalar real types;
// confirm before enabling it for complex builds.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize, int bsize_sq, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps_4x4(const IndexType *row_offsets,
const IndexType *column_indices,
const IndexType *dia_ptr,
const ValueTypeA *nonzero_values,
const ValueTypeB *B,
ValueTypeB *C,
const int num_block_rows,
const int row_offset)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int eighthwarp_id = row_offset + (tid >> log_bsize);
const int block_eighthwarp_id = threadIdx.x >> log_bsize;
const int vec_entry_index = threadIdx.x & (bsize - 1);
// Per-group staging area for B blocks.
volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
ValueTypeB C_temp;
int offset, s_offset;
while (eighthwarp_id < num_block_rows)
{
//i = eighthwarp_id;
C_temp = 0.;
// Contribution from diagonal
offset = eighthwarp_id * bsize + vec_entry_index;
s_xtemp[threadIdx.x] = __cachingLoad(&B[offset]);
// Load dia_values and do matrix multiply
s_offset = block_eighthwarp_id * bsize;
ValueTypeB temp[bsize];
if (ROW_MAJOR)
{
loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
}
else
{
#pragma unroll
for (int m = 0; m < bsize; m++)
{
temp[m] = nonzero_values[bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index + bsize * m];
}
}
#pragma unroll
for (int m = 0; m < bsize; m++)
{
C_temp += temp[m] * s_xtemp[s_offset + m];
}
// Contribution from each nonzero column
int jmax = row_offsets[eighthwarp_id + 1];
for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
offset = jcol * bsize + vec_entry_index;
s_xtemp[threadIdx.x] = __cachingLoad(&B[offset]);
// Load nonzero_values
s_offset = block_eighthwarp_id * bsize;
if (ROW_MAJOR)
{
offset = jind * bsize_sq + vec_entry_index * bsize;
loadAsVector<bsize>(nonzero_values + offset, temp);
}
else
{
offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
temp[m] = nonzero_values[offset + bsize * m];
}
}
#pragma unroll
for (int m = 0; m < bsize; m++)
{
C_temp += temp[m] * s_xtemp[s_offset + m];
}
}
C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
// Grid-stride over bsize-thread groups.
eighthwarp_id += gridDim.x * blockDim.x >> log_bsize;
}
}
#endif
// implementation for arbitrary block size
// General block-size block-CSR SpMV, C = A * B, with optional external
// diagonal (compile-time `diag` flag selects whether dia_ptr is consulted).
// Because bsize does not generally divide the warp size, each warp handles
// `blockrows_per_warp` block rows and the leftover lanes return immediately,
// so shared-memory exchanges stay within a warp.
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int blockrows_per_warp, int bsize, int diag, bool ROW_MAJOR>
__global__
void blockDiaCsrMultiplyKernelDiaProps(const IndexType *row_offsets,
const IndexType *column_indices,
const IndexType *dia_ptr,
const ValueTypeA *nonzero_values,
const ValueTypeB *B,
ValueTypeB *C,
const int num_block_rows,
const int row_offset)
{
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x & 31;
// padding row blocks to fit in a single warp
if ( warp_thread_id >= blockrows_per_warp * bsize ) { return; }
// new thread id with padding
int tid = warp_id * blockrows_per_warp * bsize + warp_thread_id;
int eighthwarp_id = row_offset + blockIdx.x * blockrows_per_cta + tid / bsize;
const int block_eighthwarp_id = tid / bsize;
// Which entry of the bsize-long output block this thread produces.
const int vec_entry_index = tid % bsize;
const int bsize_sq = bsize * bsize;
volatile __shared__ ValueTypeB s_xtemp[ bsize * blockrows_per_cta ];
ValueTypeB C_temp;
int offset, s_offset;
while (eighthwarp_id < num_block_rows)
{
C_temp = types::util<ValueTypeB>::get_zero();
ValueTypeB temp[bsize];
if ( diag )
{
// Contribution from diagonal
offset = eighthwarp_id * bsize + vec_entry_index;
types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
// Load dia_values and do matrix multiply
s_offset = block_eighthwarp_id * bsize;
if (ROW_MAJOR)
{
loadAsVector<bsize>(nonzero_values + bsize_sq * dia_ptr[eighthwarp_id] + vec_entry_index * bsize, temp);
}
else
{
offset = dia_ptr[eighthwarp_id] * bsize_sq + vec_entry_index;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
}
}
#pragma unroll
for (int m = 0; m < bsize; m++)
{
C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
}
}
// Contribution from each nonzero column
int jmax = row_offsets[eighthwarp_id + 1];
for (int jind = row_offsets[eighthwarp_id]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
offset = jcol * bsize + vec_entry_index;
// Stage one entry of the B block per lane.
types::util<ValueTypeB>::volcast( __cachingLoad(&B[offset]), s_xtemp + tid);
// Load nonzero_values
s_offset = block_eighthwarp_id * bsize;
if (ROW_MAJOR)
{
offset = jind * bsize_sq + vec_entry_index * bsize;
loadAsVector<bsize>(nonzero_values + offset, temp);
}
else
{
offset = jind * bsize_sq + vec_entry_index;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
types::util<ValueTypeA>::to_uptype(nonzero_values[offset + bsize * m], temp[m]);
}
}
#pragma unroll
for (int m = 0; m < bsize; m++)
{
C_temp = C_temp + temp[m] * types::util<ValueTypeB>::volcast(s_xtemp[s_offset + m]);
}
}
C[eighthwarp_id * bsize + vec_entry_index] = C_temp;
// Grid-stride by the number of block rows processed per CTA.
eighthwarp_id += gridDim.x * blockrows_per_cta;
}
}
// --------------------------------------
// Methods
// -------------------------------------
// Method to perform BSPmV on host using block_dia_csr_matrix format
// Host-side reference BSpMV, C = A * B, for matrices with a separately
// stored diagonal (DIAG property): the diagonal block of row i lives at
// A.values[A.diag[i] * bsize^2].  Blocks are square (bsize x bsize, with
// bsize = A.get_block_dimy()) and indexed as m * bsize + n, m being the
// output-row index.  Triple loop, one dense block multiply at a time.
template <class Matrix, class Vector>
void multiply_common_sqblock_host_diag(const Matrix &A, const Vector &B, Vector &C)
{
    typedef typename Matrix::TConfig TConfig;
    if (TConfig::memSpace == AMGX_device)
    {
        // (typo "Executrion" fixed in the message below)
        FatalError("Execution path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO: this implementation is very inefficient; use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename Vector::value_type ValueTypeB;
        const IndexType bsize = A.get_block_dimy();
        const IndexType bsize_sq = bsize * bsize;   // hoisted loop invariant
        ValueTypeB temp;
        for (int i = 0; i < A.get_num_rows(); i++)
        {
            // Zero the output block for this row.
            for (int m = 0; m < bsize; m++)
            {
                C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
            }
            // Contribution from the separately stored diagonal block.
            for (int n = 0; n < bsize; n++)
            {
                temp = B[i * bsize + n];
                for (int m = 0; m < bsize; m++)
                {
                    C[i * bsize + m] = C[i * bsize + m] + A.values[A.diag[i] * bsize_sq + m * bsize + n] * temp;
                }
            }
            // Contribution from the nonzero off-diagonal blocks.
            for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
            {
                IndexType jcol = A.col_indices[j];
                for (int n = 0; n < bsize; n++)
                {
                    temp = B[jcol * bsize + n];
                    for (int m = 0; m < bsize; m++)
                    {
                        C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize_sq + m * bsize + n] * temp;
                    }
                }
            }
        }
    }
}
// Host-side reference BSpMV, C = A * B, for matrices whose diagonal blocks
// are stored inline with the other nonzeros (no DIAG property).  Blocks are
// square (bsize x bsize, bsize = A.get_block_dimy()), indexed m * bsize + n
// with m being the output-row index.
template <class Matrix, class Vector>
void multiply_common_sqblock_host_nodiag(const Matrix &A, const Vector &B, Vector &C)
{
    typedef typename Matrix::TConfig TConfig;
    if (TConfig::memSpace == AMGX_device)
    {
        // (typo "Executrion" fixed in the message below)
        FatalError("Execution path error: device matrix in host path", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        //TODO: this implementation is very inefficient; use BLAS
        typedef typename TConfig::IndPrec IndexType;
        typedef typename Vector::value_type ValueTypeB;
        const IndexType bsize = A.get_block_dimy();
        const IndexType bsize_sq = bsize * bsize;   // hoisted loop invariant
        ValueTypeB temp;
        for (int i = 0; i < A.get_num_rows(); i++)
        {
            // Zero the output block for this row.
            for (int m = 0; m < bsize; m++)
            {
                C[i * bsize + m] = types::util<ValueTypeB>::get_zero();
            }
            // Contribution from every nonzero block in the row.
            for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
            {
                IndexType jcol = A.col_indices[j];
                for (int n = 0; n < bsize; n++)
                {
                    temp = B[jcol * bsize + n];
                    for (int m = 0; m < bsize; m++)
                    {
                        C[i * bsize + m] = C[i * bsize + m] + A.values[j * bsize_sq + m * bsize + n] * temp;
                    }
                }
            }
        }
    }
}
// Dispatcher for 1x1 (scalar) block SpMV: C = A * B.
// Host builds walk the CSR structure directly; device builds defer to the
// cuSPARSE block-CSR routine.
template <class Matrix, class Vector>
class Multiply_1x1
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta  = types::util<ValueTypeB>::get_zero();
                // C = 1 * A * B + 0 * C
                Cusparse::bsrmv<TConfig>(alpha, A, B, beta, C, view);
                cudaCheckError();
                return;
            }

            // Host path: pick the reference routine matching the diagonal storage.
            if (A.hasProps(DIAG))
            {
                multiply_common_sqblock_host_diag(A, B, C);
            }
            else
            {
                multiply_common_sqblock_host_nodiag(A, B, C);
            }
        }
};
// Masked 1x1 SpMV: multiplies only the rows selected by `mask`.
// Device-only; the host path is not implemented.
template <class Matrix, class Vector, class IVector>
class Multiply_1x1_masked
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1_masked(Matrix &A, Vector &B, Vector &C, IVector mask, ViewType view)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta  = types::util<ValueTypeB>::get_zero();
                // Row-masked variant of bsrmv: C = A * B on masked rows only.
                Cusparse::bsrxmv<TConfig>(alpha, A, B, beta, C, mask, view);
                cudaCheckError();
                return;
            }

            FatalError("Masked multiply is not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
        }
};
// 1x1 SpMV using the matrix's own latency-hiding mask machinery.
// Device-only; the host path is not implemented.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta  = types::util<ValueTypeB>::get_zero();
                Cusparse::bsrmv_with_mask<TConfig>(alpha, A, B, beta, C );
                cudaCheckError();
                return;
            }

            FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
        }
};
// Masked 1x1 SpMV specialised for the restriction operation: C = A * B with
// the additional prolongation/restriction operator P forwarded to cuSPARSE
// wrapper.  Device-only; the host path is not implemented.
template <class Matrix, class Vector>
class Multiply_1x1_with_mask_restriction
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_1x1(Matrix &A, Vector &B, Vector &C, Matrix &P)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta  = types::util<ValueTypeB>::get_zero();
                Cusparse::bsrmv_with_mask_restriction<TConfig>(alpha, A, B, beta, C, P);
                cudaCheckError();
                return;
            }

            FatalError("multiply with mask not supported on host", AMGX_ERR_NOT_IMPLEMENTED);
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// Dispatcher for 4x4 block SpMV (C = A * B).  Host builds use the reference
// triple-loop routines; device builds launch the hand-written block-CSR
// kernels above, choosing the variant by diagonal storage (DIAG) and block
// value layout (row/column major).
template <class Matrix, class Vector>
class Multiply_4x4
{
public:
typedef typename Matrix::TConfig TConfig;
static void multiply_4x4(const Matrix &A, const Vector &B, Vector &C, ViewType view)
{
if (TConfig::memSpace == AMGX_host)
{
if (A.hasProps(DIAG))
{
multiply_common_sqblock_host_diag(A, B, C);
}
else
{
multiply_common_sqblock_host_nodiag(A, B, C);
}
}
else
{
//TODO: compare with cublas
typedef typename TConfig::IndPrec IndexType;
typedef typename TConfig::MatPrec ValueTypeA;
typedef typename TConfig::VecPrec ValueTypeB;
// Rows [offset, offset + num_rows) of the requested view are multiplied.
int num_rows, offset;
A.getOffsetAndSizeForView(view, &offset, &num_rows);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_ind_ptr = A.diag.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
const ValueTypeB *B_ptr = B.raw();
ValueTypeB *C_ptr = C.raw();
cudaCheckError();
const unsigned int threads_per_block = 128;
// 4 threads per block row -> 32 "eighth warp" groups per CTA.
const int eightwarps_per_block = threads_per_block / 4;
const int num_warps_per_cta = threads_per_block / 32;
// NOTE(review): the grid is sized by whole warps (1 row per warp) while the
// kernels assign one row per 4-thread group; the kernels grid-stride, so this
// is only a (conservative) sizing heuristic — confirm it is intentional.
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (num_rows + num_warps_per_cta - 1) / num_warps_per_cta); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
if (!A.hasProps(DIAG))
{
// Diagonal stored inline: use the plain block-CSR kernel.
if (A.getBlockFormat() == ROW_MAJOR)
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
else
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernel<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
}
else
{
// External diagonal: pass dia_ptr so the kernel can add it in.
#ifdef USE_EXPERIMENTAL_4x4
if ( A.getBlockFormat() == ROW_MAJOR )
{
blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, true > <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
else
{
blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, threads_per_block, false > <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
#else
if (A.getBlockFormat() == ROW_MAJOR)
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
else
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps_4x4<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2, 16, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
#endif
}
cudaCheckError();
}
}
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// Dispatcher for generic bxb block SpMV (C = A * B).  Host builds use the
// reference triple-loop routines; device builds defer to the cuSPARSE
// block-CSR routine.
template <class Matrix, class Vector>
class Multiply_bxb
{
    public:
        typedef typename Matrix::TConfig TConfig;

        static void multiply_bxb(Matrix &A, Vector &B, Vector &C, ViewType view)
        {
            if (TConfig::memSpace != AMGX_host)
            {
                typedef typename TConfig::VecPrec ValueTypeB;
                const ValueTypeB alpha = types::util<ValueTypeB>::get_one();
                const ValueTypeB beta  = types::util<ValueTypeB>::get_zero();
                // C = 1 * A * B + 0 * C
                Cusparse::bsrmv(alpha, A, B, beta, C, view);
                cudaCheckError();
                return;
            }

            // Host path: pick the reference routine matching the diagonal storage.
            if (A.hasProps(DIAG))
            {
                multiply_common_sqblock_host_diag(A, B, C);
            }
            else
            {
                multiply_common_sqblock_host_nodiag(A, B, C);
            }
        }
};
// Method to perform BSPmV on device using block_dia_csr_matrix format
// Dispatcher for 3x3 block SpMV (C = A * B).  Host builds use the reference
// triple-loop routines; device builds launch the general-block-size kernel
// with bsize = 3, selecting the DIAG/no-DIAG and row/column-major variants
// at compile time through template arguments.
template <class Matrix, class Vector>
class Multiply_3x3
{
public:
typedef typename Matrix::TConfig TConfig;
static void multiply_3x3(const Matrix &A, const Vector &B, Vector &C, ViewType view)
{
if (TConfig::memSpace == AMGX_host)
{
if (A.hasProps(DIAG))
{
multiply_common_sqblock_host_diag(A, B, C);
}
else
{
multiply_common_sqblock_host_nodiag(A, B, C);
}
}
else
{
//TODO: compare with cublas
typedef typename TConfig::IndPrec IndexType;
typedef typename TConfig::MatPrec ValueTypeA;
typedef typename TConfig::VecPrec ValueTypeB;
// Rows [offset, offset + num_rows) of the requested view are multiplied.
int num_rows, offset;
A.getOffsetAndSizeForView(view, &offset, &num_rows);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_ind_ptr = A.diag.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
const ValueTypeB *B_ptr = B.raw();
ValueTypeB *C_ptr = C.raw();
cudaCheckError();
const int threads_per_block = 64 * 3;
// 32 / 3 = 10 block rows of 3 threads per warp (2 lanes idle per warp).
const int blockrows_per_warp = 32 / 3;
const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (num_rows - 1) / blockrows_per_cta + 1); // (int) (A.get_num_rows()-1)/eightwarps_per_block + 1;
if (!A.hasProps(DIAG))
{
// Diagonal stored inline: diag template flag = 0 (dia_ptr unused).
if (A.getBlockFormat() == ROW_MAJOR)
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
else
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 0, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
}
else
{
// External diagonal: diag template flag = 1 (adds dia_ptr contribution).
if (A.getBlockFormat() == ROW_MAJOR)
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, true> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
else
{
cudaFuncSetCacheConfig(blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false>, cudaFuncCachePreferL1);
blockDiaCsrMultiplyKernelDiaProps<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, blockrows_per_warp, 3, 1, false> <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_ind_ptr, A_nonzero_values_ptr, B_ptr, C_ptr, offset + num_rows, offset);
}
}
cudaCheckError();
}
}
};
// -------------------------------
// Explicit instantiations
// -------------------------------
// Instantiate multiplyMM for every supported real and complex build mode.
#define AMGX_CASE_LINE(CASE) template void multiplyMM(const Matrix<TemplateMode<CASE>::Type>&, const Matrix<TemplateMode<CASE>::Type>&, Matrix<TemplateMode<CASE>::Type>&);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the general matrix-vector multiply entry point.
#define AMGX_CASE_LINE(CASE) template void multiply(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the row-masked multiply.
#define AMGX_CASE_LINE(CASE) template void multiply_masked(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type> &, typename Matrix<TemplateMode<CASE>::Type>::IVector &, ViewType);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the latency-hiding masked multiply.
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &);
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
// Instantiate the masked multiply used during restriction.
#define AMGX_CASE_LINE(CASE) template void multiply_with_mask_restriction(Matrix<TemplateMode<CASE>::Type> &, Vector<TemplateMode<CASE>::Type>&, Vector<TemplateMode<CASE>::Type> &, Matrix<TemplateMode<CASE>::Type> & );
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
5bc4deb69be9dfde53a54cd90373529cbf2bb0f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/types.h>
#include "imageFile.h"
#include <time.h>
#include <string.h>
#include <math.h>
// Checks the result of a HIP runtime call: on failure, prints the error
// string together with the file and line of the call, then exits with
// status 1.  Wrap every hip* API call with this macro.
#define cudaCheck(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// 3x3 edge-detect filter (author's "tie dye" Sobel variant) on an 8-bit
// grayscale image.  One thread per pixel; expects a 2D launch covering
// width x height.  buff: input (w*h bytes); buffer_out: output (w*h bytes).
// Fixes over the original: the stencil is now restricted to interior pixels
// (the unguarded version read outside the image at the borders), border
// pixels are written as 0 instead of being left undefined, sqrtf is used for
// the single-precision magnitude, and the result is clamped before the
// narrowing cast to a byte.
__global__ void anyMethod(unsigned char* buff , unsigned char* buffer_out , int w , int h)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int width = w, height = h;
    if (x >= width || y >= height)
    {
        return;
    }
    if (x >= 1 && x < width - 1 && y >= 1 && y < height - 1)
    {
        // Horizontal gradient (standard Sobel kernel).
        int hx = -buff[width*(y-1) + (x-1)] + buff[width*(y-1)+(x+1)]
                 -2*buff[width*(y)+(x-1)] + 2*buff[width*(y)+(x+1)]
                 -buff[width*(y+1)+(x-1)] + buff[width*(y+1)+(x+1)];
        // NOTE(review): the top row reuses (x+1) twice instead of (x); kept
        // as-is since the original comments describe the effect as intentional.
        int vx = buff[width*(y-1)+(x-1)] + 2*buff[width*(y-1)+(x+1)] + buff[width*(y-1)+(x+1)]
                 -buff[width*(y+1)+(x-1)] - 2*buff[width*(y+1)+(x)] - buff[width*(y+1)+(x+1)];
        // Deliberate scaling tweaks from the original author ("tie dye" look).
        hx = hx * 4;
        vx = vx / 5;
        int val = (int)sqrtf((float)hx * (float)hx + (float)vx * (float)vx);
        // Clamp to the byte range before narrowing (previously wrapped).
        buffer_out[y * width + x] = (unsigned char)(val > 255 ? 255 : val);
    }
    else
    {
        // Border pixels: no full 3x3 neighborhood available; output 0.
        buffer_out[y * width + x] = 0;
    }
}
// Reads a grayscale image, runs the edge-detect kernel on the GPU, times it
// with HIP events, and writes the result.
// Fixes over the original: hipLaunchKernelGGL received (block, grid) instead
// of (grid, block); the kernel's (w, h) parameters were passed as
// (height, width); the block-count division truncated for image sides not
// divisible by 8; argv was read without checking argc; and launch errors
// were never queried.
int main(int argc, char* argv[])
{
    Photo* inputPhoto;
    int width;
    int height;
    char* infile;
    char* outfile;
    unsigned char *buff;
    unsigned char *buffer_out;
    // Guard against missing command-line arguments.
    if (argc < 3)
    {
        fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]);
        return 1;
    }
    infile = argv[1];
    outfile = argv[2];
    // Read the image and query its dimensions.
    inputPhoto = readImage(infile);
    width = getImageWidth(inputPhoto);
    height = getImageHeight(inputPhoto);
    // Total size of the 8-bit grayscale image in bytes.
    int ImageSize = width * height * sizeof(unsigned char);
    cudaCheck(hipMalloc(&buff, ImageSize));
    cudaCheck(hipMalloc(&buffer_out, ImageSize));
    cudaCheck(hipMemcpy(buff, inputPhoto->data, ImageSize, hipMemcpyHostToDevice));
    // 8x8 thread blocks; ceil-divide so the grid covers sides that are not
    // multiples of 8 (the kernel bounds-checks the tail).
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks((width + 7) / 8, (height + 7) / 8);
    // Time the kernel with HIP events.
    hipEvent_t start, end;
    float elapsedTime;
    cudaCheck(hipEventCreate(&start));
    cudaCheck(hipEventCreate(&end));
    cudaCheck(hipEventRecord(start));
    // Launch: grid first, then block; (width, height) matches the kernel's
    // (w, h) parameter order.
    hipLaunchKernelGGL(anyMethod, numBlocks, threadsPerBlock, 0, 0, buff, buffer_out, width, height);
    cudaCheck(hipGetLastError());
    cudaCheck(hipDeviceSynchronize());
    cudaCheck(hipEventRecord(end));
    // Compute the elapsed time (milliseconds).
    cudaCheck(hipEventSynchronize(end));
    cudaCheck(hipEventElapsedTime(&elapsedTime, start, end));
    cudaCheck(hipMemcpy(inputPhoto->data, buffer_out, ImageSize, hipMemcpyDeviceToHost));
    cudaCheck(hipEventDestroy(start));
    cudaCheck(hipEventDestroy(end));
    cudaCheck(hipFree(buffer_out));
    cudaCheck(hipFree(buff));
    writeImage(inputPhoto, outfile);
    printf("%s Any method took %f sec\n", infile, (elapsedTime / 1000.0));
    return 0;
}
| 5bc4deb69be9dfde53a54cd90373529cbf2bb0f1.cu | #include <stdlib.h>
#include <cuda.h>
#include <stdio.h>
#include <sys/types.h>
#include "imageFile.h"
#include <time.h>
#include <string.h>
#include <math.h>
// Checks the result of a CUDA runtime call: on failure, prints the error
// string together with the file and line of the call, then exits with
// status 1.  Wrap every cuda* API call with this macro.
#define cudaCheck(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// 3x3 edge-detect filter (author's "tie dye" Sobel variant) on an 8-bit
// grayscale image.  One thread per pixel; expects a 2D launch covering
// width x height.  buff: input (w*h bytes); buffer_out: output (w*h bytes).
// Fixes over the original: the stencil is now restricted to interior pixels
// (the unguarded version read outside the image at the borders), border
// pixels are written as 0 instead of being left undefined, sqrtf is used for
// the single-precision magnitude, and the result is clamped before the
// narrowing cast to a byte.
__global__ void anyMethod(unsigned char* buff , unsigned char* buffer_out , int w , int h)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int width = w, height = h;
    if (x >= width || y >= height)
    {
        return;
    }
    if (x >= 1 && x < width - 1 && y >= 1 && y < height - 1)
    {
        // Horizontal gradient (standard Sobel kernel).
        int hx = -buff[width*(y-1) + (x-1)] + buff[width*(y-1)+(x+1)]
                 -2*buff[width*(y)+(x-1)] + 2*buff[width*(y)+(x+1)]
                 -buff[width*(y+1)+(x-1)] + buff[width*(y+1)+(x+1)];
        // NOTE(review): the top row reuses (x+1) twice instead of (x); kept
        // as-is since the original comments describe the effect as intentional.
        int vx = buff[width*(y-1)+(x-1)] + 2*buff[width*(y-1)+(x+1)] + buff[width*(y-1)+(x+1)]
                 -buff[width*(y+1)+(x-1)] - 2*buff[width*(y+1)+(x)] - buff[width*(y+1)+(x+1)];
        // Deliberate scaling tweaks from the original author ("tie dye" look).
        hx = hx * 4;
        vx = vx / 5;
        int val = (int)sqrtf((float)hx * (float)hx + (float)vx * (float)vx);
        // Clamp to the byte range before narrowing (previously wrapped).
        buffer_out[y * width + x] = (unsigned char)(val > 255 ? 255 : val);
    }
    else
    {
        // Border pixels: no full 3x3 neighborhood available; output 0.
        buffer_out[y * width + x] = 0;
    }
}
// Reads a grayscale image, runs the edge-detect kernel on the GPU, times it
// with CUDA events, and writes the result.
// Fixes over the original: the launch used <<<threadsPerBlock, numBlocks>>>
// (grid and block swapped — the first execution-configuration argument is
// the grid dimension); the kernel's (w, h) parameters were passed as
// (height, width); the block-count division truncated for image sides not
// divisible by 8; argv was read without checking argc; and launch errors
// were never queried.
int main(int argc, char* argv[])
{
    Photo* inputPhoto;
    int width;
    int height;
    char* infile;
    char* outfile;
    unsigned char *buff;
    unsigned char *buffer_out;
    // Guard against missing command-line arguments.
    if (argc < 3)
    {
        fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]);
        return 1;
    }
    infile = argv[1];
    outfile = argv[2];
    // Read the image and query its dimensions.
    inputPhoto = readImage(infile);
    width = getImageWidth(inputPhoto);
    height = getImageHeight(inputPhoto);
    // Total size of the 8-bit grayscale image in bytes.
    int ImageSize = width * height * sizeof(unsigned char);
    cudaCheck(cudaMalloc(&buff, ImageSize));
    cudaCheck(cudaMalloc(&buffer_out, ImageSize));
    cudaCheck(cudaMemcpy(buff, inputPhoto->data, ImageSize, cudaMemcpyHostToDevice));
    // 8x8 thread blocks; ceil-divide so the grid covers sides that are not
    // multiples of 8 (the kernel bounds-checks the tail).
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks((width + 7) / 8, (height + 7) / 8);
    // Time the kernel with CUDA events.
    cudaEvent_t start, end;
    float elapsedTime;
    cudaCheck(cudaEventCreate(&start));
    cudaCheck(cudaEventCreate(&end));
    cudaCheck(cudaEventRecord(start));
    // Launch: grid first, then block; (width, height) matches the kernel's
    // (w, h) parameter order.
    anyMethod<<<numBlocks, threadsPerBlock>>>(buff, buffer_out, width, height);
    cudaCheck(cudaGetLastError());
    cudaCheck(cudaDeviceSynchronize());
    cudaCheck(cudaEventRecord(end));
    // Compute the elapsed time (milliseconds).
    cudaCheck(cudaEventSynchronize(end));
    cudaCheck(cudaEventElapsedTime(&elapsedTime, start, end));
    cudaCheck(cudaMemcpy(inputPhoto->data, buffer_out, ImageSize, cudaMemcpyDeviceToHost));
    cudaCheck(cudaEventDestroy(start));
    cudaCheck(cudaEventDestroy(end));
    cudaCheck(cudaFree(buffer_out));
    cudaCheck(cudaFree(buff));
    writeImage(inputPhoto, outfile);
    printf("%s Any method took %f sec\n", infile, (elapsedTime / 1000.0));
    return 0;
}
|
b0565299545546f894888423bd993954ea1ac2e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gr_solve.cuh"
/* Build the host-side sparse (CSR) solver state for the granule-cell (GR)
 * population.
 *
 * Steps:
 *   1. read per-compartment geometry (radius, length, axial resistance) and
 *      the parent-compartment table from PARAM_FILE_GR;
 *   2. assemble the symmetric inter-compartment conductance matrix `mat`;
 *   3. convert it to CSR (val / col / row), forcing an explicit diagonal
 *      slot per row and recording each diagonal's position in `dig`;
 *   4. replicate the one-neuron pattern for all gr->n neurons;
 *   5. apply a solver-dependent sign/scale to `val` and snapshot `val_ori`.
 *
 * The caller owns the returned struct and every array hanging off it.
 *
 * Fixes vs. the previous version: the parameter file handle is now closed
 * (it leaked), and the fscanf result is checked against the expected field
 * count instead of only EOF (short or malformed lines are now caught). */
__host__ static
neuron_solve_t* set_host_gr_solve ( const char *type, neuron_t *gr )
{
  neuron_solve_t *gr_solve = ( neuron_solve_t * ) malloc ( sizeof ( neuron_solve_t ) );
  // dense one-neuron coupling matrix and parent-compartment table
  double *mat = ( double * ) malloc ( GR_COMP * GR_COMP * sizeof ( double ) );
  int *l_connect = ( int * ) malloc ( GR_COMP * sizeof ( int ) );
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ ) { mat [ i ] = 0.0; }
  // !!!DUPLICATE CODE!!! (the same geometry parse exists elsewhere)
  double rad [ GR_COMP ], len [ GR_COMP ], Ra [ GR_COMP ];
  FILE *file = fopen ( PARAM_FILE_GR, "r" );
  if ( ! file ) { fprintf ( stderr, "no such file %s\n", PARAM_FILE_GR ); exit ( 1 ); }
  for ( int i = 0; i < GR_COMP; i++ ) {
    int i1, i2, i3;
    double d1, d2, d3, d4, i_l1, i_l2, i_l3, i_Na, i_KV, i_KA, i_KCa, i_KM, i_KIR, i_Ca;
    // Require all 17 fields; the old `== EOF` test missed malformed lines.
    if ( fscanf ( file, "%d %d %lf %lf %lf %lf %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf ",
                  &i1, &i2, &d1, &d2, &d3, &d4, &i3, &i_l1, &i_l2, &i_l3,
                  &i_Na, &i_KV, &i_KA, &i_KCa, &i_KM, &i_KIR, &i_Ca ) != 17 ) {
      printf ( "PARAM_FILE_READING_ERROR\n" );
      exit ( 1 );
    }
    rad [ i1 ] = 0.5 * d1 * 1e-4; // diameter [mum] -> radius [cm]
    len [ i1 ] = d2 * 1e-4;       // [mum -> cm]
    Ra  [ i1 ] = d3 * 1e-3;       // [kohm-cm]
    l_connect [ i ] = i2;         // parent compartment (< 0 for the root)
  }
  fclose ( file ); // was leaked before
  // symmetric axial conductance between each compartment and its parent [mS]
  for ( int i = 0; i < GR_COMP; i++ ) {
    int d = l_connect [ i ];
    if ( d >= 0 ) {
      mat [ d + GR_COMP * i ] =
        ( 2.0 / ( ( Ra [ i ] * len [ i ] ) / ( rad [ i ] * rad [ i ] * M_PI )
                + ( Ra [ d ] * len [ d ] ) / ( rad [ d ] * rad [ d ] * M_PI ) ) );
      mat [ i + GR_COMP * d ] = mat [ d + GR_COMP * i ];
    }
  }
  // NNZ per neuron = off-diagonal couplings + one forced diagonal per row
  int nnz = GR_COMP;
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ ) { nnz += ( mat [ i ] > 0.0 ); }
  gr_solve -> nnz = nnz * ( gr -> n ); printf ( "GR -> nnz = %d\n", gr_solve -> nnz );
  gr_solve -> val     = ( double * ) malloc ( gr_solve -> nnz * sizeof ( double ) ); // CSR nonzeros (incl. diagonals)
  gr_solve -> val_ori = ( double * ) malloc ( gr_solve -> nnz * sizeof ( double ) ); // pristine copy of val
  gr_solve -> b       = ( double * ) malloc ( GR_COMP * gr -> n * sizeof ( double ) ); // right-hand side
  gr_solve -> dammy   = ( double * ) malloc ( GR_COMP * gr -> n * sizeof ( double ) ); // scratch vector
  gr_solve -> col     = ( int * ) malloc ( gr_solve -> nnz * sizeof ( int ) );       // CSR column indices
  gr_solve -> row     = ( int * ) malloc ( ( GR_COMP * gr -> n + 1 ) * sizeof ( int ) ); // CSR row pointers
  gr_solve -> dig     = ( int * ) malloc ( GR_COMP * gr -> n * sizeof ( int ) );     // diagonal positions in val
  sprintf ( gr_solve -> type, "%s", type );
  double *val = gr_solve -> val;
  double *val_ori = gr_solve -> val_ori;
  double *b = gr_solve -> b;
  double *dammy = gr_solve -> dammy;
  int *col = gr_solve -> col;
  int *row = gr_solve -> row;
  int *dig = gr_solve -> dig;
  for ( int i = 0; i < gr_solve -> nnz; i++ ) { val [ i ] = val_ori [ i ] = 0.0; col [ i ] = 0; }
  for ( int i = 0; i < GR_COMP * gr -> n + 1; i++ ) { row [ i ] = 0; }
  for ( int i = 0; i < GR_COMP * gr -> n; i++ ) { dig [ i ] = 0; b [ i ] = 0; dammy [ i ] = 0.0; }
  // Dense -> CSR for the first neuron.  Off-diagonals are stored negated;
  // each row gets an explicit diagonal slot that accumulates the row sum.
  int num_row = 0, num_col = 0, num_dig = 0, num = 0, count_row = 0;
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ )
  {
    if ( mat [ i ] > 0.0 ) {
      val [ num ] = - mat [ i ];
      count_row++;
      col [ num ] = num_col;
      num++;
    }
    else if ( num_row == num_col ) // force an (initially zero) diagonal entry
    {
      val [ num ] = 0.0;
      count_row++;
      col [ num ] = num_col;
      num_dig = num;
      num++;
    }
    num_col++;
    if ( num_col == GR_COMP ) // row done: fill diagonal, close row pointer
    {
      for ( int j = num_row * GR_COMP; j < num_row * GR_COMP + GR_COMP; j++ )
      {
        if ( j != num_row * GR_COMP + num_row ) {
          val [ num_dig ] += mat [ j ];
          dig [ num_row ] = num_dig;
        }
      }
      num_col = 0;
      num_row++;
      row [ num_row ] = count_row;
    }
  }
  // replicate the one-neuron CSR pattern for neurons 1 .. n-1
  for ( int i = 1; i < gr -> n; i++ )
  {
    for ( int j = 0; j < nnz; j++ )
    {
      val [ j + nnz * i ] = val [ j ];
      col [ j + nnz * i ] = col [ j ] + GR_COMP * i;
    }
    for ( int j = 0; j < GR_COMP; j++ )
    {
      row [ j + GR_COMP * i ] = row [ j ] + nnz * i;
      dig [ j + GR_COMP * i ] = dig [ j ] + nnz * i;
    }
  }
  row [ GR_COMP * gr -> n ] = gr_solve -> nnz;
  // solver-dependent sign/scale, then snapshot into val_ori
  for ( int i = 0; i < gr_solve -> nnz; i++ )
  {
    if ( ( 0 == strncmp ( gr_solve -> type, "FORWARD_EULER", 13 ) ) ||
         ( 0 == strncmp ( gr_solve -> type, "RUNGE_KUTTA_4", 13 ) ) ||
         ( 0 == strncmp ( gr_solve -> type, "RKC", 3 ) ) ) { val [ i ] *= -1; }
    if ( 0 == strncmp ( gr_solve -> type, "CN", 2 ) ) { val [ i ] /= 2.0; }
    val_ori [ i ] = val [ i ];
  }
  free ( mat );
  free ( l_connect );
  return gr_solve;
}
/* <<<1,1>>> helper kernel: wire the device-resident solver struct so its
 * pointer members reference the pre-allocated device arrays, and store the
 * launch configuration (128 threads/block, enough blocks to cover the
 * nc compartments) for later kernels. */
__global__
static void device_mem_allocation ( const int nc, const int l_nnz, neuron_solve_t* d_gr_solve,
          double *d_val, double *d_val_ori, double *d_b, int *d_col, int *d_row, int *d_dig, double *d_dammy )
{
  d_gr_solve -> nnz = l_nnz;
  d_gr_solve -> val = d_val;         // CSR nonzero values
  d_gr_solve -> val_ori = d_val_ori; // pristine copy of val
  d_gr_solve -> b = d_b;             // right-hand side
  d_gr_solve -> col = d_col;         // CSR column indices
  d_gr_solve -> row = d_row;         // CSR row-pointer array
  d_gr_solve -> dig = d_dig;         // position of each diagonal entry in val
  d_gr_solve -> dammy = d_dammy;     // scratch vector
  d_gr_solve -> numThreadsPerBlock = 128;
  d_gr_solve -> numBlocks = ( int ) ( nc / d_gr_solve -> numThreadsPerBlock ) + 1;
  //Debug
  //printf ( "From GPU \n n = %d, nc = %d\n", d_gr -> n, d_gr -> nc );
}
/* <<<1,1>>> helper: store device pointer `ptr` into slot n of the device
 * pointer table `dev` (used to build the solver's work-vector table). */
__global__
static void device_mem_allocation2 (const int n, double ** dev, double *ptr )
{
  dev [ n ] = ptr;
}
/* <<<1,1>>> helper: attach the device-side work-vector table to the
 * device-resident solver struct. */
__global__ static
void device_mem_allocation3 ( neuron_solve_t* d_gr_solve, double **d_vec )
{
  d_gr_solve -> vec = d_vec;
}
/* Create the device-resident GR solver plus its host mirror.
 *
 * Builds the CSR matrix on the host (set_host_gr_solve), allocates matching
 * device buffers, records the raw device pointers in p_gr_solve (the host
 * mirror used later for copies and finalize), wires them into the device
 * struct with a <<<1,1>>> kernel, uploads the host data, then allocates and
 * initializes the solver-specific work-vector table ("RKC" or "CN").
 * Returns the device pointer d_gr_solve; when gr->nc == 0 only the bare
 * struct is allocated.
 * NOTE(review): hipMalloc/hipMemcpy return codes are not checked here. */
neuron_solve_t *gr_solve_initialize ( neuron_solve_t *p_gr_solve, const char *type, neuron_t *gr, neuron_t *d_gr ) // `type` selects the solver ("CN", "RKC", ...)
{
  neuron_solve_t *d_gr_solve;
  hipMalloc ( ( neuron_solve_t **) &d_gr_solve, sizeof ( neuron_solve_t ) );
  if ( gr -> nc == 0 ) { return d_gr_solve; } // empty population: nothing to set up
  // host-side CSR assembly (val / col / row / dig + work arrays)
  neuron_solve_t *h_gr_solve = set_host_gr_solve ( type, gr );
  int l_nnz = h_gr_solve -> nnz;
  double *d_val, *d_val_ori, *d_b, *d_dammy;
  hipMalloc ( ( double ** ) &d_val, l_nnz * sizeof ( double ) );
  hipMalloc ( ( double ** ) &d_val_ori, l_nnz * sizeof ( double ) );
  hipMalloc ( ( double ** ) &d_b, gr -> nc * sizeof ( double ) );
  hipMalloc ( ( double ** ) &d_dammy, gr -> nc * sizeof ( double ) );
  int *d_col, *d_row, *d_dig;
  hipMalloc ( ( int ** ) &d_col, l_nnz * sizeof ( int ) );
  hipMalloc ( ( int ** ) &d_row, ( GR_COMP * gr -> n + 1 ) * sizeof ( int ) );
  hipMalloc ( ( int ** ) &d_dig, ( GR_COMP * gr -> n ) * sizeof ( int ) );
  // the host mirror keeps the device pointers for later copies and finalize()
  p_gr_solve -> val = d_val;
  p_gr_solve -> val_ori = d_val_ori;
  p_gr_solve -> b = d_b;
  p_gr_solve -> dammy = d_dammy;
  p_gr_solve -> col = d_col;
  p_gr_solve -> row = d_row;
  p_gr_solve -> dig = d_dig;
  p_gr_solve -> nnz = l_nnz;
  p_gr_solve -> numThreadsPerBlock = 128;
  p_gr_solve -> numBlocks = ( int ) ( ( gr -> nc ) / ( p_gr_solve -> numThreadsPerBlock ) ) + 1;
  sprintf ( p_gr_solve -> type, "%s", type );
  hipDeviceSynchronize ( );
  // point the device struct at the freshly allocated arrays
  hipLaunchKernelGGL(( device_mem_allocation) , dim3(1), dim3(1) , 0, 0, gr -> nc, l_nnz, d_gr_solve, d_val, d_val_ori, d_b, d_col, d_row, d_dig, d_dammy );
  hipDeviceSynchronize ( );
  // upload the host CSR data into the device buffers
  hipMemcpy ( p_gr_solve -> val, h_gr_solve -> val, l_nnz * sizeof ( double ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> val_ori, h_gr_solve -> val_ori, l_nnz * sizeof ( double ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> b, h_gr_solve -> b, gr -> nc * sizeof ( double ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> dammy, h_gr_solve -> dammy, gr -> nc * sizeof ( double ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> col, h_gr_solve -> col, l_nnz * sizeof ( int ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> row, h_gr_solve -> row, ( GR_COMP * gr -> n + 1 ) * sizeof ( int ), hipMemcpyHostToDevice );
  hipMemcpy ( p_gr_solve -> dig, h_gr_solve -> dig, ( GR_COMP * gr -> n ) * sizeof ( int ), hipMemcpyHostToDevice );
  // the host copies are no longer needed once uploaded
  free ( h_gr_solve -> val ); free ( h_gr_solve -> val_ori ); free ( h_gr_solve -> b );
  free ( h_gr_solve -> col ); free ( h_gr_solve -> row ); free ( h_gr_solve -> dig );
  free ( h_gr_solve -> dammy );
  // set vec: per-solver work-vector table (array of device pointers)
  double **d_vec;
  if ( 0 == strncmp ( p_gr_solve -> type, "RKC", 3 ) ) {
    hipMalloc ( ( double *** ) &d_vec, n_vec_RKC * sizeof ( double * ) );
    p_gr_solve -> vec = ( double ** ) malloc ( n_vec_RKC * sizeof ( double * ) );
    for ( int i = 0; i < n_vec_RKC; i++ ) {
      hipMalloc ( ( double ** ) ( & ( p_gr_solve -> vec [ i ] ) ), gr -> nc * sizeof ( double ) );
      hipLaunchKernelGGL(( device_mem_allocation2) , dim3(1), dim3(1) , 0, 0, i, d_vec, p_gr_solve -> vec [ i ] );
    }
    hipLaunchKernelGGL(( device_mem_allocation3) , dim3(1), dim3(1) , 0, 0, d_gr_solve, d_vec );
    RKC_vec_initialize ( d_gr, d_gr_solve, gr, p_gr_solve );
  }
  /**/
  else if ( 0 == strncmp ( p_gr_solve -> type, "CN", 2 ) ) {
    hipMalloc ( ( double *** ) &d_vec, n_vec_CNm * sizeof ( double * ) );
    p_gr_solve -> vec = ( double ** ) malloc ( n_vec_CNm * sizeof ( double * ) );
    for ( int i = 0; i < n_vec_CNm; i++ ) {
      hipMalloc ( ( double ** ) ( & ( p_gr_solve -> vec [ i ] ) ), gr -> nc * sizeof ( double ) );
      hipLaunchKernelGGL(( device_mem_allocation2) , dim3(1), dim3(1) , 0, 0, i, d_vec, p_gr_solve -> vec [ i ] );
    }
    hipLaunchKernelGGL(( device_mem_allocation3) , dim3(1), dim3(1) , 0, 0, d_gr_solve, d_vec );
    //printf ( "pre gr_cnm_vec_initialize\n" ); // Debug
    hipLaunchKernelGGL(( gr_cnm_vec_initialize) , dim3(p_gr_solve -> numBlocks), dim3(p_gr_solve -> numThreadsPerBlock) , 0, 0, d_gr, d_gr_solve );
    hipLaunchKernelGGL(( gr_update_ion) , dim3(p_gr_solve -> numBlocks), dim3(p_gr_solve -> numThreadsPerBlock) , 0, 0, d_gr, d_gr_solve, CN_DT );
    double **l_ion = gr -> ion;
    // advance the Na-channel state arrays once with time step CN_DT
    hipLaunchKernelGGL(( gr_Na_update) , dim3(p_gr_solve -> numBlocks), dim3(p_gr_solve -> numThreadsPerBlock) , 0, 0,
      gr -> nc, gr -> elem [ v ], CN_DT, gr -> elem [ compart ],
      l_ion [ o_Na ], l_ion [ c1_Na ], l_ion [ c2_Na ], l_ion [ c3_Na ], l_ion [ c4_Na ], l_ion [ c5_Na ],
      l_ion [ i1_Na ], l_ion [ i2_Na ], l_ion [ i3_Na ], l_ion [ i4_Na ], l_ion [ i5_Na ], l_ion [ i6_Na ] );
    // Debug
    //printf ("\n");
  }
  /*
  else if ( 0 == strncmp ( gr_solve -> type, "RUNGE_KUTTA_4", 13 ) ) {
    gr_solve -> vec = ( double ** ) malloc ( gr_n_vec_RK4 * sizeof ( double *) );
    for ( int i = 0; i < gr_n_vec_RK4; i++ ) {
      gr_solve -> vec [ i ] = ( double * ) malloc ( gr -> nc * sizeof ( double ) );
    }
  }*/
  free ( h_gr_solve );
  return d_gr_solve;
}
/* Release every resource owned by the GR solver: the device arrays tracked
 * by the host mirror p_gr_solve, the solver-specific work vectors (RKC also
 * owns host-side scratch buffers), the device-resident struct d_gr_solve,
 * and finally the host mirror itself. */
void gr_solve_finalize ( const int n_gr, neuron_solve_t *d_gr_solve, neuron_solve_t *p_gr_solve )
{
  if ( n_gr > 0 ) {
    // CSR storage and work vectors live in device memory
    hipFree ( p_gr_solve -> val );
    hipFree ( p_gr_solve -> val_ori );
    hipFree ( p_gr_solve -> b );
    hipFree ( p_gr_solve -> dammy );
    hipFree ( p_gr_solve -> col );
    hipFree ( p_gr_solve -> row );
    hipFree ( p_gr_solve -> dig );
    const char *solver = p_gr_solve -> type;
    if ( 0 == strncmp ( solver, "RKC", 3 ) ) {
      for ( int i = 0; i < n_vec_RKC; i++ ) hipFree ( p_gr_solve -> vec [ i ] );
      free ( p_gr_solve -> vec );
      // RKC keeps extra host-side scratch buffers
      free ( p_gr_solve -> h_work );
      free ( p_gr_solve -> h_others );
      free ( p_gr_solve -> h_bool );
    } else if ( 0 == strncmp ( solver, "CN", 2 ) ) {
      for ( int i = 0; i < n_vec_CNm; i++ ) hipFree ( p_gr_solve -> vec [ i ] );
      free ( p_gr_solve -> vec );
    }
  }
  hipFree ( d_gr_solve );
  free ( p_gr_solve );
}
/* Dispatch one membrane-potential update step to the configured solver:
 * "BE" -> backward Euler, "CN" -> Crank-Nicolson, "RKC" ->
 * Runge-Kutta-Chebyshev.  Unknown solver types only print an error. */
__host__
void gr_solve_update_v ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
      neuron_t *p_gr, neuron_solve_t *p_gr_solve,
      synapse_t *d_mfgr, synapse_t *d_gogr )
{
  if ( 0 == strncmp ( p_gr_solve -> type, "BE", 2 ) ) {
    gr_solve_by_bem ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  }
  else if ( 0 == strncmp ( p_gr_solve -> type, "CN", 2 ) ) {
    gr_solve_by_cnm ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  }
  else if ( 0 == strncmp ( p_gr_solve -> type, "RKC", 3 ) ) {
    gr_solve_by_rkc ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  }
  else { printf ( "solver Error\n" ); }
} | b0565299545546f894888423bd993954ea1ac2e0.cu | #include "gr_solve.cuh"
/* Build the host-side sparse (CSR) solver state for the granule-cell (GR)
 * population.
 *
 * Steps:
 *   1. read per-compartment geometry (radius, length, axial resistance) and
 *      the parent-compartment table from PARAM_FILE_GR;
 *   2. assemble the symmetric inter-compartment conductance matrix `mat`;
 *   3. convert it to CSR (val / col / row), forcing an explicit diagonal
 *      slot per row and recording each diagonal's position in `dig`;
 *   4. replicate the one-neuron pattern for all gr->n neurons;
 *   5. apply a solver-dependent sign/scale to `val` and snapshot `val_ori`.
 *
 * The caller owns the returned struct and every array hanging off it.
 *
 * Fixes vs. the previous version: the parameter file handle is now closed
 * (it leaked), and the fscanf result is checked against the expected field
 * count instead of only EOF (short or malformed lines are now caught). */
__host__ static
neuron_solve_t* set_host_gr_solve ( const char *type, neuron_t *gr )
{
  neuron_solve_t *gr_solve = ( neuron_solve_t * ) malloc ( sizeof ( neuron_solve_t ) );
  // dense one-neuron coupling matrix and parent-compartment table
  double *mat = ( double * ) malloc ( GR_COMP * GR_COMP * sizeof ( double ) );
  int *l_connect = ( int * ) malloc ( GR_COMP * sizeof ( int ) );
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ ) { mat [ i ] = 0.0; }
  // !!!DUPLICATE CODE!!! (the same geometry parse exists elsewhere)
  double rad [ GR_COMP ], len [ GR_COMP ], Ra [ GR_COMP ];
  FILE *file = fopen ( PARAM_FILE_GR, "r" );
  if ( ! file ) { fprintf ( stderr, "no such file %s\n", PARAM_FILE_GR ); exit ( 1 ); }
  for ( int i = 0; i < GR_COMP; i++ ) {
    int i1, i2, i3;
    double d1, d2, d3, d4, i_l1, i_l2, i_l3, i_Na, i_KV, i_KA, i_KCa, i_KM, i_KIR, i_Ca;
    // Require all 17 fields; the old `== EOF` test missed malformed lines.
    if ( fscanf ( file, "%d %d %lf %lf %lf %lf %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf ",
                  &i1, &i2, &d1, &d2, &d3, &d4, &i3, &i_l1, &i_l2, &i_l3,
                  &i_Na, &i_KV, &i_KA, &i_KCa, &i_KM, &i_KIR, &i_Ca ) != 17 ) {
      printf ( "PARAM_FILE_READING_ERROR\n" );
      exit ( 1 );
    }
    rad [ i1 ] = 0.5 * d1 * 1e-4; // diameter [mum] -> radius [cm]
    len [ i1 ] = d2 * 1e-4;       // [mum -> cm]
    Ra  [ i1 ] = d3 * 1e-3;       // [kohm-cm]
    l_connect [ i ] = i2;         // parent compartment (< 0 for the root)
  }
  fclose ( file ); // was leaked before
  // symmetric axial conductance between each compartment and its parent [mS]
  for ( int i = 0; i < GR_COMP; i++ ) {
    int d = l_connect [ i ];
    if ( d >= 0 ) {
      mat [ d + GR_COMP * i ] =
        ( 2.0 / ( ( Ra [ i ] * len [ i ] ) / ( rad [ i ] * rad [ i ] * M_PI )
                + ( Ra [ d ] * len [ d ] ) / ( rad [ d ] * rad [ d ] * M_PI ) ) );
      mat [ i + GR_COMP * d ] = mat [ d + GR_COMP * i ];
    }
  }
  // NNZ per neuron = off-diagonal couplings + one forced diagonal per row
  int nnz = GR_COMP;
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ ) { nnz += ( mat [ i ] > 0.0 ); }
  gr_solve -> nnz = nnz * ( gr -> n ); printf ( "GR -> nnz = %d\n", gr_solve -> nnz );
  gr_solve -> val     = ( double * ) malloc ( gr_solve -> nnz * sizeof ( double ) ); // CSR nonzeros (incl. diagonals)
  gr_solve -> val_ori = ( double * ) malloc ( gr_solve -> nnz * sizeof ( double ) ); // pristine copy of val
  gr_solve -> b       = ( double * ) malloc ( GR_COMP * gr -> n * sizeof ( double ) ); // right-hand side
  gr_solve -> dammy   = ( double * ) malloc ( GR_COMP * gr -> n * sizeof ( double ) ); // scratch vector
  gr_solve -> col     = ( int * ) malloc ( gr_solve -> nnz * sizeof ( int ) );       // CSR column indices
  gr_solve -> row     = ( int * ) malloc ( ( GR_COMP * gr -> n + 1 ) * sizeof ( int ) ); // CSR row pointers
  gr_solve -> dig     = ( int * ) malloc ( GR_COMP * gr -> n * sizeof ( int ) );     // diagonal positions in val
  sprintf ( gr_solve -> type, "%s", type );
  double *val = gr_solve -> val;
  double *val_ori = gr_solve -> val_ori;
  double *b = gr_solve -> b;
  double *dammy = gr_solve -> dammy;
  int *col = gr_solve -> col;
  int *row = gr_solve -> row;
  int *dig = gr_solve -> dig;
  for ( int i = 0; i < gr_solve -> nnz; i++ ) { val [ i ] = val_ori [ i ] = 0.0; col [ i ] = 0; }
  for ( int i = 0; i < GR_COMP * gr -> n + 1; i++ ) { row [ i ] = 0; }
  for ( int i = 0; i < GR_COMP * gr -> n; i++ ) { dig [ i ] = 0; b [ i ] = 0; dammy [ i ] = 0.0; }
  // Dense -> CSR for the first neuron.  Off-diagonals are stored negated;
  // each row gets an explicit diagonal slot that accumulates the row sum.
  int num_row = 0, num_col = 0, num_dig = 0, num = 0, count_row = 0;
  for ( int i = 0; i < GR_COMP * GR_COMP; i++ )
  {
    if ( mat [ i ] > 0.0 ) {
      val [ num ] = - mat [ i ];
      count_row++;
      col [ num ] = num_col;
      num++;
    }
    else if ( num_row == num_col ) // force an (initially zero) diagonal entry
    {
      val [ num ] = 0.0;
      count_row++;
      col [ num ] = num_col;
      num_dig = num;
      num++;
    }
    num_col++;
    if ( num_col == GR_COMP ) // row done: fill diagonal, close row pointer
    {
      for ( int j = num_row * GR_COMP; j < num_row * GR_COMP + GR_COMP; j++ )
      {
        if ( j != num_row * GR_COMP + num_row ) {
          val [ num_dig ] += mat [ j ];
          dig [ num_row ] = num_dig;
        }
      }
      num_col = 0;
      num_row++;
      row [ num_row ] = count_row;
    }
  }
  // replicate the one-neuron CSR pattern for neurons 1 .. n-1
  for ( int i = 1; i < gr -> n; i++ )
  {
    for ( int j = 0; j < nnz; j++ )
    {
      val [ j + nnz * i ] = val [ j ];
      col [ j + nnz * i ] = col [ j ] + GR_COMP * i;
    }
    for ( int j = 0; j < GR_COMP; j++ )
    {
      row [ j + GR_COMP * i ] = row [ j ] + nnz * i;
      dig [ j + GR_COMP * i ] = dig [ j ] + nnz * i;
    }
  }
  row [ GR_COMP * gr -> n ] = gr_solve -> nnz;
  // solver-dependent sign/scale, then snapshot into val_ori
  for ( int i = 0; i < gr_solve -> nnz; i++ )
  {
    if ( ( 0 == strncmp ( gr_solve -> type, "FORWARD_EULER", 13 ) ) ||
         ( 0 == strncmp ( gr_solve -> type, "RUNGE_KUTTA_4", 13 ) ) ||
         ( 0 == strncmp ( gr_solve -> type, "RKC", 3 ) ) ) { val [ i ] *= -1; }
    if ( 0 == strncmp ( gr_solve -> type, "CN", 2 ) ) { val [ i ] /= 2.0; }
    val_ori [ i ] = val [ i ];
  }
  free ( mat );
  free ( l_connect );
  return gr_solve;
}
/* <<<1,1>>> helper kernel: wire the device-resident solver struct so its
 * pointer members reference the pre-allocated device arrays, and store the
 * launch configuration (128 threads/block, enough blocks to cover the
 * nc compartments) for later kernels. */
__global__
static void device_mem_allocation ( const int nc, const int l_nnz, neuron_solve_t* d_gr_solve,
          double *d_val, double *d_val_ori, double *d_b, int *d_col, int *d_row, int *d_dig, double *d_dammy )
{
  d_gr_solve -> nnz = l_nnz;
  d_gr_solve -> val = d_val;         // CSR nonzero values
  d_gr_solve -> val_ori = d_val_ori; // pristine copy of val
  d_gr_solve -> b = d_b;             // right-hand side
  d_gr_solve -> col = d_col;         // CSR column indices
  d_gr_solve -> row = d_row;         // CSR row-pointer array
  d_gr_solve -> dig = d_dig;         // position of each diagonal entry in val
  d_gr_solve -> dammy = d_dammy;     // scratch vector
  d_gr_solve -> numThreadsPerBlock = 128;
  d_gr_solve -> numBlocks = ( int ) ( nc / d_gr_solve -> numThreadsPerBlock ) + 1;
  //Debug
  //printf ( "From GPU \n n = %d, nc = %d\n", d_gr -> n, d_gr -> nc );
}
/* <<<1,1>>> helper: store device pointer `ptr` into slot n of the device
 * pointer table `dev` (used to build the solver's work-vector table). */
__global__
static void device_mem_allocation2 (const int n, double ** dev, double *ptr )
{
  dev [ n ] = ptr;
}
/* <<<1,1>>> helper: attach the device-side work-vector table to the
 * device-resident solver struct. */
__global__ static
void device_mem_allocation3 ( neuron_solve_t* d_gr_solve, double **d_vec )
{
  d_gr_solve -> vec = d_vec;
}
/* Create the device-resident GR solver plus its host mirror.
 *
 * Builds the CSR matrix on the host (set_host_gr_solve), allocates matching
 * device buffers, records the raw device pointers in p_gr_solve (the host
 * mirror used later for copies and finalize), wires them into the device
 * struct with a <<<1,1>>> kernel, uploads the host data, then allocates and
 * initializes the solver-specific work-vector table ("RKC" or "CN").
 * Returns the device pointer d_gr_solve; when gr->nc == 0 only the bare
 * struct is allocated.
 * NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here. */
neuron_solve_t *gr_solve_initialize ( neuron_solve_t *p_gr_solve, const char *type, neuron_t *gr, neuron_t *d_gr ) // `type` selects the solver ("CN", "RKC", ...)
{
  neuron_solve_t *d_gr_solve;
  cudaMalloc ( ( neuron_solve_t **) &d_gr_solve, sizeof ( neuron_solve_t ) );
  if ( gr -> nc == 0 ) { return d_gr_solve; } // empty population: nothing to set up
  // host-side CSR assembly (val / col / row / dig + work arrays)
  neuron_solve_t *h_gr_solve = set_host_gr_solve ( type, gr );
  int l_nnz = h_gr_solve -> nnz;
  double *d_val, *d_val_ori, *d_b, *d_dammy;
  cudaMalloc ( ( double ** ) &d_val, l_nnz * sizeof ( double ) );
  cudaMalloc ( ( double ** ) &d_val_ori, l_nnz * sizeof ( double ) );
  cudaMalloc ( ( double ** ) &d_b, gr -> nc * sizeof ( double ) );
  cudaMalloc ( ( double ** ) &d_dammy, gr -> nc * sizeof ( double ) );
  int *d_col, *d_row, *d_dig;
  cudaMalloc ( ( int ** ) &d_col, l_nnz * sizeof ( int ) );
  cudaMalloc ( ( int ** ) &d_row, ( GR_COMP * gr -> n + 1 ) * sizeof ( int ) );
  cudaMalloc ( ( int ** ) &d_dig, ( GR_COMP * gr -> n ) * sizeof ( int ) );
  // the host mirror keeps the device pointers for later copies and finalize()
  p_gr_solve -> val = d_val;
  p_gr_solve -> val_ori = d_val_ori;
  p_gr_solve -> b = d_b;
  p_gr_solve -> dammy = d_dammy;
  p_gr_solve -> col = d_col;
  p_gr_solve -> row = d_row;
  p_gr_solve -> dig = d_dig;
  p_gr_solve -> nnz = l_nnz;
  p_gr_solve -> numThreadsPerBlock = 128;
  p_gr_solve -> numBlocks = ( int ) ( ( gr -> nc ) / ( p_gr_solve -> numThreadsPerBlock ) ) + 1;
  sprintf ( p_gr_solve -> type, "%s", type );
  cudaDeviceSynchronize ( );
  // point the device struct at the freshly allocated arrays
  device_mem_allocation <<< 1, 1 >>> ( gr -> nc, l_nnz, d_gr_solve, d_val, d_val_ori, d_b, d_col, d_row, d_dig, d_dammy );
  cudaDeviceSynchronize ( );
  // upload the host CSR data into the device buffers
  cudaMemcpy ( p_gr_solve -> val, h_gr_solve -> val, l_nnz * sizeof ( double ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> val_ori, h_gr_solve -> val_ori, l_nnz * sizeof ( double ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> b, h_gr_solve -> b, gr -> nc * sizeof ( double ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> dammy, h_gr_solve -> dammy, gr -> nc * sizeof ( double ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> col, h_gr_solve -> col, l_nnz * sizeof ( int ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> row, h_gr_solve -> row, ( GR_COMP * gr -> n + 1 ) * sizeof ( int ), cudaMemcpyHostToDevice );
  cudaMemcpy ( p_gr_solve -> dig, h_gr_solve -> dig, ( GR_COMP * gr -> n ) * sizeof ( int ), cudaMemcpyHostToDevice );
  // the host copies are no longer needed once uploaded
  free ( h_gr_solve -> val ); free ( h_gr_solve -> val_ori ); free ( h_gr_solve -> b );
  free ( h_gr_solve -> col ); free ( h_gr_solve -> row ); free ( h_gr_solve -> dig );
  free ( h_gr_solve -> dammy );
  // set vec: per-solver work-vector table (array of device pointers)
  double **d_vec;
  if ( 0 == strncmp ( p_gr_solve -> type, "RKC", 3 ) ) {
    cudaMalloc ( ( double *** ) &d_vec, n_vec_RKC * sizeof ( double * ) );
    p_gr_solve -> vec = ( double ** ) malloc ( n_vec_RKC * sizeof ( double * ) );
    for ( int i = 0; i < n_vec_RKC; i++ ) {
      cudaMalloc ( ( double ** ) ( & ( p_gr_solve -> vec [ i ] ) ), gr -> nc * sizeof ( double ) );
      device_mem_allocation2 <<< 1, 1 >>> ( i, d_vec, p_gr_solve -> vec [ i ] );
    }
    device_mem_allocation3 <<< 1, 1 >>> ( d_gr_solve, d_vec );
    RKC_vec_initialize ( d_gr, d_gr_solve, gr, p_gr_solve );
  }
  /**/
  else if ( 0 == strncmp ( p_gr_solve -> type, "CN", 2 ) ) {
    cudaMalloc ( ( double *** ) &d_vec, n_vec_CNm * sizeof ( double * ) );
    p_gr_solve -> vec = ( double ** ) malloc ( n_vec_CNm * sizeof ( double * ) );
    for ( int i = 0; i < n_vec_CNm; i++ ) {
      cudaMalloc ( ( double ** ) ( & ( p_gr_solve -> vec [ i ] ) ), gr -> nc * sizeof ( double ) );
      device_mem_allocation2 <<< 1, 1 >>> ( i, d_vec, p_gr_solve -> vec [ i ] );
    }
    device_mem_allocation3 <<< 1, 1 >>> ( d_gr_solve, d_vec );
    //printf ( "pre gr_cnm_vec_initialize\n" ); // Debug
    gr_cnm_vec_initialize <<< p_gr_solve -> numBlocks, p_gr_solve -> numThreadsPerBlock >>> ( d_gr, d_gr_solve );
    gr_update_ion <<< p_gr_solve -> numBlocks, p_gr_solve -> numThreadsPerBlock >>> ( d_gr, d_gr_solve, CN_DT );
    double **l_ion = gr -> ion;
    // advance the Na-channel state arrays once with time step CN_DT
    gr_Na_update <<< p_gr_solve -> numBlocks, p_gr_solve -> numThreadsPerBlock >>>
      ( gr -> nc, gr -> elem [ v ], CN_DT, gr -> elem [ compart ],
        l_ion [ o_Na ], l_ion [ c1_Na ], l_ion [ c2_Na ], l_ion [ c3_Na ], l_ion [ c4_Na ], l_ion [ c5_Na ],
        l_ion [ i1_Na ], l_ion [ i2_Na ], l_ion [ i3_Na ], l_ion [ i4_Na ], l_ion [ i5_Na ], l_ion [ i6_Na ] );
    // Debug
    //printf ("\n");
  }
  /*
  else if ( 0 == strncmp ( gr_solve -> type, "RUNGE_KUTTA_4", 13 ) ) {
    gr_solve -> vec = ( double ** ) malloc ( gr_n_vec_RK4 * sizeof ( double *) );
    for ( int i = 0; i < gr_n_vec_RK4; i++ ) {
      gr_solve -> vec [ i ] = ( double * ) malloc ( gr -> nc * sizeof ( double ) );
    }
  }*/
  free ( h_gr_solve );
  return d_gr_solve;
}
/* Release every resource owned by the GR solver: the device arrays tracked
 * by the host mirror p_gr_solve, the solver-specific work vectors (RKC also
 * owns host-side scratch buffers), the device-resident struct d_gr_solve,
 * and finally the host mirror itself. */
void gr_solve_finalize ( const int n_gr, neuron_solve_t *d_gr_solve, neuron_solve_t *p_gr_solve )
{
  if ( n_gr > 0 ) {
    // CSR storage and work vectors live in device memory
    cudaFree ( p_gr_solve -> val );
    cudaFree ( p_gr_solve -> val_ori );
    cudaFree ( p_gr_solve -> b );
    cudaFree ( p_gr_solve -> dammy );
    cudaFree ( p_gr_solve -> col );
    cudaFree ( p_gr_solve -> row );
    cudaFree ( p_gr_solve -> dig );
    const char *solver = p_gr_solve -> type;
    if ( 0 == strncmp ( solver, "RKC", 3 ) ) {
      for ( int i = 0; i < n_vec_RKC; i++ ) cudaFree ( p_gr_solve -> vec [ i ] );
      free ( p_gr_solve -> vec );
      // RKC keeps extra host-side scratch buffers
      free ( p_gr_solve -> h_work );
      free ( p_gr_solve -> h_others );
      free ( p_gr_solve -> h_bool );
    } else if ( 0 == strncmp ( solver, "CN", 2 ) ) {
      for ( int i = 0; i < n_vec_CNm; i++ ) cudaFree ( p_gr_solve -> vec [ i ] );
      free ( p_gr_solve -> vec );
    }
  }
  cudaFree ( d_gr_solve );
  free ( p_gr_solve );
}
/* Dispatch one membrane-potential update step to the solver selected by
 * p_gr_solve->type: "BE" -> backward Euler, "CN" -> Crank-Nicolson,
 * "RKC" -> Runge-Kutta-Chebyshev.  Unknown types only print an error. */
__host__
void gr_solve_update_v ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
                         neuron_t *p_gr, neuron_solve_t *p_gr_solve,
                         synapse_t *d_mfgr, synapse_t *d_gogr )
{
  const char *solver = p_gr_solve -> type;
  if ( 0 == strncmp ( solver, "BE", 2 ) )
    gr_solve_by_bem ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  else if ( 0 == strncmp ( solver, "CN", 2 ) )
    gr_solve_by_cnm ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  else if ( 0 == strncmp ( solver, "RKC", 3 ) )
    gr_solve_by_rkc ( d_gr, d_gr_solve, p_gr, p_gr_solve, d_mfgr, d_gogr );
  else
    printf ( "solver Error\n" );
}
fe2501d0c07976e1e0d131a12c9a25f7a25826fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
/* Upload k host-side scalars into `self`, resized to a length-k 1-D tensor. */
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
  int64_t dims[1]    = { k };
  int64_t strides[1] = { 1 };
  THCTensor_(resizeNd)(state, self, 1, dims, strides);
  size_t nbytes = sizeof(real) * (size_t)k;
  THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, nbytes, hipMemcpyHostToDevice));
}
/* Upload an m x n column-major host array into `self`, resized to m x n
 * with column-major strides ({1, m}) as produced by MAGMA. */
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
  int64_t dims[2]    = { m, n };
  int64_t strides[2] = { 1, m };
  THCTensor_(resizeNd)(state, self, 2, dims, strides);
  size_t nbytes = sizeof(real) * (size_t)m * (size_t)n;
  THCudaCheck(hipMemcpy(self->storage->data + self->storageOffset, src, nbytes, hipMemcpyHostToDevice));
}
/* Download a 2-D device tensor into host buffer `dst` in column-major
 * (Fortran) order as expected by MAGMA: transpose, force contiguity, then
 * copy device -> host.  `dst` must hold nElement(self) reals. */
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
  THAssert(self->nDimension == 2);
  THCTensor *transposed = THCTensor_(newTranspose)(state, self, 0, 1);
  THCTensor *contig = THCTensor_(newContiguous)(state, transposed);
  size_t nbytes = sizeof(real) * THCTensor_(nElement)(state, self);
  THCudaCheck(hipMemcpy(dst, contig->storage->data + contig->storageOffset, nbytes, hipMemcpyDeviceToHost));
  THCTensor_(free)(state, transposed);
  THCTensor_(free)(state, contig);
}
#endif // USE_MAGMA
/* Return a column-major (stride {1, m}) 2-D tensor holding src's data, as
 * MAGMA expects.  If `self` aliases `src` and is already column-major it is
 * reused directly.  Otherwise `self` (or a fresh tensor, when self == src
 * with the wrong layout) is resized with column-major strides and filled by
 * copy.  The result always carries an extra reference; the caller must
 * release it with THCTensor_(free) / freeCopyTo. */
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
  THAssert(src->nDimension == 2);
  // fast path: self aliases src and is already column-major
  if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
  {
    THCTensor_(retain)(state, self);
    return self;
  }
  // self aliases src but has the wrong layout: write into a fresh tensor
  if (self == src)
    self = THCTensor_(new)(state);
  else
    THCTensor_(retain)(state, self);
  int64_t size[2] = { src->size[0], src->size[1] };
  int64_t stride[2] = { 1, src->size[0] };
  THCTensor_(resizeNd)(state, self, 2, size, stride);
  THCTensor_(copy)(state, self, src);
  return self;
}
/* Solve the square linear system A X = B via MAGMA's LU solver (gesv).
 * a_ is n x n, b_ is n x nrhs.  On return ra_ holds the LU factors and rb_
 * the solution; both are first converted to the column-major layout MAGMA
 * requires.  Raises via THError on illegal arguments or a singular U. */
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
  THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
  THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
  THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
  THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
  int64_t n = a_->size[0];
  int64_t nrhs = b_->size[1];
  // column-major working copies on the device
  THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
  THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
  real *a_data = THCTensor_(data)(state, a);
  real *b_data = THCTensor_(data)(state, b);
  int *ipiv = th_magma_malloc_pinned<int>(n); // pivot indices (pinned host memory)
  int info;
#if defined(THC_REAL_IS_FLOAT)
  magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
  magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif
  if (info < 0)
    THError("MAGMA gesv : Argument %d : illegal value", -info);
  else if (info > 0)
    THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
  magma_free_pinned(ipiv);
  // copy results back into the caller's output tensors and drop our refs
  THCTensor_(freeCopyTo)(state, a, ra_);
  THCTensor_(freeCopyTo)(state, b, rb_);
#else
  THError(NoMagma(gesv));
#endif
}
/* Least-squares solve of the overdetermined system A X = B (m >= n) via
 * MAGMA's QR-based gels.  Performs the standard LAPACK two-step: a
 * workspace-size query (lwork = -1) followed by the actual solve.  Results
 * land in ra_ (factored A) and rb_ (solution), both column-major. */
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
  THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
  THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
  THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
  THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
  THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
  THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
  real *a_data = THCTensor_(data)(state, a);
  real *b_data = THCTensor_(data)(state, b);
  int64_t m = a->size[0];
  int64_t n = a->size[1];
  int64_t nrhs = b->size[1];
  real wkopt;
  int info;
  // workspace-size query: lwork = -1 writes the optimal size into wkopt
#if defined(THC_REAL_IS_FLOAT)
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
  magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
  real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
  // actual solve with the optimally sized pinned workspace
#if defined(THC_REAL_IS_FLOAT)
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
  magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
  magma_free_pinned(hwork);
  if (info != 0)
    THError("MAGMA gels : Argument %d : illegal value", -info);
  THCTensor_(freeCopyTo)(state, a, ra_);
  THCTensor_(freeCopyTo)(state, b, rb_);
#else
  THError(NoMagma(gels));
#endif
}
/* Symmetric eigendecomposition via MAGMA's divide-and-conquer syevd.
 * jobzs[0]: 'N' = eigenvalues only, otherwise eigenvectors too;
 * uplos[0]: 'U'/'L' selects which triangle of `a` is referenced.
 * Eigenvalues are written to re_; eigenvectors (when requested) overwrite
 * the column-major working copy that ends up in rv_. */
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
  int64_t n = a->size[0];
  int64_t lda = n;
  magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
  THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
  real *input_data = THCTensor_(data)(state, input);
  // eigen values and workspace
  real *w = th_magma_malloc_pinned<real>(n);
  real *wA = th_magma_malloc_pinned<real>(lda * n);
  // compute optimal size of work array (lwork/liwork = -1 query)
  int info;
  real lwork;
  int liwork;
#if defined(THC_REAL_IS_FLOAT)
  magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
  magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
  real *work = th_magma_malloc_pinned<real>((size_t)lwork);
  int *iwork = th_magma_malloc_pinned<int>(liwork);
  // compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
  magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
  magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
  // copy eigen values from w to re_ (only when the solve succeeded)
  if (info == 0)
    THCTensor_(copyArray1d)(state, re_, w, n);
  magma_free_pinned(iwork);
  magma_free_pinned(work);
  magma_free_pinned(wA);
  magma_free_pinned(w);
  // check error value
  if (info > 0)
    THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
  else if (info < 0)
    THError("MAGMA syev : Argument %d : illegal value", -info);
  THCTensor_(freeCopyTo)(state, input, rv_);
#else
  THError(NoMagma(syev));
#endif
}
/* General (non-symmetric) eigendecomposition via MAGMA geev.
 * jobvrs[0]: 'N' = eigenvalues only, otherwise also right eigenvectors
 * (left eigenvectors are never computed).  Eigenvalues come back as real
 * (wr) and imaginary (wi) parts packed into re_ as an n x 2 tensor (via a
 * 2 x n fill followed by a transpose); right eigenvectors, when requested,
 * are copied column-major into rv_. */
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
  THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
  THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
  magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
  int64_t n = a_->size[0];
  // MAGMA geev wants the matrix in pinned host memory, column-major
  real *a_data = th_magma_malloc_pinned<real>(n * n);
  THCTensor_(copyTensor2d)(state, a_data, a_);
  real *wr = th_magma_malloc_pinned<real>(n); // real parts of eigenvalues
  real *wi = th_magma_malloc_pinned<real>(n); // imaginary parts
  real *vr_data = NULL;
  int64_t ldvr = 1;
  if (jobvr == MagmaVec)
  {
    vr_data = th_magma_malloc_pinned<real>(n * n);
    ldvr = n;
  }
  real wkopt;
  int info;
  // workspace-size query (lwork = -1)
#if defined(THC_REAL_IS_FLOAT)
  magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
  magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
  int lwork = (int) wkopt;
  real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
  magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
  magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
  if (info > 0)
    THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
  else if (info < 0)
    THError("MAGMA geev : Argument %d : illegal value", -info);
  {
    // pack (wr, wi) as rows of a 2 x n tensor, then transpose to n x 2
    THCTensor_(resize2d)(state, re_, 2, n);
    THCTensor *re = THCTensor_(newContiguous)(state, re_);
    THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), hipMemcpyHostToDevice));
    THCudaCheck(hipMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), hipMemcpyHostToDevice));
    THCTensor_(freeCopyTo)(state, re, re_);
    THCTensor_(transpose)(state, re_, NULL, 0, 1);
  }
  if (jobvr == MagmaVec)
    THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
  magma_free_pinned(work_data);
  magma_free_pinned(vr_data);
  magma_free_pinned(wi);
  magma_free_pinned(wr);
  magma_free_pinned(a_data);
#else
  THError(NoMagma(geev));
#endif
}
// SVD convenience wrapper: forwards to gesvd2 and discards the extra output
// (the input matrix as overwritten by gesdd) that gesvd2 additionally returns.
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
// Temporary receiver for gesvd2's "ra_" output; released immediately.
THCTensor *scratch = THCTensor_(new)(state);
THCTensor_(gesvd2)(state, ru_, rs_, rv_, scratch, a, jobu);
THCTensor_(free)(state, scratch);
#else
THError(NoMagma(gesvd));
#endif
}
// SVD via MAGMA ?gesdd (divide and conquer). Outputs: ru_ = U (m x j),
// rs_ = singular values (length k), rv_ = V (narrowed to jv columns unless
// all vectors were requested), ra_ = the input matrix as left by gesdd.
// jobus[0] selects which singular vectors to compute: 'A', 'S', 'O', or 'N'.
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
int iunused[1];
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = m < n ? m : n; // number of singular values
int64_t j = (jobz == MagmaAllVec) ? m : k; // columns of U produced
int64_t jv = (jobz == MagmaAllVec) ? n : k; // columns of V kept after narrow
// gesdd is a CPU-interface routine: stage all operands in pinned host memory.
real *a_data = th_magma_malloc_pinned<real>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
real *rs_data = th_magma_malloc_pinned<real>(k);
real *ru_data = th_magma_malloc_pinned<real>(m * j);
real *rv_data = th_magma_malloc_pinned<real>(n * n);
real wkopt;
int info;
// Workspace query (lwork = -1) followed by the actual factorization.
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
int *iwork = th_magma_malloc_pinned<int>(8 * k); // gesdd integer workspace
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif
// NOTE(review): THError does not return, so the pinned buffers leak on the
// error paths — pre-existing behavior.
if (info > 0)
THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
else if (info < 0)
THError("MAGMA gesdd : Argument %d : illegal value", -info);
// Copy the V factor out and transpose to match Torch's convention, then
// narrow to the columns actually computed (unless all were requested).
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
if (jobz != MagmaAllVec)
THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(iwork);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
// Matrix inverse via LU factorization; result is written to ra_.
// MAGMA build: magma_?getrf_gpu followed by magma_?getri_gpu.
// Fallback build: batched cuBLAS getrf/getri with a batch of one.
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
#ifdef USE_MAGMA
int info;
int64_t n = a->size[0];
// Size the getri workspace with the blocking factor of the precision in use.
// (Fix: the original used magma_get_sgetri_nb for both float and double,
// which can undersize lwork on the double-precision path.)
#if defined(THC_REAL_IS_FLOAT)
int lwork = n * magma_get_sgetri_nb(n);
#else
int lwork = n * magma_get_dgetri_nb(n);
#endif
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int *ipiv = th_magma_malloc_pinned<int>(n); // pivot indices from getrf
THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
real *work_data = THCTensor_(data)(state, work);
// Run LU
#if defined(THC_REAL_IS_FLOAT)
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse from the LU factors
#if defined(THC_REAL_IS_FLOAT)
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCTensor_(free)(state, work);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, input, ra_);
#else
int64_t n = a->size[0];
// input
THCTensor *input = THCTensor_(newColumnMajor)(state, a, a);
THCTensor_(resizeNd)(state, ra_, 2, input->size, input->stride);
real *matrices1[1] = { THCTensor_(data)(state, input) }; // LU in/out
real *matrices2[1] = { THCTensor_(data)(state, ra_) };   // inverse out
// Batched BLAS expects device-resident arrays of matrix pointers.
real **d_matrices1, **d_matrices2;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, sizeof(real*)));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, sizeof(real*)));
THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, sizeof(real*),
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, sizeof(real*),
hipMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
// The blocking copy of the status code also synchronizes with the batched call.
THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCudaCheck(THCudaFree(state, d_matrices1));
THCudaCheck(THCudaFree(state, d_matrices2));
THCTensor_(free)(state, input);
#endif
}
// Mirror the upper triangle of an n x n matrix into its lower triangle.
// `input` is a column-major n x n buffer of length len = n*n. For each flat
// index idx = c*n + r with r > c, copies the element at (c, r) over it.
// Fix: the original advanced each thread by a hard-coded 65535 (the grid-size
// cap) instead of the actual thread count, causing redundant revisits of the
// same elements (harmless only because the copy is idempotent). A standard
// grid-stride loop visits each index exactly once and works for any launch.
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
  const int stride = blockDim.x * gridDim.x;
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += stride) {
    const int r = idx % n;
    const int c = idx / n;
    if (r > c) {
      input[idx] = input[r*n + c];  // source lies in the untouched triangle
    }
  }
}
// Mirror the lower triangle of an n x n matrix into its upper triangle.
// Counterpart of copyUpperSymmetric; see that kernel for the layout and for
// the grid-stride fix replacing the hard-coded 65535 step.
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
  const int stride = blockDim.x * gridDim.x;
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += stride) {
    const int r = idx % n;
    const int c = idx / n;
    if (r < c) {
      input[idx] = input[r*n + c];  // source lies in the untouched triangle
    }
  }
}
// Inverse of a symmetric positive-definite matrix from its Cholesky factor
// (MAGMA ?potri). potri fills only the triangle selected by uplo, so the
// symmetric-copy kernels below mirror it into the other half, returning a
// full symmetric matrix in ra_.
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
// Symmetrize on-device: copy the computed triangle into the other half.
// NOTE(review): len is a 32-bit int, so n*n overflows for n > 46340.
hipStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
} else {
hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
// Cholesky factorization (MAGMA ?potrf). Writes the requested triangular
// factor into ra_ (upper or lower per uplo); the opposite triangle is zeroed
// via triu/tril since potrf leaves it with stale input values.
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
// Keep only the factor's triangle; zero the rest.
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
// Solve A X = B given the Cholesky factor of A (MAGMA ?potrs). `a` must hold
// the factor in the triangle named by uplo; the solution is written to rb_.
// Only info < 0 (illegal argument) is checked — potrs reports no other errors.
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
int64_t nrhs = b->size[1];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
// QR factorization in packed LAPACK form (MAGMA ?geqrf2_gpu): ra_ receives
// R in its upper triangle and the Householder reflectors below the diagonal;
// rtau_ receives the k = min(m, n) Householder scalars.
THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = (m < n ? m : n); // number of elementary reflectors
// (The blocking-factor query the original performed here was dead code:
// ?geqrf2_gpu takes no nb argument, so the MAGMA_V1/V2 maze is removed.)
real *rtau_data = th_magma_malloc_pinned<real>(k);
real *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
// QR decomposition: rq_ receives Q (narrowed to k columns) and rr_ receives
// R (upper triangle of the first k rows), k = min(m, n). Performs the
// factorization twice with different MAGMA entry points — see the comment in
// the body for why both are needed.
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = (m < n ? m : n);
// Blocking factor nb is needed later by ?orgqr_gpu; the query signature
// differs between MAGMA v1 and v2.
#ifdef MAGMA_V2
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
#else
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m);
#else
int64_t nb = magma_get_dgeqrf_nb(m);
#endif
#endif
real *a_data = THCTensor_(data)(state, a);
real *tau_data = th_magma_malloc_pinned<real>(k);
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
// We need to call two different versions of ?geqrf:
// ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give
// R properly. Note that the MAGMA documentation for this method is wrong.
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
// ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
// R = upper triangle of the first k rows of the packed factorization.
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
// Re-run the factorization with ?geqrf_gpu, whose output in `work` is what
// ?orgqr_gpu needs to form Q explicitly.
a = THCTensor_(newColumnMajor)(state, rq_, a_);
a_data = THCTensor_(data)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, a);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
// Q is m x k: keep only the first k columns of the generated matrix.
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
| fe2501d0c07976e1e0d131a12c9a25f7a25826fd.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
// Resize `self` to a contiguous 1-D tensor of length k and upload k host
// values from `src` to the device.
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
size_t nbytes = k * sizeof(real);
THCTensor_(resizeNd)(state, self, 1, size, stride);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, nbytes, cudaMemcpyHostToDevice));
}
// Resize `self` to m x n with column-major strides and upload m*n host
// values from `src` to the device.
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };  // column-major layout
size_t nbytes = m * n * sizeof(real);
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, nbytes, cudaMemcpyHostToDevice));
}
// Download a 2-D tensor into host buffer `dst` in column-major order.
// Transposing and then forcing contiguity materializes a column-major copy
// on the device before the single device-to-host transfer.
static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self)
{
THAssert(self->nDimension == 2);
THCTensor *transposed = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *contig = THCTensor_(newContiguous)(state, transposed);
size_t nbytes = THCTensor_(nElement)(state, self)*sizeof(real);
THCudaCheck(cudaMemcpy(dst, contig->storage->data + contig->storageOffset, nbytes, cudaMemcpyDeviceToHost));
THCTensor_(free)(state, transposed);
THCTensor_(free)(state, contig);
}
#endif // USE_MAGMA
// Return a column-major (Fortran-layout) 2-D tensor holding src's contents.
// If self aliases src and is already column-major, it is simply retained and
// reused; otherwise a (new or retained) tensor is resized to column-major
// strides and src is copied in. The caller owns one reference on the result.
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->nDimension == 2);
// Already column-major and aliasing: no copy needed.
if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0])
{
THCTensor_(retain)(state, self);
return self;
}
// Aliasing but wrong layout: must allocate a fresh tensor so src's data
// survives as the copy source.
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size[0], src->size[1] };
int64_t stride[2] = { 1, src->size[0] };  // column-major strides
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
// General linear solve A X = B via MAGMA ?gesv_gpu (LU with partial
// pivoting). ra_ receives the LU factors of A; rb_ receives the solution X.
THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square");
THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible");
int64_t n = a_->size[0];
int64_t nrhs = b_->size[1];
// MAGMA expects column-major operands; these copies live on the device.
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int *ipiv = th_magma_malloc_pinned<int>(n);  // pivot indices
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#else
magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info);
#endif
// NOTE(review): THError does not return, so ipiv leaks on these paths —
// pre-existing behavior.
if (info < 0)
THError("MAGMA gesv : Argument %d : illegal value", -info);
else if (info > 0)
THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gesv));
#endif
}
// Least-squares solve of the overdetermined system A X = B via MAGMA
// ?gels_gpu (QR-based, no transpose). Requires m >= n. ra_ receives the QR
// factorization of A; rb_ receives the solution (in its leading rows).
THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional");
THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional");
THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b");
THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
real *a_data = THCTensor_(data)(state, a);
real *b_data = THCTensor_(data)(state, b);
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t nrhs = b->size[1];
real wkopt;  // receives the optimal workspace size from the lwork = -1 query
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
// Allocate the queried workspace in pinned host memory and solve for real.
real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
// Symmetric eigendecomposition via MAGMA ?syevd_gpu (divide and conquer).
// jobzs[0] == 'N' computes eigenvalues only; otherwise eigenvectors are
// returned in rv_ (overwriting the staged copy of a). uplos selects which
// triangle of a is referenced. Eigenvalues are copied into re_.
THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos)
{
#ifdef USE_MAGMA
int64_t n = a->size[0];
int64_t lda = n;
magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower;
magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec;
THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a);
real *input_data = THCTensor_(data)(state, input);
// eigen values and workspace
real *w = th_magma_malloc_pinned<real>(n);
real *wA = th_magma_malloc_pinned<real>(lda * n);  // host copy required by syevd_gpu
// compute optimal size of work array
int info;
real lwork;   // workspace query writes the optimal real-workspace size here
int liwork;   // ... and the optimal integer-workspace size here
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info);
#endif
real *work = th_magma_malloc_pinned<real>((size_t)lwork);
int *iwork = th_magma_malloc_pinned<int>(liwork);
// compute eigenvalues and, optionally, eigenvectors
#if defined(THC_REAL_IS_FLOAT)
magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#else
magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info);
#endif
// copy eigen values from w to re_
if (info == 0)
THCTensor_(copyArray1d)(state, re_, w, n);
magma_free_pinned(iwork);
magma_free_pinned(work);
magma_free_pinned(wA);
magma_free_pinned(w);
// check error value
if (info > 0)
THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA syev : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, input, rv_);
#else
THError(NoMagma(syev));
#endif
}
// Non-symmetric eigendecomposition of square matrix a_ via MAGMA ?geev.
// Eigenvalues are returned in re_ (packed 2 x n, then transposed to n x 2:
// column 0 = real parts, column 1 = imaginary parts). Right eigenvectors are
// written to rv_ when jobvrs[0] != 'N'. Left eigenvectors are never requested
// (first argument MagmaNoVec = jobvl). Requires a MAGMA build.
THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square");
magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size[0];
// ?geev is a CPU-interface routine: stage A in pinned host memory.
real *a_data = th_magma_malloc_pinned<real>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
real *wr = th_magma_malloc_pinned<real>(n); // real parts of eigenvalues
real *wi = th_magma_malloc_pinned<real>(n); // imaginary parts of eigenvalues
real *vr_data = NULL;
int64_t ldvr = 1; // LAPACK requires ldvr >= 1 even when vr is not referenced
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<real>(n * n);
ldvr = n;
}
real wkopt;
int info;
// Workspace query (lwork = -1): optimal size is returned in wkopt.
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
// NOTE(review): THError does not return, so the pinned buffers above leak on
// these error paths — pre-existing behavior, kept as-is.
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
{
// Upload wr/wi as rows of a 2 x n tensor, then transpose so callers see n x 2.
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), cudaMemcpyHostToDevice));
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
// magma_free_pinned(NULL) is reached when jobvr == MagmaNoVec (vr_data stays NULL).
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
// SVD convenience wrapper: forwards to gesvd2 and discards the extra output
// (the input matrix as overwritten by gesdd) that gesvd2 additionally returns.
THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu)
{
#ifdef USE_MAGMA
// Temporary receiver for gesvd2's "ra_" output; released immediately.
THCTensor *scratch = THCTensor_(new)(state);
THCTensor_(gesvd2)(state, ru_, rs_, rv_, scratch, a, jobu);
THCTensor_(free)(state, scratch);
#else
THError(NoMagma(gesvd));
#endif
}
// SVD via MAGMA ?gesdd (divide and conquer). Outputs: ru_ = U (m x j),
// rs_ = singular values (length k), rv_ = V (narrowed to jv columns unless
// all vectors were requested), ra_ = the input matrix as left by gesdd.
// jobus[0] selects which singular vectors to compute: 'A', 'S', 'O', or 'N'.
THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec;
int iunused[1];
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = m < n ? m : n; // number of singular values
int64_t j = (jobz == MagmaAllVec) ? m : k; // columns of U produced
int64_t jv = (jobz == MagmaAllVec) ? n : k; // columns of V kept after narrow
// gesdd is a CPU-interface routine: stage all operands in pinned host memory.
real *a_data = th_magma_malloc_pinned<real>(m * n);
THCTensor_(copyTensor2d)(state, a_data, a);
real *rs_data = th_magma_malloc_pinned<real>(k);
real *ru_data = th_magma_malloc_pinned<real>(m * j);
real *rv_data = th_magma_malloc_pinned<real>(n * n);
real wkopt;
int info;
// Workspace query (lwork = -1) followed by the actual factorization.
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info);
#endif
int lwork = (int) wkopt;
real *work_data = th_magma_malloc_pinned<real>(lwork);
int *iwork = th_magma_malloc_pinned<int>(8 * k); // gesdd integer workspace
#if defined(THC_REAL_IS_FLOAT)
magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#else
magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info);
#endif
// NOTE(review): THError does not return, so the pinned buffers leak on the
// error paths — pre-existing behavior.
if (info > 0)
THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info);
else if (info < 0)
THError("MAGMA gesdd : Argument %d : illegal value", -info);
// Copy the V factor out and transpose to match Torch's convention, then
// narrow to the columns actually computed (unless all were requested).
THCTensor_(copyArray2d)(state, rv_, rv_data, n, n);
THCTensor_(transpose)(state, rv_, NULL, 0, 1);
if (jobz != MagmaAllVec)
THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv);
THCTensor_(copyArray2d)(state, ru_, ru_data, m, j);
THCTensor_(copyArray1d)(state, rs_, rs_data, k);
THCTensor_(copyArray2d)(state, ra_, a_data, m, n);
magma_free_pinned(work_data);
magma_free_pinned(iwork);
magma_free_pinned(rv_data);
magma_free_pinned(ru_data);
magma_free_pinned(rs_data);
magma_free_pinned(a_data);
#else
THError(NoMagma(gesvd2));
#endif
}
// Matrix inverse via LU factorization; result is written to ra_.
// MAGMA build: magma_?getrf_gpu followed by magma_?getri_gpu.
// Fallback build: batched cuBLAS getrf/getri with a batch of one.
THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a)
{
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
#ifdef USE_MAGMA
int info;
int64_t n = a->size[0];
// Size the getri workspace with the blocking factor of the precision in use.
// (Fix: the original used magma_get_sgetri_nb for both float and double,
// which can undersize lwork on the double-precision path.)
#if defined(THC_REAL_IS_FLOAT)
int lwork = n * magma_get_sgetri_nb(n);
#else
int lwork = n * magma_get_dgetri_nb(n);
#endif
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int *ipiv = th_magma_malloc_pinned<int>(n); // pivot indices from getrf
THCTensor *work = THCTensor_(newWithSize1d)(state, lwork);
real *work_data = THCTensor_(data)(state, work);
// Run LU
#if defined(THC_REAL_IS_FLOAT)
magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info);
#else
magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info);
#endif
if (info > 0)
THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getrf : Argument %d : illegal value", -info);
// Inverse from the LU factors
#if defined(THC_REAL_IS_FLOAT)
magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#else
magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("MAGMA getri : Argument %d : illegal value", -info);
THCTensor_(free)(state, work);
magma_free_pinned(ipiv);
THCTensor_(freeCopyTo)(state, input, ra_);
#else
int64_t n = a->size[0];
// input
THCTensor *input = THCTensor_(newColumnMajor)(state, a, a);
THCTensor_(resizeNd)(state, ra_, 2, input->size, input->stride);
real *matrices1[1] = { THCTensor_(data)(state, input) }; // LU in/out
real *matrices2[1] = { THCTensor_(data)(state, ra_) };   // inverse out
// Batched BLAS expects device-resident arrays of matrix pointers.
real **d_matrices1, **d_matrices2;
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, sizeof(real*)));
THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, sizeof(real*)));
THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*),
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*),
cudaMemcpyHostToDevice, THCState_getCurrentStream(state)));
int info;
int *info_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int)));
int *ipiv_gpu;
THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int)));
// Run LU
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#else
THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1);
#endif
// The blocking copy of the status code also synchronizes with the batched call.
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getrf : Argument %d : illegal value", -info);
// Inverse
#if defined(THC_REAL_IS_FLOAT)
THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#else
THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1);
#endif
THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost));
if (info > 0)
THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info);
else if (info < 0)
THError("CUBLAS getri : Argument %d : illegal value", -info);
THCudaCheck(THCudaFree(state, ipiv_gpu));
THCudaCheck(THCudaFree(state, info_gpu));
THCudaCheck(THCudaFree(state, d_matrices1));
THCudaCheck(THCudaFree(state, d_matrices2));
THCTensor_(free)(state, input);
#endif
}
// Mirror the upper triangle of an n x n matrix into its lower triangle.
// `input` is a column-major n x n buffer of length len = n*n. For each flat
// index idx = c*n + r with r > c, copies the element at (c, r) over it.
// Fix: the original advanced each thread by a hard-coded 65535 (the grid-size
// cap) instead of the actual thread count, causing redundant revisits of the
// same elements (harmless only because the copy is idempotent). A standard
// grid-stride loop visits each index exactly once and works for any launch.
__global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len)
{
  const int stride = blockDim.x * gridDim.x;
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += stride) {
    const int r = idx % n;
    const int c = idx / n;
    if (r > c) {
      input[idx] = input[r*n + c];  // source lies in the untouched triangle
    }
  }
}
// Mirror the lower triangle of an n x n matrix into its upper triangle.
// Counterpart of copyUpperSymmetric; see that kernel for the layout and for
// the grid-stride fix replacing the hard-coded 65535 step.
__global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len)
{
  const int stride = blockDim.x * gridDim.x;
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += stride) {
    const int r = idx % n;
    const int c = idx / n;
    if (r < c) {
      input[idx] = input[r*n + c];  // source lies in the untouched triangle
    }
  }
}
// Inverse of a symmetric positive-definite matrix from its Cholesky factor
// (MAGMA ?potri). potri fills only the triangle selected by uplo, so the
// symmetric-copy kernels below mirror it into the other half, returning a
// full symmetric matrix in ra_.
THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
// Symmetrize on-device: copy the computed triangle into the other half.
// NOTE(review): len is a 32-bit int, so n*n overflows for n > 46340.
cudaStream_t stream = THCState_getCurrentStream(state);
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo[0] == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
// Cholesky factorization (MAGMA ?potrf). Writes the requested triangular
// factor into ra_ (upper or lower per uplo); the opposite triangle is zeroed
// via triu/tril since potrf leaves it with stale input values.
THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional");
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
real *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrf_gpu(ul, n, input_data, n, &info);
#else
magma_dpotrf_gpu(ul, n, input_data, n, &info);
#endif
// check error value
if (info > 0)
THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potrf : Argument %d : illegal value", -info);
// Keep only the factor's triangle; zero the rest.
if (uplo[0] == 'U') {
THCTensor_(triu)(state, ra_, input, 0);
} else {
THCTensor_(tril)(state, ra_, input, 0);
}
THCTensor_(free)(state, input);
#else
THError(NoMagma(potrf));
#endif
}
// Solve A X = B given the Cholesky factor of A (MAGMA ?potrs). `a` must hold
// the factor in the triangle named by uplo; the solution is written to rb_.
// Only info < 0 (illegal argument) is checked — potrs reports no other errors.
THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo)
{
#ifdef USE_MAGMA
THArgCheck(a->size[0] == a->size[1], 2, "A should be square");
int64_t n = a->size[0];
int64_t nrhs = b->size[1];
magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower;
THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b);
real *b_data = THCTensor_(data)(state, b_);
THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a);
real *a_data = THCTensor_(data)(state, a_);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#else
magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info);
#endif
// check error value
if (info < 0)
THError("MAGMA potrs : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, b_, rb_);
THCTensor_(free)(state, a_);
#else
THError(NoMagma(potrs));
#endif
}
// QR factorization in packed LAPACK form (MAGMA ?geqrf2_gpu): ra_ receives
// R in its upper triangle and the Householder reflectors below the diagonal;
// rtau_ receives the k = min(m, n) Householder scalars.
THC_API void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = (m < n ? m : n); // number of elementary reflectors
// (The blocking-factor query the original performed here was dead code:
// ?geqrf2_gpu takes no nb argument, so the MAGMA_V1/V2 maze is removed.)
real *rtau_data = th_magma_malloc_pinned<real>(k);
real *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
// QR decomposition: a_ (m x n) -> rq_ (m x k orthonormal Q) and
// rr_ (k x n upper-triangular R), with k = min(m, n).
// Raises an error unless built with MAGMA support.
THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional");
// Column-major working copy; its storage is reused for rr_'s result.
THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_);
int64_t m = a->size[0];
int64_t n = a->size[1];
int64_t k = (m < n ? m : n);
// MAGMA's optimal geqrf block size; also sizes the work area and is passed
// to ?orgqr_gpu below. The V1/V2 APIs differ in arity, hence the #ifdef.
#ifdef MAGMA_V2
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
#else
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m);
#else
int64_t nb = magma_get_dgeqrf_nb(m);
#endif
#endif
real *a_data = THCTensor_(data)(state, a);
// Householder scalar factors; pinned host memory as MAGMA's _gpu calls require.
real *tau_data = th_magma_malloc_pinned<real>(k);
// Workspace for ?geqrf_gpu / ?orgqr_gpu, sized per MAGMA's requirements.
THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb);
real *work_data = THCTensor_(data)(state, work);
int info;
// We need to call two different versions of ?geqrf:
// ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give
// R properly. Note that the MAGMA documentation for this method is wrong.
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
// ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
// R = upper triangle of the first k rows of the factored matrix.
THCTensor_(narrow)(state, a, a, 0, 0, k);
THCTensor_(triu)(state, rr_, a, 0);
THCTensor_(free)(state, a);
// Second pass: re-factor a fresh copy with ?geqrf_gpu so that Q can be
// formed from the stored reflectors by ?orgqr_gpu.
a = THCTensor_(newColumnMajor)(state, rq_, a_);
a_data = THCTensor_(data)(state, a);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#else
magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf : Argument %d : illegal value.", -info);
THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a);
real *q_data = THCTensor_(data)(state, q);
// Expand the Householder reflectors into the explicit orthonormal factor Q.
#if defined(THC_REAL_IS_FLOAT)
magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#else
magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info);
#endif
if (info != 0)
THError("MAGMA orgqr : Argument %d : illegal value.", -info);
THCTensor_(free)(state, a);
THCTensor_(free)(state, work);
magma_free_pinned(tau_data);
// Keep only the first k columns of Q and copy the result into rq_.
THCTensor_(narrow)(state, q, q, 1, 0, k);
THCTensor_(freeCopyTo)(state, q, rq_);
#else
THError(NoMagma(qr));
#endif
}
#endif
#endif
|
67327b8873fb8e37717333d947030debd6389b0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// write your code into this file
// your kernels can be implemented directly here, or included
// function solveGPU is a device function: it can allocate memory, call CUDA kernels etc.
#define DEBUGNUMFPU 32
#define BLOCK 256
texture<int, hipTextureType2D, hipReadModeElementType> texRefContacts;
texture<int, hipTextureType2D, hipReadModeElementType> texRefCity;
// One step of the infection cellular automaton on an n x n city.
// Grid layout: one block row per city row (blockIdx.y = y); each thread
// processes TWO cells of that row, columns x and x+offset, where offset is
// half the tile width — hence the host launches with blockDim.x = tile/2.
// Inputs are read through the textures (texRefCity = current state,
// texRefContacts = per-cell infection thresholds); the `contacts` and `in`
// pointer parameters are not dereferenced by this kernel.
// Cell encoding (inferred from the update rule — confirm against the
// assignment spec): >0 infected with that many days left, <0 immune
// counting up toward 0, 0 susceptible.
__global__ void GPUiter(const int* const contacts, const int* const in, int* const infections, const int n, const int iter, int* const out){
// Three rows (above/current/below) of the tile plus halo cells at both ends.
__shared__ int neighborhood[3][BLOCK+3];
// Next state for each cell handled by this block.
__shared__ int res[BLOCK+1];
int x = (blockIdx.x * (blockDim.x * 2)) + threadIdx.x;
int y = blockIdx.y;
// Number of valid cells in this block's tile (smaller for the last block).
int maxIdx = min(BLOCK, n-(blockIdx.x * (blockDim.x * 2)));
int pos = y*n + x;
int offset = maxIdx >> 1;
if(x + offset < n && y < n){
// First thread loads the left halo column; out-of-city reads become 0.
if(threadIdx.x == 0){
neighborhood[0][0] = x != 0 && y != 0 ? tex2D(texRefCity, x-1, y-1) : 0;
neighborhood[1][0] = x != 0 ? tex2D(texRefCity, x-1, y) : 0;
neighborhood[2][0] = x != 0 && y < n - 1 ? tex2D(texRefCity, x-1, y+1) : 0;
}
// The thread whose second cell is the tile's last loads the right halo.
if(threadIdx.x + offset == maxIdx-1){
if(maxIdx == BLOCK){
neighborhood[0][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? tex2D(texRefCity, x + 1 + offset, y-1) : 0;
neighborhood[1][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 ? tex2D(texRefCity, x + 1 + offset, y) : 0;
neighborhood[2][maxIdx + 1] = blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? tex2D(texRefCity, x + 1 + offset, y+1) : 0;
}
else{ // maxIdx is less than BLOCK (ie N == 160)
neighborhood[0][maxIdx + 1] = 0;
neighborhood[1][maxIdx + 1] = 0;
neighborhood[2][maxIdx + 1] = 0;
}
}
// Each thread loads its two cells in each of the three rows.
neighborhood[0][threadIdx.x + 1] = blockIdx.y != 0 ? tex2D(texRefCity, x, y-1) : 0;
neighborhood[0][threadIdx.x + 1 + offset] = blockIdx.y != 0 ? tex2D(texRefCity, x + offset, y-1) : 0;
neighborhood[1][threadIdx.x + 1] = tex2D(texRefCity, x,y);
neighborhood[1][threadIdx.x + 1 + offset] = tex2D(texRefCity, x + offset, y);
neighborhood[2][threadIdx.x + 1] = blockIdx.y < n - 1 ? tex2D(texRefCity, x, y+1) : 0;
neighborhood[2][threadIdx.x + 1 + offset] = blockIdx.y < n - 1 ? tex2D(texRefCity, x + offset, y+1) : 0;
}
// Barrier is outside the guard so all threads of the block reach it.
__syncthreads();
if(x + offset < n && y < n){
int in_pos = neighborhood[1][threadIdx.x + 1];
int in_pos2 = neighborhood[1][threadIdx.x + 1 + offset];
// Infected: count down; on reaching 0 become immune for 30 days (-30).
if (in_pos > 0) {
res[threadIdx.x] = in_pos - 1 == 0 ? -30 : in_pos - 1;
}
// Immune: count up toward susceptible (0).
if (in_pos < 0) {
res[threadIdx.x] = in_pos + 1;
}
// Susceptible: infect (10 days) if infected neighbors exceed the
// cell's contact threshold; tally the new infection for this iteration.
if (in_pos == 0) {
int infected = 0;
infected += (neighborhood[0][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[0][threadIdx.x + 1] > 0) ? 1 : 0;
infected += (neighborhood[0][threadIdx.x + 2] > 0) ? 1 : 0;
infected += (neighborhood[1][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[1][threadIdx.x + 2] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x + 1] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x + 2] > 0) ? 1 : 0;
if (infected > tex2D(texRefContacts, x , y )) {
res[threadIdx.x] = 10;
atomicAdd(&infections[iter], 1);
}
else{
res[threadIdx.x] = 0;
}
}
// Same transition function for the thread's second cell (x + offset).
if(in_pos2 > 0){
res[threadIdx.x + offset] = in_pos2 - 1 == 0 ? -30 : in_pos2 - 1;
}
if(in_pos2 < 0){
res[threadIdx.x + offset] = in_pos2 + 1;
}
if (in_pos2 == 0) {
int infected2 = 0;
infected2 += (neighborhood[0][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[0][threadIdx.x + 1 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[0][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[1][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[1][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + 1 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
if (infected2 > tex2D(texRefContacts, x + offset , y )) {
res[threadIdx.x + offset] = 10;
atomicAdd(&infections[iter], 1);
}
else{
res[threadIdx.x + offset] = 0;
}
}
}
__syncthreads();
// Write both of this thread's results to global memory.
if(x + offset < n && y < n){
out[pos] = res[threadIdx.x];
out[pos + offset] = res[threadIdx.x + offset];
}
}
// Runs `iters` steps of the infection automaton on an n x n city.
// contacts, town and infections are DEVICE pointers; town is updated in
// place and infections[i] accumulates the new infections of iteration i.
// The city state is staged each iteration into a pitched buffer bound to a
// 2D texture so the kernel gets cached, clamped 2D reads.
//
// Fixes over the previous version:
//  - texContacts/texCity (pitched buffers) were never freed — memory leak;
//  - earlier allocations were leaked on error-return paths;
//  - the pre-loop copy of `in` into texCity was redundant (the loop
//    performs the same copy before every launch) and is removed;
//  - kernel launch errors are now surfaced via hipGetLastError().
void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters)
{
int* in = town;
int* out;
int* texContacts;
int* texCity;
size_t pitchContacs;
size_t pitchCity;
// Scratch buffer for ping-pong updates (read `in`, write `out`).
if(hipMalloc((void**)&out, n * n * sizeof(out[0])) != hipSuccess){
fprintf(stderr, "CudaMalloc failed ...\n");
return;
}
if(hipMallocPitch((void**)&texContacts, &pitchContacs, n * sizeof(out[0]), n) != hipSuccess){
fprintf(stderr, "CudaMallocPitch failed ...\n");
hipFree(out);
return;
}
if(hipMallocPitch((void**)&texCity, &pitchCity, n * sizeof(out[0]), n) != hipSuccess){
fprintf(stderr, "CudaMallocPitch failed ...\n");
hipFree(out);
hipFree(texContacts);
return;
}
// Contacts are constant across iterations: copy into the pitched buffer once.
if(hipMemcpy2D(texContacts, pitchContacs, contacts, n * sizeof(out[0]), n * sizeof(out[0]), n, hipMemcpyDeviceToDevice) != hipSuccess){
fprintf(stderr, "CudaMemcpy failed ...\n");
hipFree(out);
hipFree(texContacts);
hipFree(texCity);
return;
}
hipChannelFormatDesc desc = hipCreateChannelDesc<int>();
// Unfiltered, unnormalized integer reads, clamped at the city borders.
texRefContacts.addressMode[0] = hipAddressModeClamp;
texRefContacts.addressMode[1] = hipAddressModeClamp;
texRefContacts.filterMode = hipFilterModePoint;
texRefContacts.normalized = false;
texRefCity.addressMode[0] = hipAddressModeClamp;
texRefCity.addressMode[1] = hipAddressModeClamp;
texRefCity.filterMode = hipFilterModePoint;
texRefCity.normalized = false;
if(hipBindTexture2D(NULL, texRefContacts, texContacts, desc, n , n, pitchContacs) != hipSuccess){
fprintf(stderr, "CudaBind failed ...\n");
hipFree(out);
hipFree(texContacts);
hipFree(texCity);
return;
}
dim3 gridSize;
dim3 blockSize;
// Each thread handles two cells, hence blockDim.x is half the tile width.
// If N is less than block, we reduce the amount of threads per block
if(n < BLOCK){
gridSize.x = 1;
gridSize.y = n;
gridSize.z = 1;
blockSize.x = n >> 1;
blockSize.y = 1;
blockSize.z = 1;
}
else{
gridSize.x = ceil((float)n/BLOCK);
gridSize.y = n;
gridSize.z = 1;
blockSize.x = BLOCK >> 1;
blockSize.y = 1;
blockSize.z = 1;
}
for(int i = 0; i < iters; i++){
// Stage the current state into the pitched buffer and (re)bind it.
if(hipMemcpy2D(texCity, pitchCity, in, n * sizeof(out[0]), n * sizeof(out[0]), n, hipMemcpyDeviceToDevice) != hipSuccess){
fprintf(stderr, "CudamemcpyKernel failed ...\n");
if (in != town) hipFree(in); else hipFree(out);
hipFree(texContacts);
hipFree(texCity);
return;
}
if(hipBindTexture2D(NULL, texRefCity, texCity, desc, n , n, pitchCity) != hipSuccess){
fprintf(stderr, "CudaBindKernel failed ...\n");
if (in != town) hipFree(in); else hipFree(out);
hipFree(texContacts);
hipFree(texCity);
return;
}
hipLaunchKernelGGL(( GPUiter), dim3(gridSize), dim3(blockSize), 0, 0, contacts, in, infections, n, i, out);
// Ping-pong: the freshly written buffer becomes next iteration's input.
int* tmp = in;
in = out;
out = tmp;
}
// Surface any launch/execution error from the kernel loop.
if(hipGetLastError() != hipSuccess){
fprintf(stderr, "Kernel launch failed ...\n");
}
// After an odd number of iterations the final state lives in the scratch
// buffer; copy it back into town and free whichever buffer is the scratch.
if (in != town)
{
hipMemcpy(town, in, n * n * sizeof(town[0]), hipMemcpyDeviceToDevice);
hipFree(in);
}
else
{
hipFree(out);
}
hipFree(texContacts);
hipFree(texCity);
}
| 67327b8873fb8e37717333d947030debd6389b0f.cu | // write your code into this file
// your kernels can be implemented directly here, or included
// function solveGPU is a device function: it can allocate memory, call CUDA kernels etc.
#define DEBUGNUMFPU 32
#define BLOCK 256
texture<int, cudaTextureType2D, cudaReadModeElementType> texRefContacts;
texture<int, cudaTextureType2D, cudaReadModeElementType> texRefCity;
// One step of the infection cellular automaton on an n x n city.
// Grid layout: one block row per city row (blockIdx.y = y); each thread
// processes TWO cells of that row, columns x and x+offset, where offset is
// half the tile width — hence the host launches with blockDim.x = tile/2.
// Inputs are read through the textures (texRefCity = current state,
// texRefContacts = per-cell infection thresholds); the `contacts` and `in`
// pointer parameters are not dereferenced by this kernel.
// Cell encoding (inferred from the update rule — confirm against the
// assignment spec): >0 infected with that many days left, <0 immune
// counting up toward 0, 0 susceptible.
__global__ void GPUiter(const int* const contacts, const int* const in, int* const infections, const int n, const int iter, int* const out){
// Three rows (above/current/below) of the tile plus halo cells at both ends.
__shared__ int neighborhood[3][BLOCK+3];
// Next state for each cell handled by this block.
__shared__ int res[BLOCK+1];
int x = (blockIdx.x * (blockDim.x * 2)) + threadIdx.x;
int y = blockIdx.y;
// Number of valid cells in this block's tile (smaller for the last block).
int maxIdx = min(BLOCK, n-(blockIdx.x * (blockDim.x * 2)));
int pos = y*n + x;
int offset = maxIdx >> 1;
if(x + offset < n && y < n){
// First thread loads the left halo column; out-of-city reads become 0.
if(threadIdx.x == 0){
neighborhood[0][0] = x != 0 && y != 0 ? tex2D(texRefCity, x-1, y-1) : 0;
neighborhood[1][0] = x != 0 ? tex2D(texRefCity, x-1, y) : 0;
neighborhood[2][0] = x != 0 && y < n - 1 ? tex2D(texRefCity, x-1, y+1) : 0;
}
// The thread whose second cell is the tile's last loads the right halo.
if(threadIdx.x + offset == maxIdx-1){
if(maxIdx == BLOCK){
neighborhood[0][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 && blockIdx.y != 0 ? tex2D(texRefCity, x + 1 + offset, y-1) : 0;
neighborhood[1][maxIdx + 1] = blockIdx.x < ceil((float)n/BLOCK) - 1 ? tex2D(texRefCity, x + 1 + offset, y) : 0;
neighborhood[2][maxIdx + 1] = blockIdx.y < n - 1 && blockIdx.x < ceil((float)n/BLOCK) - 1 ? tex2D(texRefCity, x + 1 + offset, y+1) : 0;
}
else{ // maxIdx is less than BLOCK (ie N == 160)
neighborhood[0][maxIdx + 1] = 0;
neighborhood[1][maxIdx + 1] = 0;
neighborhood[2][maxIdx + 1] = 0;
}
}
// Each thread loads its two cells in each of the three rows.
neighborhood[0][threadIdx.x + 1] = blockIdx.y != 0 ? tex2D(texRefCity, x, y-1) : 0;
neighborhood[0][threadIdx.x + 1 + offset] = blockIdx.y != 0 ? tex2D(texRefCity, x + offset, y-1) : 0;
neighborhood[1][threadIdx.x + 1] = tex2D(texRefCity, x,y);
neighborhood[1][threadIdx.x + 1 + offset] = tex2D(texRefCity, x + offset, y);
neighborhood[2][threadIdx.x + 1] = blockIdx.y < n - 1 ? tex2D(texRefCity, x, y+1) : 0;
neighborhood[2][threadIdx.x + 1 + offset] = blockIdx.y < n - 1 ? tex2D(texRefCity, x + offset, y+1) : 0;
}
// Barrier is outside the guard so all threads of the block reach it.
__syncthreads();
if(x + offset < n && y < n){
int in_pos = neighborhood[1][threadIdx.x + 1];
int in_pos2 = neighborhood[1][threadIdx.x + 1 + offset];
// Infected: count down; on reaching 0 become immune for 30 days (-30).
if (in_pos > 0) {
res[threadIdx.x] = in_pos - 1 == 0 ? -30 : in_pos - 1;
}
// Immune: count up toward susceptible (0).
if (in_pos < 0) {
res[threadIdx.x] = in_pos + 1;
}
// Susceptible: infect (10 days) if infected neighbors exceed the
// cell's contact threshold; tally the new infection for this iteration.
if (in_pos == 0) {
int infected = 0;
infected += (neighborhood[0][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[0][threadIdx.x + 1] > 0) ? 1 : 0;
infected += (neighborhood[0][threadIdx.x + 2] > 0) ? 1 : 0;
infected += (neighborhood[1][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[1][threadIdx.x + 2] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x + 1] > 0) ? 1 : 0;
infected += (neighborhood[2][threadIdx.x + 2] > 0) ? 1 : 0;
if (infected > tex2D(texRefContacts, x , y )) {
res[threadIdx.x] = 10;
atomicAdd(&infections[iter], 1);
}
else{
res[threadIdx.x] = 0;
}
}
// Same transition function for the thread's second cell (x + offset).
if(in_pos2 > 0){
res[threadIdx.x + offset] = in_pos2 - 1 == 0 ? -30 : in_pos2 - 1;
}
if(in_pos2 < 0){
res[threadIdx.x + offset] = in_pos2 + 1;
}
if (in_pos2 == 0) {
int infected2 = 0;
infected2 += (neighborhood[0][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[0][threadIdx.x + 1 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[0][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[1][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[1][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + 1 + offset] > 0) ? 1 : 0;
infected2 += (neighborhood[2][threadIdx.x + 2 + offset] > 0) ? 1 : 0;
if (infected2 > tex2D(texRefContacts, x + offset , y )) {
res[threadIdx.x + offset] = 10;
atomicAdd(&infections[iter], 1);
}
else{
res[threadIdx.x + offset] = 0;
}
}
}
__syncthreads();
// Write both of this thread's results to global memory.
if(x + offset < n && y < n){
out[pos] = res[threadIdx.x];
out[pos + offset] = res[threadIdx.x + offset];
}
}
// Runs `iters` steps of the infection automaton on an n x n city.
// contacts, town and infections are DEVICE pointers; town is updated in
// place and infections[i] accumulates the new infections of iteration i.
// The city state is staged each iteration into a pitched buffer bound to a
// 2D texture so the kernel gets cached, clamped 2D reads.
//
// Fixes over the previous version:
//  - texContacts/texCity (pitched buffers) were never freed — memory leak;
//  - earlier allocations were leaked on error-return paths;
//  - the pre-loop copy of `in` into texCity was redundant (the loop
//    performs the same copy before every launch) and is removed;
//  - kernel launch errors are now surfaced via cudaGetLastError().
void solveGPU(const int* const contacts, int* const town, int* const infections, const int n, const int iters)
{
int* in = town;
int* out;
int* texContacts;
int* texCity;
size_t pitchContacs;
size_t pitchCity;
// Scratch buffer for ping-pong updates (read `in`, write `out`).
if(cudaMalloc((void**)&out, n * n * sizeof(out[0])) != cudaSuccess){
fprintf(stderr, "CudaMalloc failed ...\n");
return;
}
if(cudaMallocPitch((void**)&texContacts, &pitchContacs, n * sizeof(out[0]), n) != cudaSuccess){
fprintf(stderr, "CudaMallocPitch failed ...\n");
cudaFree(out);
return;
}
if(cudaMallocPitch((void**)&texCity, &pitchCity, n * sizeof(out[0]), n) != cudaSuccess){
fprintf(stderr, "CudaMallocPitch failed ...\n");
cudaFree(out);
cudaFree(texContacts);
return;
}
// Contacts are constant across iterations: copy into the pitched buffer once.
if(cudaMemcpy2D(texContacts, pitchContacs, contacts, n * sizeof(out[0]), n * sizeof(out[0]), n, cudaMemcpyDeviceToDevice) != cudaSuccess){
fprintf(stderr, "CudaMemcpy failed ...\n");
cudaFree(out);
cudaFree(texContacts);
cudaFree(texCity);
return;
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
// Unfiltered, unnormalized integer reads, clamped at the city borders.
texRefContacts.addressMode[0] = cudaAddressModeClamp;
texRefContacts.addressMode[1] = cudaAddressModeClamp;
texRefContacts.filterMode = cudaFilterModePoint;
texRefContacts.normalized = false;
texRefCity.addressMode[0] = cudaAddressModeClamp;
texRefCity.addressMode[1] = cudaAddressModeClamp;
texRefCity.filterMode = cudaFilterModePoint;
texRefCity.normalized = false;
if(cudaBindTexture2D(NULL, texRefContacts, texContacts, desc, n , n, pitchContacs) != cudaSuccess){
fprintf(stderr, "CudaBind failed ...\n");
cudaFree(out);
cudaFree(texContacts);
cudaFree(texCity);
return;
}
dim3 gridSize;
dim3 blockSize;
// Each thread handles two cells, hence blockDim.x is half the tile width.
// If N is less than block, we reduce the amount of threads per block
if(n < BLOCK){
gridSize.x = 1;
gridSize.y = n;
gridSize.z = 1;
blockSize.x = n >> 1;
blockSize.y = 1;
blockSize.z = 1;
}
else{
gridSize.x = ceil((float)n/BLOCK);
gridSize.y = n;
gridSize.z = 1;
blockSize.x = BLOCK >> 1;
blockSize.y = 1;
blockSize.z = 1;
}
for(int i = 0; i < iters; i++){
// Stage the current state into the pitched buffer and (re)bind it.
if(cudaMemcpy2D(texCity, pitchCity, in, n * sizeof(out[0]), n * sizeof(out[0]), n, cudaMemcpyDeviceToDevice) != cudaSuccess){
fprintf(stderr, "CudamemcpyKernel failed ...\n");
if (in != town) cudaFree(in); else cudaFree(out);
cudaFree(texContacts);
cudaFree(texCity);
return;
}
if(cudaBindTexture2D(NULL, texRefCity, texCity, desc, n , n, pitchCity) != cudaSuccess){
fprintf(stderr, "CudaBindKernel failed ...\n");
if (in != town) cudaFree(in); else cudaFree(out);
cudaFree(texContacts);
cudaFree(texCity);
return;
}
GPUiter<<<gridSize, blockSize>>>(contacts, in, infections, n, i, out);
// Ping-pong: the freshly written buffer becomes next iteration's input.
int* tmp = in;
in = out;
out = tmp;
}
// Surface any launch/execution error from the kernel loop.
if(cudaGetLastError() != cudaSuccess){
fprintf(stderr, "Kernel launch failed ...\n");
}
// After an odd number of iterations the final state lives in the scratch
// buffer; copy it back into town and free whichever buffer is the scratch.
if (in != town)
{
cudaMemcpy(town, in, n * n * sizeof(town[0]), cudaMemcpyDeviceToDevice);
cudaFree(in);
}
else
{
cudaFree(out);
}
cudaFree(texContacts);
cudaFree(texCity);
}
|
bd4c8270e1047f2973cf40fd31d4fab72982fcf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace split_merge
{
// Maps an arbitrary element type T to POD scalar/vector types of the same
// byte width, selected by sizeof(T), so the merge/split kernels can do
// width-appropriate vector loads and stores regardless of the OpenCV type.
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
// 1-byte elements -> char vector types.
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
// 2-byte elements -> short vector types.
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
// 4-byte elements -> int vector types.
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
// 8-byte elements -> double vector types. No 3/4-wide members: the
// explicit <double> kernel specializations below handle those channel counts.
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
// Uniform signatures for the per-type merge/split launchers dispatched
// from merge_caller / split_caller.
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream);
//------------------------------------------------------------
// Merge
// Interleave two single-channel planes into one 2-channel image.
// One thread per pixel; the packed write uses the width-matched vector type.
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row pointers honor the independent byte pitch of each plane.
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
// Interleave three single-channel planes into one 3-channel image.
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
// double specialization: no double3 vector type, so write the three
// scalars individually at stride 3.
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
// Interleave four single-channel planes into one 4-channel image.
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
// double specialization: no double4 vector type, so emit two double2
// stores per pixel instead.
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
// Host launcher for the 2-channel merge kernel; synchronizes only on the
// default (NULL) stream, async otherwise.
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC2_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host launcher for the 3-channel merge kernel.
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC3_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host launcher for the 4-channel merge kernel.
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC4_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Dispatch to the merge launcher for the given channel count (2..4) and
// element size (1/2/4/8 bytes). Table rows are per channel count with five
// slots indexed by elem_size >> 1 (0,1,2,-,4); the unused slot 3 and
// unsupported combinations hold 0 and raise StsUnsupportedFormat.
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const hipStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
// Deinterleave a 2-channel image into two single-channel planes.
// One thread per pixel; the packed read uses the width-matched vector type.
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row pointers honor the independent byte pitch of each image.
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
// Deinterleave a 3-channel image into three single-channel planes.
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
// double specialization: no double3 vector type, so read the three
// scalars individually at stride 3.
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
// Deinterleave a 4-channel image into four single-channel planes.
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
// double specialization: no double4 vector type, so read two double2
// values per pixel instead.
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
// Host launcher for the 2-channel split kernel; synchronizes only on the
// default (NULL) stream, async otherwise.
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC2_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host launcher for the 3-channel split kernel.
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC3_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Host launcher for the 4-channel split kernel.
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC4_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
// Dispatch to the split launcher for the given channel count (2..4) and
// per-channel element size (1/2/4/8 bytes). Table rows are per channel
// count with five slots indexed by elem_size1 >> 1 (0,1,2,-,4); the unused
// slot 3 and unsupported combinations hold 0 and raise StsUnsupportedFormat.
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const hipStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
| bd4c8270e1047f2973cf40fd31d4fab72982fcf1.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC2_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC3_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC4_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC2_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC3_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC4_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported channel count or data type");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
dd8ea060ffaafc45b92bf1a1467a1fce0a1abf50.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| dd8ea060ffaafc45b92bf1a1467a1fce0a1abf50.cu |
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
|
804c3ba4a290ced9d44b96ca27ae823408167c47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h
#include <math.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_gl_interop.h>
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
template<class T>
struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
template <typename T> struct vec3 { typedef float Type; }; // dummy
template <> struct vec3<float> { typedef float3 Type; };
template <> struct vec3<double> { typedef double3 Type; };
template <typename T> struct vec4 { typedef float Type; }; // dummy
template <> struct vec4<float> { typedef float4 Type; };
template <> struct vec4<double> { typedef double4 Type; };
template<typename T>
__device__ T rsqrt_T(T x) { return rsqrt(x); }
template<>
__device__ float rsqrt_T<float>(float x) { return rsqrtf(x); }
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
template <typename T>
__device__ T getSofteningSquared() { return softeningSquared; }
template <>
__device__ double getSofteningSquared<double>() { return softeningSquared_fp64; }
template <typename T>
struct DeviceData {
T* dPos[2]; // mapped host pointers
T* dVel;
hipEvent_t event;
unsigned int offset;
unsigned int numBodies;
};
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
template <typename T>
__device__ typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
typename vec3<T>::Type accel)
{
typename vec4<T>::Type* sharedPos = SharedMemory<typename vec4<T>::Type>();
// The CUDA 1.1 compiler cannot determine that i is not going to
// overflow in the loop below. Therefore if int is used on 64-bit linux
// or windows (or long instead of long long on win64), the compiler
// generates suboptimal code. Therefore we use long long on win64 and
// long on everything else. (Workaround for Bug ID 347697)
#ifdef _Win64
unsigned long long j = 0;
#else
unsigned long j = 0;
#endif
// Here we unroll the loop to reduce bookkeeping instruction overhead
// 32x unrolling seems to provide best performance
// Note that having an unsigned int loop counter and an unsigned
// long index helps the compiler generate efficient code on 64-bit
// OSes. The compiler can't assume the 64-bit index won't overflow
// so it incurs extra integer operations. This is a standard issue
// in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
for (unsigned int counter = 0; counter < blockDim.x; counter++ )
{
accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
}
return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<m)?(x):(x-m)) // Mod without divide, works on values from 0 up to 2m
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type* positions,
int numBodies)
{
typename vec4<T>::Type* sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = n / (p * q);
for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
{
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
multithreadBodies ?
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads();
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads();
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
if (multithreadBodies)
{
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
return acc;
}
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type* newPos,
typename vec4<T>::Type* oldPos,
typename vec4<T>::Type* vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= deviceNumBodies)
return;
typename vec4<T>::Type position = oldPos[deviceOffset + index];
typename vec3<T>::Type accel = computeBodyAccel<T, multithreadBodies>(position, oldPos, totalNumBodies);
if (!multithreadBodies || (threadIdx.y == 0))
{
// acceleration = force \ mass;
// new velocity = old velocity + acceleration * deltaTime
// note we factor out the body's mass from the equation, here and in bodyBodyInteraction
// (because they cancel out). Thus here force == acceleration
typename vec4<T>::Type velocity = vel[deviceOffset + index];
velocity.x += accel.x * deltaTime;
velocity.y += accel.y * deltaTime;
velocity.z += accel.z * deltaTime;
velocity.x *= damping;
velocity.y *= damping;
velocity.z *= damping;
// new position = old position + velocity * deltaTime
position.x += velocity.x * deltaTime;
position.y += velocity.y * deltaTime;
position.z += velocity.z * deltaTime;
// store new position and velocity
newPos[deviceOffset + index] = position;
vel[deviceOffset + index] = velocity;
}
}
// Advances the N-body system by one time step, dispatching one kernel launch
// per participating device. When bUsePBO is set, position buffers live in
// OpenGL PBOs and are mapped/unmapped around the launches.
// p = threads per block along x (bodies), q = threads per body along y.
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
cudaGraphicsResource **pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p,
int q,
bool bUsePBO)
{
if (bUsePBO)
{
// Map the two position PBOs and fetch their device pointers
// (read-only for the current buffer, write-discard for the target).
cutilSafeCall(hipGraphicsResourceSetMapFlags(pgres[currentRead], hipGraphicsMapFlagsReadOnly));
cutilSafeCall(hipGraphicsResourceSetMapFlags(pgres[1-currentRead], hipGraphicsMapFlagsWriteDiscard));
cutilSafeCall(hipGraphicsMapResources(2, pgres, 0));
size_t bytes;
cutilSafeCall(hipGraphicsResourceGetMappedPointer((void**)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
cutilSafeCall(hipGraphicsResourceGetMappedPointer((void**)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
}
hipDeviceProp_t props;
for (unsigned int dev = 0; dev != numDevices; dev++)
{
if (numDevices > 1)
hipSetDevice(dev);
cutilSafeCall(hipGetDeviceProperties(&props, dev));
// Shrink bodies-per-block (p) and grow threads-per-body (q) until there
// are at least as many blocks as multiprocessors on this device.
while ((deviceData[dev].numBodies > 0) &&
(deviceData[dev].numBodies / p < (unsigned)props.multiProcessorCount))
{
p /= 2;
q *= 2;
}
// BUGFIX: build the block shape *after* the loop above may have adjusted
// p and q. Previously `threads` was computed once before this per-device
// loop, so a p/q adjustment left blockDim stale and inconsistent with
// both the grid sizing below and the threads.y == 1 dispatch.
dim3 threads(p, q, 1);
dim3 grid((deviceData[dev].numBodies + (p-1))/p, 1, 1);
// execute the kernel:
// When the numBodies / thread block size is < # multiprocessors
// (16 on G80), the GPU is underutilized. For example, with 256 threads per
// block and 1024 bodies, there will only be 4 thread blocks, so the
// GPU will only be 25% utilized. To improve this, we use multiple threads
// per body. We still can use blocks of 256 threads, but they are arranged
// in q rows of p threads each. Each thread processes 1/q of the forces
// that affect each body, and then 1/q of the threads (those with
// threadIdx.y==0) add up the partial sums from the other threads for that
// body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4
// threads per body and 256 threads per block. There will be n/p = 16
// blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads
// per body is greater than one, so that when it is not we don't have to
// execute the more complex code required!
int sharedMemSize = p * q * 4 * sizeof(T); // one vec4<T> (4 components) per thread
if (grid.x > 0 && threads.y == 1)
{
hipLaunchKernelGGL(( integrateBodies<T, false>), dim3(grid), dim3(threads), sharedMemSize , 0,
(typename vec4<T>::Type*)deviceData[dev].dPos[1-currentRead],
(typename vec4<T>::Type*)deviceData[dev].dPos[currentRead],
(typename vec4<T>::Type*)deviceData[dev].dVel,
deviceData[dev].offset, deviceData[dev].numBodies,
deltaTime, damping, numBodies);
}
else if (grid.x > 0)
{
hipLaunchKernelGGL(( integrateBodies<T, true>), dim3(grid), dim3(threads), sharedMemSize , 0,
(typename vec4<T>::Type*)deviceData[dev].dPos[1-currentRead],
(typename vec4<T>::Type*)deviceData[dev].dPos[currentRead],
(typename vec4<T>::Type*)deviceData[dev].dVel,
deviceData[dev].offset, deviceData[dev].numBodies,
deltaTime, damping, numBodies);
}
if (numDevices > 1)
{
cutilSafeCall(hipEventRecord(deviceData[dev].event));
// MJH: Hack on older driver versions to force kernel launches to flush!
hipStreamQuery(0);
}
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
if (numDevices > 1)
{
// Wait for every device's recorded event before returning.
for (unsigned int dev = 0; dev < numDevices; dev++)
cutilSafeCall(hipEventSynchronize(deviceData[dev].event));
}
if (bUsePBO)
{
cutilSafeCall(hipGraphicsUnmapResources(2, pgres, 0));
}
}
// Explicit template instantiations (single and double precision) so the
// definitions above are emitted in this translation unit for external callers.
template void integrateNbodySystem<float>(DeviceData<float>* deviceData,
cudaGraphicsResource** pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
template void integrateNbodySystem<double>(DeviceData<double>* deviceData,
cudaGraphicsResource** pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
| 804c3ba4a290ced9d44b96ca27ae823408167c47.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h
#include <math.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_gl_interop.h>
// Softening term (epsilon^2) added to every squared distance so that
// coincident bodies do not produce a singular force; one copy per precision
// (set from the host; presumably via cudaMemcpyToSymbol — TODO confirm).
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
// Typed accessor for the kernel's dynamically sized shared-memory block.
// The extern array must have a single type across all template
// instantiations, hence the cast.
template<class T>
struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// Precision traits mapping T -> the matching CUDA built-in vector types.
template <typename T> struct vec3 { typedef float Type; }; // dummy
template <> struct vec3<float> { typedef float3 Type; };
template <> struct vec3<double> { typedef double3 Type; };
template <typename T> struct vec4 { typedef float Type; }; // dummy
template <> struct vec4<float> { typedef float4 Type; };
template <> struct vec4<double> { typedef double4 Type; };
// Reciprocal square root dispatched on precision.
template<typename T>
__device__ T rsqrt_T(T x) { return rsqrt(x); }
template<>
__device__ float rsqrt_T<float>(float x) { return rsqrtf(x); }
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
// Returns the precision-appropriate softening constant declared above.
template <typename T>
__device__ T getSofteningSquared() { return softeningSquared; }
template <>
__device__ double getSofteningSquared<double>() { return softeningSquared_fp64; }
// Per-device state for the (possibly multi-GPU) simulation.
template <typename T>
struct DeviceData {
T* dPos[2]; // mapped host pointers
T* dVel;
cudaEvent_t event; // recorded after each launch; synchronized in multi-GPU runs
unsigned int offset; // index of the first body this device owns
unsigned int numBodies; // number of bodies this device owns
};
// Accumulates into `ai` the acceleration exerted on body `bi` by body `bj`
// (softened gravity; bj.w carries the source body's mass). The target body's
// own mass is deliberately absent: it cancels against F = m*a, so the value
// accumulated here is already an acceleration.
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
                    typename vec4<T>::Type bi,
                    typename vec4<T>::Type bj)
{
    // Separation vector r_ij = r_j - r_i.
    typename vec3<T>::Type d;
    d.x = bj.x - bi.x;
    d.y = bj.y - bi.y;
    d.z = bj.z - bi.z;
    // Softened squared distance |r_ij|^2 + eps^2; with a positive softening
    // value this never reaches zero, so the i == j term is finite.
    T distSqr = d.x * d.x + d.y * d.y + d.z * d.z + getSofteningSquared<T>();
    // 1 / distSqr^(3/2) built from a single reciprocal square root.
    T invDist = rsqrt_T(distSqr);
    T s = bj.w * (invDist * invDist * invDist);
    // a_i += m_j * r_ij / (|r_ij|^2 + eps^2)^(3/2)
    ai.x += d.x * s;
    ai.y += d.y * s;
    ai.z += d.z * s;
    return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
// This is the "tile_calculation" function from the GPUG3 article:
// accumulates into `accel` the interactions between body iPos and every body
// currently staged in this block's shared-memory tile (sharedPos via SX).
template <typename T>
__device__ typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
typename vec3<T>::Type accel)
{
typename vec4<T>::Type* sharedPos = SharedMemory<typename vec4<T>::Type>();
// The CUDA 1.1 compiler cannot determine that i is not going to
// overflow in the loop below. Therefore if int is used on 64-bit linux
// or windows (or long instead of long long on win64), the compiler
// generates suboptimal code. Therefore we use long long on win64 and
// long on everything else. (Workaround for Bug ID 347697)
// BUGFIX: the macro MSVC predefines on 64-bit Windows is _WIN64 (all
// caps); the original "_Win64" spelling never matched, so the intended
// 64-bit branch was dead code.
#ifdef _WIN64
unsigned long long j = 0;
#else
unsigned long j = 0;
#endif
// Here we unroll the loop to reduce bookkeeping instruction overhead
// 32x unrolling seems to provide best performance
// Note that having an unsigned int loop counter and an unsigned
// long index helps the compiler generate efficient code on 64-bit
// OSes. The compiler can't assume the 64-bit index won't overflow
// so it incurs extra integer operations. This is a standard issue
// in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
for (unsigned int counter = 0; counter < blockDim.x; counter++ )
{
accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
}
return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<m)?(x):(x-m)) // Mod without divide, works on values from 0 up to 2m
// Computes the total acceleration on bodyPos from all numBodies positions by
// sweeping shared-memory tiles of p*q bodies at a time. Requires a dynamic
// shared allocation of at least blockDim.x * blockDim.y vec4<T> elements.
// NOTE(review): numTiles uses integer division, so bodies in a trailing
// partial tile (n not divisible by p*q) are not visited — presumably callers
// guarantee divisibility; confirm against the launch configuration.
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type* positions,
int numBodies)
{
typename vec4<T>::Type* sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = n / (p * q);
// Start each block at a different tile (WRAP below) so blocks do not all
// read the same global addresses at the same time.
for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
{
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
multithreadBodies ?
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads(); // tile fully staged before anyone reads it
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads(); // all reads done before the next tile overwrites sharedPos
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
if (multithreadBodies)
{
// Stage each row's partial sum in shared memory, then let row 0 reduce.
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
// Only threadIdx.y == 0 holds the full sum when multithreadBodies is true.
return acc;
}
// One-step integrator: each thread owns one body, gathers its acceleration
// against all totalNumBodies positions, then (on the single-row path, or on
// the threadIdx.y == 0 row when q threads cooperate per body) applies the
// damped velocity update followed by the position update.
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type* newPos,
                typename vec4<T>::Type* oldPos,
                typename vec4<T>::Type* vel,
                unsigned int deviceOffset, unsigned int deviceNumBodies,
                float deltaTime, float damping, int totalNumBodies)
{
    int body = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid may overshoot this device's body count.
    if (body >= deviceNumBodies)
        return;
    unsigned int slot = deviceOffset + body;
    typename vec4<T>::Type pos = oldPos[slot];
    typename vec3<T>::Type accel =
        computeBodyAccel<T, multithreadBodies>(pos, oldPos, totalNumBodies);
    // When several threads cooperate per body, only row y == 0 holds the
    // reduced acceleration; the other rows are finished.
    if (multithreadBodies && threadIdx.y != 0)
        return;
    // Mass is factored out of the force equation here and in
    // bodyBodyInteraction (it cancels), so `accel` is already acceleration.
    typename vec4<T>::Type v = vel[slot];
    v.x = (v.x + accel.x * deltaTime) * damping;
    v.y = (v.y + accel.y * deltaTime) * damping;
    v.z = (v.z + accel.z * deltaTime) * damping;
    // Position advances with the freshly damped velocity.
    pos.x += v.x * deltaTime;
    pos.y += v.y * deltaTime;
    pos.z += v.z * deltaTime;
    newPos[slot] = pos;
    vel[slot] = v;
}
// Advances the N-body system by one time step, dispatching one kernel launch
// per participating device. When bUsePBO is set, position buffers live in
// OpenGL PBOs and are mapped/unmapped around the launches.
// p = threads per block along x (bodies), q = threads per body along y.
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
cudaGraphicsResource **pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p,
int q,
bool bUsePBO)
{
if (bUsePBO)
{
// Map the two position PBOs and fetch their device pointers
// (read-only for the current buffer, write-discard for the target).
cutilSafeCall(cudaGraphicsResourceSetMapFlags(pgres[currentRead], cudaGraphicsMapFlagsReadOnly));
cutilSafeCall(cudaGraphicsResourceSetMapFlags(pgres[1-currentRead], cudaGraphicsMapFlagsWriteDiscard));
cutilSafeCall(cudaGraphicsMapResources(2, pgres, 0));
size_t bytes;
cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void**)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void**)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
}
cudaDeviceProp props;
for (unsigned int dev = 0; dev != numDevices; dev++)
{
if (numDevices > 1)
cudaSetDevice(dev);
cutilSafeCall(cudaGetDeviceProperties(&props, dev));
// Shrink bodies-per-block (p) and grow threads-per-body (q) until there
// are at least as many blocks as multiprocessors on this device.
while ((deviceData[dev].numBodies > 0) &&
(deviceData[dev].numBodies / p < (unsigned)props.multiProcessorCount))
{
p /= 2;
q *= 2;
}
// BUGFIX: build the block shape *after* the loop above may have adjusted
// p and q. Previously `threads` was computed once before this per-device
// loop, so a p/q adjustment left blockDim stale and inconsistent with
// both the grid sizing below and the threads.y == 1 dispatch.
dim3 threads(p, q, 1);
dim3 grid((deviceData[dev].numBodies + (p-1))/p, 1, 1);
// execute the kernel:
// When the numBodies / thread block size is < # multiprocessors
// (16 on G80), the GPU is underutilized. For example, with 256 threads per
// block and 1024 bodies, there will only be 4 thread blocks, so the
// GPU will only be 25% utilized. To improve this, we use multiple threads
// per body. We still can use blocks of 256 threads, but they are arranged
// in q rows of p threads each. Each thread processes 1/q of the forces
// that affect each body, and then 1/q of the threads (those with
// threadIdx.y==0) add up the partial sums from the other threads for that
// body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4
// threads per body and 256 threads per block. There will be n/p = 16
// blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads
// per body is greater than one, so that when it is not we don't have to
// execute the more complex code required!
int sharedMemSize = p * q * 4 * sizeof(T); // one vec4<T> (4 components) per thread
if (grid.x > 0 && threads.y == 1)
{
integrateBodies<T, false><<< grid, threads, sharedMemSize >>>
((typename vec4<T>::Type*)deviceData[dev].dPos[1-currentRead],
(typename vec4<T>::Type*)deviceData[dev].dPos[currentRead],
(typename vec4<T>::Type*)deviceData[dev].dVel,
deviceData[dev].offset, deviceData[dev].numBodies,
deltaTime, damping, numBodies);
}
else if (grid.x > 0)
{
integrateBodies<T, true><<< grid, threads, sharedMemSize >>>
((typename vec4<T>::Type*)deviceData[dev].dPos[1-currentRead],
(typename vec4<T>::Type*)deviceData[dev].dPos[currentRead],
(typename vec4<T>::Type*)deviceData[dev].dVel,
deviceData[dev].offset, deviceData[dev].numBodies,
deltaTime, damping, numBodies);
}
if (numDevices > 1)
{
cutilSafeCall(cudaEventRecord(deviceData[dev].event));
// MJH: Hack on older driver versions to force kernel launches to flush!
cudaStreamQuery(0);
}
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
if (numDevices > 1)
{
// Wait for every device's recorded event before returning.
for (unsigned int dev = 0; dev < numDevices; dev++)
cutilSafeCall(cudaEventSynchronize(deviceData[dev].event));
}
if (bUsePBO)
{
cutilSafeCall(cudaGraphicsUnmapResources(2, pgres, 0));
}
}
// Explicit template instantiations (single and double precision) so the
// definitions above are emitted in this translation unit for external callers.
template void integrateNbodySystem<float>(DeviceData<float>* deviceData,
cudaGraphicsResource** pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
template void integrateNbodySystem<double>(DeviceData<double>* deviceData,
cudaGraphicsResource** pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
605b8cc0aea3435d54b3efb49c51171203f2822c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/regex/regex.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings; // input strings column
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array of matched (begin,end) values, one row per string
column_device_view const d_repls; // replacement strings (one per pattern, or a single shared one)
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
// Two-pass row handler: when d_chars is null, stores the output byte count
// for row idx in d_offsets[idx]; otherwise writes the fully replaced string
// at d_chars + d_offsets[idx]. The earliest-matching pattern wins at each
// character position, and matching resumes after the replaced span.
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
// Per-thread regex evaluation stacks; stack_size is chosen by the caller
// from the largest instruction count across all patterns.
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0; // byte position of the first not-yet-copied input byte
size_type ch_pos = 0; // current character position being examined
// initialize the working ranges memory to -1's
// (the second member is overwritten before it is ever read)
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
size_type begin = ch_pos, end = nchars;
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr = thrust::find_if(
thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos] __device__(auto range) {
return range.first == ch_pos;
});
if (itr !=
d_ranges +
number_of_patterns) { // match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
// single replacement string is shared across all patterns
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
// skip past the replaced span (the ++ below lands on `end`)
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = nbytes;
}
};
} // namespace
//
// Replaces, within each string of the column, every occurrence of any of the
// given regex patterns with the corresponding (or single shared) replacement
// string. Null rows stay null. The functor is instantiated with a stack size
// chosen from the largest compiled-pattern instruction count.
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent());
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto repls_column = column_device_view::create(repls.parent(), stream);
auto d_repls = *repls_column;
auto d_flags = get_character_flags_table();
// compile regexes into device objects; track the largest instruction
// count to pick the evaluation-stack size below
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
rmm::device_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
auto insts = prog->insts_counts();
if (insts > regex_insts) regex_insts = insts;
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog)); // keeps the device progs alive for the call
}
auto d_progs = progs.data().get();
// copy null mask
auto null_mask = copy_bitmask(strings.parent());
auto null_count = strings.null_count();
// create working buffer for ranges pairs (one row of ranges per string)
rmm::device_vector<found_range> found_ranges(patterns.size() * strings_count);
auto d_found_ranges = found_ranges.data().get();
// create child columns
std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
else if (regex_insts <= RX_MEDIUM_INSTS)
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
else
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
// assemble the output column from the offsets/chars children
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
// External API: multi-pattern regex replace over a strings column.
// Delegates to detail::replace_re with default stream semantics.
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE(); // NVTX range for profiling
return detail::replace_re(strings, patterns, repls, mr);
}
} // namespace strings
} // namespace cudf
| 605b8cc0aea3435d54b3efb49c51171203f2822c.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/regex/regex.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings; // input strings column
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array of matched (begin,end) values, one row per string
column_device_view const d_repls; // replacement strings (one per pattern, or a single shared one)
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
// Two-pass row handler: when d_chars is null, stores the output byte count
// for row idx in d_offsets[idx]; otherwise writes the fully replaced string
// at d_chars + d_offsets[idx]. The earliest-matching pattern wins at each
// character position, and matching resumes after the replaced span.
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
// Per-thread regex evaluation stacks; stack_size is chosen by the caller
// from the largest instruction count across all patterns.
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0; // byte position of the first not-yet-copied input byte
size_type ch_pos = 0; // current character position being examined
// initialize the working ranges memory to -1's
// (the second member is overwritten before it is ever read)
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
size_type begin = ch_pos, end = nchars;
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr = thrust::find_if(
thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos] __device__(auto range) {
return range.first == ch_pos;
});
if (itr !=
d_ranges +
number_of_patterns) { // match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
// single replacement string is shared across all patterns
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
// skip past the replaced span (the ++ below lands on `end`)
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = nbytes;
}
};
} // namespace
//
// Replaces, within each string of the column, every occurrence of any of the
// given regex patterns with the corresponding (or single shared) replacement
// string. Null rows stay null. The functor is instantiated with a stack size
// chosen from the largest compiled-pattern instruction count.
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(mr, stream);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent());
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
auto repls_column = column_device_view::create(repls.parent(), stream);
auto d_repls = *repls_column;
auto d_flags = get_character_flags_table();
// compile regexes into device objects; track the largest instruction
// count to pick the evaluation-stack size below
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
rmm::device_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
auto insts = prog->insts_counts();
if (insts > regex_insts) regex_insts = insts;
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog)); // keeps the device progs alive for the call
}
auto d_progs = progs.data().get();
// copy null mask
auto null_mask = copy_bitmask(strings.parent());
auto null_count = strings.null_count();
// create working buffer for ranges pairs (one row of ranges per string)
rmm::device_vector<found_range> found_ranges(patterns.size() * strings_count);
auto d_found_ranges = found_ranges.data().get();
// create child columns
std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
else if (regex_insts <= RX_MEDIUM_INSTS)
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
else
children = make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, d_repls},
strings_count,
null_count,
mr,
stream);
// assemble the output column from the offsets/chars children
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace detail
// external API
// External API: multi-pattern regex replace over a strings column.
// Delegates to detail::replace_re with default stream semantics.
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE(); // NVTX range for profiling
return detail::replace_re(strings, patterns, repls, mr);
}
} // namespace strings
} // namespace cudf
|
d7af9d9a220a92906f125a3ce3c8114f74ed417c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur rel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur rel
// Multiplie point par point un vecteur complex par un vecteur rel
// Applique y = at*x +bt chaque point d'un vecteur rel
// Remplissage de la linearmem (tableau de pixels) associe la texture avec le tableau de rel
// Alpha n'est pas modifi
// Remplissage de la linearmem (tableau de pixels) associe la texture avec le tableau de bytes
// Alpha n'est pas modifi
// Remplissage de la linearmem (tableau de pixels) associe la texture avec le tableau de rel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-rgressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linaire la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linaire la source
__global__ void FillTex(void *surface, int width, int height, size_t pitch, double* src, int Mask)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel1;
if (x >= width || y >= height) return;
double w = src[x + width*y];
if (w<0) {w=0;}
if (w>253) {w=253;}
pixel1 = (unsigned char *)( (char*)surface + y*pitch) + 4*x;
//pixel1[3] = 255; // alpha = 255 sauf s'il fait partie du masque
for (int i=0;i<4;i++)
{ if (Mask & (1<<i)) pixel1[i] = w; }
} | d7af9d9a220a92906f125a3ce3c8114f74ed417c.cu | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
__global__ void FillTex(void *surface, int width, int height, size_t pitch, double* src, int Mask)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel1;
if (x >= width || y >= height) return;
double w = src[x + width*y];
if (w<0) {w=0;}
if (w>253) {w=253;}
pixel1 = (unsigned char *)( (char*)surface + y*pitch) + 4*x;
//pixel1[3] = 255; // alpha = 255 sauf s'il fait partie du masque
for (int i=0;i<4;i++)
{ if (Mask & (1<<i)) pixel1[i] = w; }
} |
209476088b770fc0106417b93c8e0f8d5529594f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_upper_batched.cu, normal z -> c, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ctrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
#include "ctrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
ctrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, magmaFloatComplex const * const * dA_array, int lda, magmaFloatComplex **dinvA_array)
{
int batchid = blockIdx.z;
ctrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part3_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
// =============================================================================
// vbatched kernels
/******************************************************************************/
__global__ void
ctrtri_diag_upper_kernel_vbatched(
magma_diag_t diag, magma_int_t* n, magmaFloatComplex const * const * dA_array, magma_int_t* lda, magmaFloatComplex **dinvA_array)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
if(blockIdx.x >= magma_ceildiv(my_n, IB)) return;
ctrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]);
}
// The kernels below have 3D grids
// grid.x and grid.y are independent from my_n
// only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y
/******************************************************************************/
__global__ void
triple_cgemm16_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm32_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part3_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
| 209476088b770fc0106417b93c8e0f8d5529594f.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/ztrtri_upper_batched.cu, normal z -> c, Sun Nov 20 20:20:30 2016
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
This file implements upper case, and is called by ctrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ctrtri.cuh"
#include "ctrtri_upper_device.cuh"
/******************************************************************************/
__global__ void
ctrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, magmaFloatComplex const * const * dA_array, int lda, magmaFloatComplex **dinvA_array)
{
int batchid = blockIdx.z;
ctrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part1_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part2_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part3_upper_kernel_batched(
int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_cgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
// =============================================================================
// vbatched kernels
/******************************************************************************/
__global__ void
ctrtri_diag_upper_kernel_vbatched(
magma_diag_t diag, magma_int_t* n, magmaFloatComplex const * const * dA_array, magma_int_t* lda, magmaFloatComplex **dinvA_array)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
if(blockIdx.x >= magma_ceildiv(my_n, IB)) return;
ctrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]);
}
// The kernels below have 3D grids
// grid.x and grid.y are independent from my_n
// only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y
/******************************************************************************/
__global__ void
triple_cgemm16_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm16_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm32_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm32_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm64_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part1_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part2_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
/******************************************************************************/
__global__ void
triple_cgemm_above64_part3_upper_kernel_vbatched(
magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages)
{
const int batchid = blockIdx.z;
const int my_n = (int)n[batchid];
if(my_n <= 0) return;
const int my_npages = magma_ceildiv(my_n, jb*2);
if(blockIdx.y >= my_npages*(jb/16) ) return;
triple_cgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages);
}
|
2e3e4848c826c7492d8c3142056f3d52ea25ed09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Slice_advanced.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void slice_advanced(float* ptrTabDev,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructeur *|
\*-------------------------------------*/
Slice_advanced::Slice_advanced(const Grid& grid, int n) :
n(n)
{
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
this->nbThread = grid.threadCounts();
this->sizeOctet = nbThread * sizeof(float); // octet
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrTabDev, sizeOctet);
}
ptrTab= new float[nbThread];
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif
}
}
Slice_advanced::~Slice_advanced(void)
{
//MM (device free)
{
Device::free(ptrTabDev);
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif
}
}
/*--------------------------------------*\
|* Methode *|
\*-------------------------------------*/
void Slice_advanced::run()
{
Device::lastCudaError("slice(before)"); // temp debug
hipLaunchKernelGGL(( slice_advanced), dim3(dg),dim3(db), 0, 0, ptrTabDev, n); // assynchrone
Device::lastCudaError("slice (after)"); // temp debug
//Device::synchronize(); // Temp,debug, only for printf in GPU
// MM (Device -> Host)
{
Device::memcpyDToH(ptrTab, ptrTabDev, sizeOctet); // barriere synchronisation implicite
}
double pi=0;
for(int i=0;i<nbThread;i++){
pi += ptrTab[i];
}
pi = pi / (double) n;
std::cout<<pi;
//TODO OpenMp reduction
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2e3e4848c826c7492d8c3142056f3d52ea25ed09.cu | #include "Slice_advanced.h"
#include <iostream>
#include "Device.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void slice_advanced(float* ptrTabDev,int n);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Constructeur *|
\*-------------------------------------*/
Slice_advanced::Slice_advanced(const Grid& grid, int n) :
n(n)
{
// Grid
{
this->dg = grid.dg;
this->db = grid.db;
}
this->nbThread = grid.threadCounts();
this->sizeOctet = nbThread * sizeof(float); // octet
// MM
{
// MM (malloc Device)
{
Device::malloc(&ptrTabDev, sizeOctet);
}
ptrTab= new float[nbThread];
Device::lastCudaError("AddVector MM (end allocation)"); // temp debug, facultatif
}
}
Slice_advanced::~Slice_advanced(void)
{
//MM (device free)
{
Device::free(ptrTabDev);
Device::lastCudaError("AddVector MM (end deallocation)"); // temp debug, facultatif
}
}
/*--------------------------------------*\
|* Methode *|
\*-------------------------------------*/
void Slice_advanced::run()
{
Device::lastCudaError("slice(before)"); // temp debug
slice_advanced<<<dg,db>>>(ptrTabDev, n); // assynchrone
Device::lastCudaError("slice (after)"); // temp debug
//Device::synchronize(); // Temp,debug, only for printf in GPU
// MM (Device -> Host)
{
Device::memcpyDToH(ptrTab, ptrTabDev, sizeOctet); // barriere synchronisation implicite
}
double pi=0;
for(int i=0;i<nbThread;i++){
pi += ptrTab[i];
}
pi = pi / (double) n;
std::cout<<pi;
//TODO OpenMp reduction
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
cf01c349d99b4bf1e6de0365e566828cb511da39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgeqr2_batched.cu, normal z -> d, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define BLOCK_SIZE 256
#define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda))
#include "dlarfg_devicesfunc.cuh"
/******************************************************************************/
static __device__
void dlarfx_device(
int m, int n, double *v, double *tau,
double *dc, magma_int_t ldc, double* sum)
{
if (n <= 0) return;
if (MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) return; // check singularity
const int tx = threadIdx.x;
double lsum;
for (int k=0; k < n; k++)
{
/* perform w := v' * C */
if (tx < BLOCK_SIZE)
{
if (tx == 0)
lsum = dc[0+ldc*k]; //since V[0] should be one
else
lsum = MAGMA_D_ZERO;
for (int j = tx+1; j < m; j += BLOCK_SIZE) {
lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j+ldc*k] );
}
sum[tx] = lsum;
}
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
__syncthreads();
double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
/* C := C - v * w */
if (tx < BLOCK_SIZE)
{
for (int j = tx+1; j < m; j += BLOCK_SIZE)
dc[j+ldc*k] += z__1 * v[j];
}
if (tx == 0) dc[0+ldc*k] += z__1;
__syncthreads();
}
}
/******************************************************************************/
static __device__
void dgeqr2_device( magma_int_t m, magma_int_t n,
double* dA, magma_int_t lda,
double *dtau,
double *dv,
double *sum,
double *swork,
double *scale,
double *sscale)
{
//lapack dlarfg, compute the norm, scale and generate the householder vector
dlarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale);
__syncthreads();
//update the trailing matix with the householder
dlarfx_device(m, n, dv, dtau, dA, lda, sum);
__syncthreads();
}
/******************************************************************************/
extern __shared__ double shared_data[];
/******************************************************************************/
__global__
void dgeqr2_sm_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
double **dtau_array)
{
double* dA = dA_array[blockIdx.z];
double* dtau = dtau_array[blockIdx.z];
double *sdata = (double*)shared_data;
const int tx = threadIdx.x;
__shared__ double scale;
__shared__ double sum[ BLOCK_SIZE ];
__shared__ double swork[ BLOCK_SIZE ];
__shared__ double sscale;
//load data from global to shared memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
sdata[j + s * m] = dA[j + s * lda];
}
}
__syncthreads();
for (int s=0; s < min(m,n); s++)
{
dgeqr2_device( m-s, n-(s+1),
&(sdata[s+(s+1)*m]), m,
dtau+s,
&(sdata[s+s*m]),
sum,
swork,
&scale,
&sscale);
} // end of s
//copy back to global memory
for (int s=0; s < n; s++)
{
for (int j = tx; j < m; j += BLOCK_SIZE)
{
dA[j + s * lda] = sdata[j + s * m];
}
}
}
/******************************************************************************/
// Batched QR kernel for panels too tall for fully-shared staging: only the
// current column lives in shared memory; the trailing matrix is updated
// directly in global memory. One thread block per matrix (blockIdx.z).
__global__
void dgeqr2_column_sm_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
                                      double **dtau_array)
{
    double* dA = dA_array[blockIdx.z];
    double* dtau = dtau_array[blockIdx.z];
    double *sdata = (double*)shared_data;   // dynamic shared memory: one column, length m
    __shared__ double scale;
    __shared__ double sum[ BLOCK_SIZE ];
    __shared__ double swork[ BLOCK_SIZE ];
    __shared__ double sscale;
    const int tx = threadIdx.x;
    for (int s=0; s < min(m,n); s++)
    {
        // load one column vector into shared memory: sdata
        for (int j = tx; j < m-s; j += BLOCK_SIZE)
        {
            sdata[j] = dA[s + j + s * lda];
        }
        __syncthreads();
        // sdata is written; dgeqr2_device syncs internally after each phase
        dgeqr2_device(m-s, n-(s+1),
                      &(dA[s+(s+1)*lda]), lda,
                      dtau+s,
                      sdata,
                      sum,
                      swork,
                      &scale,
                      &sscale);
        // write the generated Householder vector back into the column
        for (int j = tx; j < m-s; j += BLOCK_SIZE)
        {
            dA[s + j + s * lda] = sdata[j];
        }
        __syncthreads();
    }
}
/******************************************************************************/
// Fallback batched QR kernel: the matrix stays entirely in global memory
// (no dynamic shared memory). One thread block per matrix (blockIdx.z).
__global__
void dgeqr2_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
                            double **dtau_array)
{
    double* dA = dA_array[blockIdx.z];
    double* dtau = dtau_array[blockIdx.z];
    __shared__ double scale;
    __shared__ double sum[ BLOCK_SIZE ];
    __shared__ double swork[ BLOCK_SIZE ];
    __shared__ double sscale;
    for (int s=0; s < min(m,n); s++)
    {
        dgeqr2_device( m-s, n-(s+1),
                       &(dA[s+(s+1)*lda]), lda,
                       dtau+s,
                       &(dA[s+s*lda]),
                       sum,
                       swork,
                       &scale,
                       &sscale );
    }
}
/***************************************************************************//**
Purpose
-------
DGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This version implements the right-looking QR with non-blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalescent memory accesses LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as a failed memory allocation.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_dgeqr2_batched(magma_int_t m, magma_int_t n,
                     double **dA_array, magma_int_t ldda,
                     double **dtau_array,
                     magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
    magma_int_t k;
    /* Check arguments */
    magma_int_t arginfo = 0;
    if (m < 0)
        arginfo = -1;
    else if (n < 0)
        arginfo = -2;
    else if (ldda < max(1,m))
        arginfo = -4;
    if (arginfo != 0) {
        magma_xerbla( __func__, -(arginfo) );
        return arginfo;
    }
    k = min(m,n);
    dim3 blocks(1, 1, batchCount);   // one z-block per matrix of the batch
    dim3 threads(BLOCK_SIZE);
    // Dispatch on panel size: 42000 bytes leaves headroom below the 48KB
    // shared-memory limit for the kernels' static shared arrays.
    if (sizeof(double)*(m*k) <= 42000 /*sizeof(double) * 128 * k*/) // there is some static shared memory besides the dynamic one
    {
        // load the whole panel into shared memory, factorize it, and copy back to global memory
        // intended for small panels, to avoid overflowing shared memory
        // this kernel is composed of device routines and thus clean
        hipLaunchKernelGGL(( dgeqr2_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(double)*(m*k), queue->cuda_stream() ,
                            m, k, dA_array, ldda, dtau_array);
    }
    else
    {
        // load one column vector into shared memory, build its Householder reflector,
        // and use it to update the trailing matrix, which stays in global memory
        // one vector is normally smaller than 48K shared memory
        if (sizeof(double)*(m) < 42000)
            hipLaunchKernelGGL(( dgeqr2_column_sm_kernel_batched), dim3(blocks), dim3(threads), sizeof(double)*(m), queue->cuda_stream() ,
                                m, k, dA_array, ldda, dtau_array);
        else
            // do not use dynamic shared memory at all
            hipLaunchKernelGGL(( dgeqr2_kernel_batched), dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
                                m, k, dA_array, ldda, dtau_array);
    }
    return arginfo;
}
| cf01c349d99b4bf1e6de0365e566828cb511da39.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgeqr2_batched.cu, normal z -> d, Sun Nov 20 20:20:31 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define BLOCK_SIZE 256
#define dA(a_1,a_2) (dA + (a_1) + (a_2)*(local_lda))
#include "dlarfg_devicesfunc.cuh"
/******************************************************************************/
// Apply the Householder reflector H = I - tau * v * v' to the m x n matrix C,
// one column at a time, assuming v[0] == 1 (its stored value is ignored).
// `sum` is a BLOCK_SIZE shared-memory reduction buffer.
static __device__
void dlarfx_device(
    int m, int n, double *v, double *tau,
    double *dc, magma_int_t ldc, double* sum)
{
    if (n <= 0) return;
    if (MAGMA_D_EQUAL(*tau, MAGMA_D_ZERO) ) return; // check singularity: H is the identity
    const int tx = threadIdx.x;
    double lsum;
    for (int k=0; k < n; k++)
    {
        /* perform w := v' * C (block-strided partial dot products) */
        if (tx < BLOCK_SIZE)
        {
            if (tx == 0)
                lsum = dc[0+ldc*k]; //since V[0] should be one
            else
                lsum = MAGMA_D_ZERO;
            for (int j = tx+1; j < m; j += BLOCK_SIZE) {
                lsum += MAGMA_D_MUL( MAGMA_D_CONJ( v[j] ), dc[j+ldc*k] );
            }
            sum[tx] = lsum;
        }
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );
        __syncthreads();
        double z__1 = - MAGMA_D_CONJ(*tau) * sum[0];
        /* C := C - v * w */
        if (tx < BLOCK_SIZE)
        {
            for (int j = tx+1; j < m; j += BLOCK_SIZE)
                dc[j+ldc*k] += z__1 * v[j];
        }
        if (tx == 0) dc[0+ldc*k] += z__1;   // row 0 uses the implicit v[0] == 1
        __syncthreads();
    }
}
/******************************************************************************/
// Factor one QR step for a single column: generate the Householder reflector
// from the current column (dlarfg) and apply it to the m x n trailing
// submatrix (dlarfx). sum/swork/scale/sscale are block-shared scratch.
static __device__
void dgeqr2_device( magma_int_t m, magma_int_t n,
                    double* dA, magma_int_t lda,
                    double *dtau,
                    double *dv,      // in/out: column to reflect; becomes the Householder vector
                    double *sum,     // BLOCK_SIZE shared reduction workspace
                    double *swork,   // BLOCK_SIZE shared norm workspace
                    double *scale,
                    double *sscale)
{
    // lapack dlarfg: compute the norm, scale, and generate the householder vector
    dlarfg_device(m, dv, &(dv[1]), 1, dtau, swork, sscale, scale);
    __syncthreads();
    // update the trailing matrix with the householder reflector
    dlarfx_device(m, n, dv, dtau, dA, lda, sum);
    __syncthreads();
}
/******************************************************************************/
extern __shared__ double shared_data[];
/******************************************************************************/
// Batched QR kernel: the whole m x n panel of each matrix is staged in dynamic
// shared memory, factored column by column, then written back. One thread
// block (blockIdx.z) handles one matrix of the batch.
__global__
void dgeqr2_sm_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
                               double **dtau_array)
{
    double* dA = dA_array[blockIdx.z];
    double* dtau = dtau_array[blockIdx.z];
    double *sdata = (double*)shared_data;   // dynamic shared memory: full m x n panel
    const int tx = threadIdx.x;
    __shared__ double scale;
    __shared__ double sum[ BLOCK_SIZE ];
    __shared__ double swork[ BLOCK_SIZE ];
    __shared__ double sscale;
    // load data from global to shared memory (strided over threads)
    for (int s=0; s < n; s++)
    {
        for (int j = tx; j < m; j += BLOCK_SIZE)
        {
            sdata[j + s * m] = dA[j + s * lda];
        }
    }
    __syncthreads();
    // factor the panel entirely in shared memory; note leading dimension is m here
    for (int s=0; s < min(m,n); s++)
    {
        dgeqr2_device( m-s, n-(s+1),
                       &(sdata[s+(s+1)*m]), m,
                       dtau+s,
                       &(sdata[s+s*m]),
                       sum,
                       swork,
                       &scale,
                       &sscale);
    } // end of s
    // copy the factored panel back to global memory
    for (int s=0; s < n; s++)
    {
        for (int j = tx; j < m; j += BLOCK_SIZE)
        {
            dA[j + s * lda] = sdata[j + s * m];
        }
    }
}
/******************************************************************************/
// Batched QR kernel for panels too tall for fully-shared staging: only the
// current column lives in shared memory; the trailing matrix is updated
// directly in global memory. One thread block per matrix (blockIdx.z).
__global__
void dgeqr2_column_sm_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
                                      double **dtau_array)
{
    double* dA = dA_array[blockIdx.z];
    double* dtau = dtau_array[blockIdx.z];
    double *sdata = (double*)shared_data;   // dynamic shared memory: one column, length m
    __shared__ double scale;
    __shared__ double sum[ BLOCK_SIZE ];
    __shared__ double swork[ BLOCK_SIZE ];
    __shared__ double sscale;
    const int tx = threadIdx.x;
    for (int s=0; s < min(m,n); s++)
    {
        // load one column vector into shared memory: sdata
        for (int j = tx; j < m-s; j += BLOCK_SIZE)
        {
            sdata[j] = dA[s + j + s * lda];
        }
        __syncthreads();
        // sdata is written; dgeqr2_device syncs internally after each phase
        dgeqr2_device(m-s, n-(s+1),
                      &(dA[s+(s+1)*lda]), lda,
                      dtau+s,
                      sdata,
                      sum,
                      swork,
                      &scale,
                      &sscale);
        // write the generated Householder vector back into the column
        for (int j = tx; j < m-s; j += BLOCK_SIZE)
        {
            dA[s + j + s * lda] = sdata[j];
        }
        __syncthreads();
    }
}
/******************************************************************************/
// Fallback batched QR kernel: the matrix stays entirely in global memory
// (no dynamic shared memory). One thread block per matrix (blockIdx.z).
__global__
void dgeqr2_kernel_batched( int m, int n, double** dA_array, magma_int_t lda,
                            double **dtau_array)
{
    double* dA = dA_array[blockIdx.z];
    double* dtau = dtau_array[blockIdx.z];
    __shared__ double scale;
    __shared__ double sum[ BLOCK_SIZE ];
    __shared__ double swork[ BLOCK_SIZE ];
    __shared__ double sscale;
    for (int s=0; s < min(m,n); s++)
    {
        dgeqr2_device( m-s, n-(s+1),
                       &(dA[s+(s+1)*lda]), lda,
                       dtau+s,
                       &(dA[s+s*lda]),
                       sum,
                       swork,
                       &scale,
                       &sscale );
    }
}
/***************************************************************************//**
Purpose
-------
DGEQR2 computes a QR factorization of a real m by n matrix A:
A = Q * R.
This version implements the right-looking QR with non-blocking.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, the elements on and above the diagonal of the array
contain the min(M,N)-by-N upper trapezoidal matrix R (R is
upper triangular if m >= n); the elements below the diagonal,
with the array TAU, represent the orthogonal matrix Q as a
product of min(m,n) elementary reflectors (see Further
Details).
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
To benefit from coalescent memory accesses LDDA must be
divisible by 16.
@param[out]
dtau_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array, dimension (min(M,N))
The scalar factors of the elementary reflectors (see Further
Details).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occurred, such as a failed memory allocation.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
Further Details
---------------
The matrix Q is represented as a product of elementary reflectors
Q = H(1) H(2) . . . H(k), where k = min(m,n).
Each H(i) has the form
H(i) = I - tau * v * v'
where tau is a real scalar, and v is a real vector with
v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i),
and tau in TAU(i).
@ingroup magma_geqr2_batched
*******************************************************************************/
extern "C" magma_int_t
magma_dgeqr2_batched(magma_int_t m, magma_int_t n,
                     double **dA_array, magma_int_t ldda,
                     double **dtau_array,
                     magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue)
{
    magma_int_t k;
    /* Check arguments */
    magma_int_t arginfo = 0;
    if (m < 0)
        arginfo = -1;
    else if (n < 0)
        arginfo = -2;
    else if (ldda < max(1,m))
        arginfo = -4;
    if (arginfo != 0) {
        magma_xerbla( __func__, -(arginfo) );
        return arginfo;
    }
    k = min(m,n);
    dim3 blocks(1, 1, batchCount);   // one z-block per matrix of the batch
    dim3 threads(BLOCK_SIZE);
    // Dispatch on panel size: 42000 bytes leaves headroom below the 48KB
    // shared-memory limit for the kernels' static shared arrays.
    if (sizeof(double)*(m*k) <= 42000 /*sizeof(double) * 128 * k*/) // there is some static shared memory besides the dynamic one
    {
        // load the whole panel into shared memory, factorize it, and copy back to global memory
        // intended for small panels, to avoid overflowing shared memory
        // this kernel is composed of device routines and thus clean
        dgeqr2_sm_kernel_batched<<< blocks, threads, sizeof(double)*(m*k), queue->cuda_stream() >>>
            (m, k, dA_array, ldda, dtau_array);
    }
    else
    {
        // load one column vector into shared memory, build its Householder reflector,
        // and use it to update the trailing matrix, which stays in global memory
        // one vector is normally smaller than 48K shared memory
        if (sizeof(double)*(m) < 42000)
            dgeqr2_column_sm_kernel_batched<<< blocks, threads, sizeof(double)*(m), queue->cuda_stream() >>>
                (m, k, dA_array, ldda, dtau_array);
        else
            // do not use dynamic shared memory at all
            dgeqr2_kernel_batched<<< blocks, threads, 0, queue->cuda_stream() >>>
                (m, k, dA_array, ldda, dtau_array);
    }
    return arginfo;
}
|
c20d13f6caba62a5840f18ee5300c01d25ebd0c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file indexing_op.cu
* \brief GPU implementation of indexing operator
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
/*! \brief Raises the device-side flag (*out = 1) for any index outside
 *         [min, max]. NaN inputs fail both comparisons and are not flagged,
 *         matching the original element-wise semantics.
 */
struct is_valid_check {
  template <typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  char* out,
                                  const DType* data,
                                  const DType min,
                                  const DType max) {
    const DType value = data[i];
    if (value < min) {
      *out = 1;
    } else if (value > max) {
      *out = 1;
    }
  }
};
/*! \brief Accumulate take()'s head gradient into a compacted row-sparse
 *         gradient. One thread handles one (index element, feature) pair;
 *         prefix_sum[irow] - 1 maps an original row id to its compacted row.
 *         Uses atomicAdd because several indices may target the same row,
 *         so the accumulation order is non-deterministic.
 */
struct AddTakeGradRspGPUKernel {
  template <typename DType, typename IType>
  __device__ __forceinline__ static void Map(int tid,
                                             DType* out,
                                             const nnvm::dim_t* prefix_sum,
                                             const IType* data,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length) {
    using nnvm::dim_t;
    const dim_t data_i = tid / row_length;   // which index element
    const dim_t grad_i = tid % row_length;   // which feature column
    const dim_t irow = static_cast<dim_t>(data[data_i]);
    const dim_t rsp_row = prefix_sum[irow] - 1;   // compacted row in the rsp output
    const DType val = ograd[data_i * row_length + grad_i];
    atomicAdd(static_cast<DType*>(&(out[rsp_row * row_length + grad_i])), val);
  }
};
/*
* \brief kernel for backward computation for take, executed with deterministic order
* \param thread_id the thread id
* \param out the output gradient data
* \param lookup_table the table to lookup the position of an id in gradient array
* \param sorted_data the sorted data input
* \param original_idx the original indices of the sorted data input
* \param ograd head gradient
* \param row_length the output dimension
* \param num_threads_per_row the number of threads to process a row together
* \param SZ the number of features a thread is responsible for
*/
template <int SZ>
struct AddTakeGradRspDeterministicKernel {
  /*!
   * \brief Deterministic gradient accumulation: the indices are pre-sorted, so
   *        only the first thread of each run of equal indices walks the whole
   *        run and sums its gradients in a fixed order (no atomics).
   *        Each thread covers SZ consecutive features of one output row.
   */
  template <typename DType>
  __device__ __forceinline__ static void Map(int thread_id,
                                             DType* out,
                                             const nnvm::dim_t* lookup_table,
                                             const nnvm::dim_t* sorted_data,
                                             const nnvm::dim_t data_size,
                                             const nnvm::dim_t* original_idx,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length,
                                             const nnvm::dim_t num_threads_per_row) {
    using nnvm::dim_t;
    int tid = thread_id / num_threads_per_row;            // which sorted index element
    const int feature_start = thread_id % num_threads_per_row * SZ;
    int num_features = SZ;
    // clamp the last slice so it does not run past the row
    if (feature_start + num_features > row_length) {
      num_features = row_length - feature_start;
    }
    // only the first occurrence of each distinct index processes its whole run
    if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
      DType acc[SZ];   // per-thread register accumulator
#pragma unroll
      for (int i = 0; i < SZ; i++) {
        acc[i] = 0;
      }
      const dim_t data = sorted_data[tid];
      const dim_t row_id = lookup_table[data];            // compacted output row
      const dim_t out_offset = row_id * row_length + feature_start;
      // walk the run of equal indices; original_idx recovers the ograd row
      do {
        const dim_t idx = original_idx[tid];
        const dim_t ograd_offset = idx * row_length + feature_start;
        for (int i = 0; i < num_features; i++) {
          acc[i] += ograd[ograd_offset + i];
        }
        tid++;
      } while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
      for (int i = 0; i < num_features; i++) {
        out[out_offset + i] += acc[i];
      }
    }
  }
};
/*! \brief Row-gather along axis 0: out[i] = in[idx[i / M]][i % M].
 *         clip == true clamps out-of-range indices to [0, K-1];
 *         clip == false wraps them modulo K (Python-style negative indexing).
 */
template <bool clip = true>
struct TakeZeroAxisGPU {
  // assume that idx have been flattened to a 1-D tensor (N,)
  // assume that out_data and in_data have been flattened to 2-D tensors, (N, M) and (K, M)
  // M is the number of columns of in_data and out_data
  // K is the number of rows of in_data
  // i is the index of out_data
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out_data,
                                  const DType* in_data,
                                  const IType* idx,
                                  const int64_t M,
                                  const int64_t K) {
    int64_t j = static_cast<int64_t>(idx[i / M]);
    if (clip) {
      // clamp into [0, K-1]
      if (j <= 0)
        j = 0;
      else if (j >= K)
        j = K - 1;
    } else {
      // wrap modulo K; fix up the sign of the remainder for negative j
      j = j % K;
      j += (j < 0) ? K : 0;
    }
    out_data[i] = in_data[j * M + i % M];
  }
};
/*
* \brief returns true if all indices are between [min, max]
* \param s the stream
* \param data_ptr the indices on the stream
* \param data_size the number of indices to examine
* \param min the expected min value for indices
* \param max the expected max value for indices
* \param is_valid_ptr the temporary workspace
*/
template <typename DType>
bool CheckIndexOutOfBound(mshadow::Stream<gpu>* s,
                          const DType* data_ptr,
                          size_t data_size,
                          const DType min,
                          const DType max,
                          char* is_valid_ptr) {
  using namespace mxnet_op;
  int32_t is_valid = 0;
  // zero the device-side flag, then set it to 1 if any index falls outside [min, max]
  Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
  Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
  // only one byte is copied into the zero-initialized int32 flag; the stream
  // sync below makes the host read safe
  CUDA_CALL(hipMemcpyAsync(&is_valid,
                           is_valid_ptr,
                           sizeof(char),
                           hipMemcpyDeviceToHost,
                           mshadow::Stream<gpu>::GetStream(s)));
  CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  // true when every index is in bounds
  return is_valid == 0;
}
// Embedding forward implementation with dense weight
template <>
void EmbeddingOpForwardDnsImpl<gpu>(mshadow::Stream<gpu>* s,
                                    const TBlob& data,
                                    const TBlob& weight,
                                    const OpReqType req,
                                    const TBlob& output) {
  using namespace mxnet_op;
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& oshape = output.shape_;
  MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
      // flatten indices to 1-D and the output to 2-D (rows = all leading dims)
      Tensor<gpu, 1, IType> idx =
          data.get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
      Tensor<gpu, 2, DType> wmat = weight.get<gpu, 2, DType>(s);
      Tensor<gpu, 2, DType> out = output.get_with_shape<gpu, 2, DType>(
          Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
      // gather weight rows; indices are clipped to [0, num_rows - 1]
      Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(
          s, oshape.Size(), out.dptr_, wmat.dptr_, idx.dptr_, wmat.shape_[1], wmat.shape_[0]);
    });
  });
}
template <>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
                                          const TBlob& data,
                                          const NDArray& weight,
                                          const OpReqType req,
                                          const TBlob& output) {
  if (req == kNullOp)
    return;
  using namespace rowsparse;
  using namespace mxnet_op;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  // zeros weight: an uninitialized row-sparse weight means every row is zero,
  // so the gathered output is zero as well
  if (req == kWriteTo && !weight.storage_initialized()) {
    size_t out_size = output.shape_.Size();
    MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
      Fill<false>(
          s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0);
    })
    return;
  }
  // check out-of-bound indices before gathering
  MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
    DType min = 0;
    DType max = static_cast<DType>(weight.shape()[0] - 1);
    DType* data_ptr = data.dptr<DType>();
    size_t data_size = data.shape_.Size();
    Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
    char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
    bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, is_valid_ptr);
    CHECK(is_valid) << "SparseEmbedding input contains data out of bound";
  })
  // the weight is actually dense (all rows present): take the dense fast path
  if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
    EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
  } else {
    EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
  }
}
/*
 * \brief Deterministic backward pass for sparse embedding: sorts the indices
 *        (with their original positions), extracts the unique rows with CUB,
 *        builds a row -> compacted-row lookup table, and accumulates the head
 *        gradient per row in a fixed order (no atomics).
 *        All scratch buffers live in one requested workspace, laid out as
 *        lookup_table | sorted_data | original_idx | temp_storage.
 */
template <typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
                                              const TBlob& ograd,
                                              const TBlob& data,
                                              const OpReqType req,
                                              const NDArray& output) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const dim_t num_rows = output.shape()[0];
  const dim_t row_length = output.shape()[1];
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // temp resource declarations
  dim_t* lookup_table = nullptr;
  void* temp_storage = nullptr;
  dim_t* sorted_data = nullptr;
  dim_t* original_idx = nullptr;
  // calculate number of bytes for temp resources
  size_t lookup_table_bytes = num_rows * sizeof(dim_t);
  size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
  size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
  size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
  size_t unique_workspace_bytes = 0;
  // dry-run CUB Unique (null temp storage) to query its workspace requirement;
  // unique operations will be applied on sorted data
  size_t* null_ptr = nullptr;
  hipcub::DeviceSelect::Unique(nullptr,
                               unique_workspace_bytes,
                               sorted_data,
                               sorted_data,
                               null_ptr,
                               data_size,
                               Stream<gpu>::GetStream(s));
  // One more space reserved for unique count
  size_t temp_workspace_bytes = ::max(unique_workspace_bytes, sort_workspace_size);
  size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
                               original_idx_storage_bytes + temp_workspace_bytes;
  // request resource and split it. layout is:
  // lookup_table, sorted_data, original_idx, temp_storage
  Tensor<gpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
  lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
  sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
  original_idx =
      reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes + sorted_data_storage_bytes);
  temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
  // check out-of-bound indices
  {
    IType min = 0;
    IType max = static_cast<IType>(output.shape()[0] - 1);
    IType* data_ptr = data.dptr<IType>();
    size_t data_size = data.shape_.Size();
    bool is_valid = CheckIndexOutOfBound(
        s, data_ptr, data_size, min, max, reinterpret_cast<char*>(temp_storage));
    CHECK(is_valid) << "Embedding input contains data out of bound";
  }
  // make a copy of the data, to be sorted
  TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
  auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
  mxnet_op::copy(s, sorted_data_blob, data);
  // generate original idx (0, 1, 2, ...) so sort positions can be traced back
  Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
  Kernel<range_fwd, gpu>::Launch(
      s, data_size, 1, static_cast<dim_t>(0), static_cast<dim_t>(1), kWriteTo, original_idx);
  // sort data with its original idx; only ilog2(num_rows-1) radix bits are needed
  int num_bits = common::ilog2ui(num_rows - 1);
  char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
  Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr, Shape1(sort_workspace_size), s);
  SortByKey(sorted_data_tensor, original_idx_tensor, true, &temp_storage_tensor, 0, num_bits);
  // compute unique row ids based on sorted values.
  output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
  // fill row_idx array of output matrix, using the row_flg values;
  // the unique count is written one past the end of the idx array
  RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
  hipcub::DeviceSelect::Unique(temp_storage_ptr,
                               unique_workspace_bytes,
                               sorted_data,
                               grad_row_idx,
                               grad_row_idx + data_size,
                               data_size,
                               Stream<gpu>::GetStream(s));
  dim_t nnr = 0;
  // NOTE(review): only sizeof(RType) bytes are copied into the zero-initialized
  // dim_t; this assumes little-endian layout when sizeof(RType) < sizeof(dim_t)
  CUDA_CALL(hipMemcpyAsync(&nnr,
                           grad_row_idx + data_size,
                           sizeof(RType),
                           hipMemcpyDeviceToHost,
                           mshadow::Stream<gpu>::GetStream(s)));
  CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  CHECK_EQ(output.shape().ndim(), 2) << "Unexpected ndim";
  output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
  output.set_aux_shape(kIdx, Shape1(nnr));
  // generate lookup table (original row id -> compacted row id)
  Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
  // accumulate gradients into the zero-filled compacted rows
  DType* grad_data = output.data().dptr<DType>();
  Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
  const int SZ = 4;   // features per thread
  const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
  Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s,
                                                             data_size * num_threads_per_row,
                                                             grad_data,
                                                             lookup_table,
                                                             sorted_data,
                                                             data_size,
                                                             original_idx,
                                                             ograd.dptr<DType>(),
                                                             row_length,
                                                             num_threads_per_row);
}
// Type-dispatch wrapper for the deterministic sparse-embedding backward pass.
// Handles the trivial cases (null req, empty indices) before dispatching.
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
                                                          const TBlob& ograd,
                                                          const TBlob& data,
                                                          const OpReqType req,
                                                          const NDArray& output) {
  using nnvm::dim_t;
  if (req == kNullOp)
    return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // no indices -> the gradient is an all-zero row-sparse array
  if (data_size == 0) {
    FillZerosRspImpl(s, output);
    return;
  }
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
        SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(
            ctx, ograd, data, req, output);
      });
    });
  });
}
template <>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
                                                  const OpContext& ctx,
                                                  const TBlob& ograd,
                                                  const TBlob& data,
                                                  const OpReqType req,
                                                  const NDArray& output) {
  // deterministic path: sort-based accumulation with a fixed summation order
  if (deterministic) {
    SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
    return;
  }
  // non-deterministic path: mark occupied rows, prefix-sum them into a
  // row -> compacted-row map, then scatter-add gradients with atomics
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace mshadow::expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  if (req == kNullOp)
    return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  // Request temporary storage for marking non-zero rows and prefix sum
  Stream<gpu>* s = ctx.get_stream<gpu>();
  dim_t num_rows = output.shape()[0];
  dim_t row_length = output.shape()[1];
  dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  dim_t num_threads;
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
        dim_t* prefix_sum = nullptr;
        void* d_temp_storage = nullptr;
        size_t temp_storage_bytes = 0;
        // dry run (null temp storage) to query CUB's scan workspace size
        hipcub::DeviceScan::InclusiveSum(d_temp_storage,
                                         temp_storage_bytes,
                                         prefix_sum,
                                         prefix_sum,
                                         num_rows,
                                         Stream<gpu>::GetStream(s));
        Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
            Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s);
        prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
        d_temp_storage = workspace.dptr_ + num_rows * sizeof(dim_t);
        num_threads = num_rows;
        // zero the occupancy flags, mark each referenced row, then scan so
        // prefix_sum[r] - 1 becomes the compacted row id of row r
        Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
        Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
        hipcub::DeviceScan::InclusiveSum(d_temp_storage,
                                         temp_storage_bytes,
                                         prefix_sum,
                                         prefix_sum,
                                         num_rows,
                                         mshadow::Stream<gpu>::GetStream(s));
        // the last prefix-sum entry is the number of non-zero rows (nnr)
        dim_t nnr = 0;
        CUDA_CALL(hipMemcpyAsync(&nnr,
                                 &prefix_sum[num_rows - 1],
                                 sizeof(dim_t),
                                 hipMemcpyDeviceToHost,
                                 mshadow::Stream<gpu>::GetStream(s)));
        CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
        if (nnr == 0) {
          FillZerosRspImpl(s, output);
          return;
        }
        output.CheckAndAlloc({Shape1(nnr)});
        RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
        // fill row_idx array of output matrix, using the row_flg values
        Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows);
        // prefill with zeros
        DType* grad_data = output.data().dptr<DType>();
        Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
        // add the final gradients (atomic scatter-add, one thread per element)
        num_threads = row_length * data_size;
        Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s,
                                                     num_threads,
                                                     grad_data,
                                                     prefix_sum,
                                                     data.dptr<IType>(),
                                                     ograd.dptr<DType>(),
                                                     row_length);
      });
    });
  });
}
/*
* \brief check if any of the indices is out of bound
* \param s the stream
* \param idx_ptr the indices on the stream
* \param N the number of indices in an axis
* \param M the number of axes to examine
* \param mshape the array that stores shape for each dimension
* \param is_valid_dim_ptr the temporary workspace that contains out-of-bound indices
*/
template <typename DType>
void GatherNDCheckBoundGPU(mshadow::Stream<gpu>* s,
                           const DType* idx_ptr,
                           index_t N,
                           index_t M,
                           const mshadow::Shape<10> mshape,
                           DType* is_valid_dim_ptr) {
  using namespace mxnet_op;
  // record, per axis, an offending index value (0 when everything is in range)
  Kernel<set_zero, gpu>::Launch(s, M, is_valid_dim_ptr);
  Kernel<is_valid_check_gather_nd, gpu>::Launch(s, M, is_valid_dim_ptr, idx_ptr, N, mshape);
  std::vector<DType> is_valid_dim(M);
  CUDA_CALL(hipMemcpyAsync(is_valid_dim.data(),
                           is_valid_dim_ptr,
                           sizeof(DType) * M,
                           hipMemcpyDeviceToHost,
                           mshadow::Stream<gpu>::GetStream(s)));
  CUDA_CALL(hipStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  // valid range for axis m is [-mshape[m], mshape[m] - 1] (negative = from end)
  for (int m = 0; m < M; m++) {
    if (is_valid_dim[m] > mshape[m] - 1 || is_valid_dim[m] < -mshape[m]) {
      LOG(FATAL) << "IndexError: index " << is_valid_dim[m] << " is out of bounds for axis " << m
                 << " with size " << mshape[m];
    }
  }
}
// GPU forward for gather_nd: out[i_1..i_{N}] = data[indices[:, i], ...].
// inputs[0] = data, inputs[1] = indices of shape (M, N); the first M axes of
// data are indexed, the remaining axes (K elements) are copied wholesale.
void GatherNDForwardGPU(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp)
    return;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const mxnet::TShape& dshape = inputs[0].shape_;
  const mxnet::TShape& ishape = inputs[1].shape_;
  int M = ishape[0];                        // number of indexed axes
  int N = ishape.Size() / M;                // number of index tuples
  int K = dshape.ProdShape(M, dshape.ndim());   // elements per gathered slice
  // strides[m]: flat-offset multiplier for axis m; mshape[m]: its extent
  mshadow::Shape<10> strides;
  mshadow::Shape<10> mshape;
  for (int i = M - 1, stride = K; i >= 0; stride *= dshape[i], --i) {
    strides[i] = stride;
    mshape[i] = dshape[i];
  }
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { // output data type switch
    MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, { // indices data type switch
      // check whether indices are out of bound (fatal if so)
      IType* idx_ptr = inputs[1].dptr<IType>();
      Tensor<gpu, 1, IType> workspace =
          ctx.requested[0].get_space_typed<gpu, 1, IType>(Shape1(M), s);
      IType* is_valid_dim_ptr = reinterpret_cast<IType*>(workspace.dptr_);
      GatherNDCheckBoundGPU(s, idx_ptr, N, M, mshape, is_valid_dim_ptr);
      Kernel<gather_nd, gpu>::Launch(s,
                                     N,
                                     req[0],
                                     N,
                                     M,
                                     K,
                                     strides,
                                     mshape,
                                     outputs[0].dptr<DType>(),
                                     inputs[0].dptr<DType>(),
                                     inputs[1].dptr<IType>());
    });
  });
}
/*! \brief Backward of gather_nd: scatter-add each gathered slice's gradient
 *         back to its source location. One thread per index tuple; atomicAdd
 *         handles duplicate index tuples.
 */
struct backward_gather_nd_gpu {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  index_t N,
                                  index_t M,
                                  index_t K,
                                  const mshadow::Shape<10> strides,
                                  DType* out,
                                  const DType* data,
                                  const IType* indices) {
    index_t offset = 0;
    for (index_t j = 0; j < M; ++j) {
      // NOTE(review): indices are narrowed through int here; values beyond
      // int range would wrap — confirm upstream bound checks make this safe
      offset += strides[j] * static_cast<int>(indices[j * N + i]);
    }
    for (index_t j = 0; j < K; ++j) {
      atomicAdd(out + (offset + j), data[i * K + j]);
    }
  }
};
// GPU dispatch for gather_nd backward: launches one thread per index tuple
// to atomically scatter-add `data` (head gradient) into `out`.
template <typename DType, typename IType>
inline void GatherNDBackwardImpl(index_t N,
                                 index_t M,
                                 index_t K,
                                 const mshadow::Shape<10> strides,
                                 DType* out,
                                 const DType* data,
                                 const IType* indices,
                                 mshadow::Stream<gpu>* s) {
  mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
// GPU forward for `take`: gathers slices of the input array along `axis` at
// the positions given by the indices tensor. Mode kRaise validates indices on
// device first (synchronizing with the host); kClip/kWrap handle out-of-range
// indices inside the gather kernels.
template <>
void TakeOpForward<gpu>(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[take_::kOut] == kNullOp)
    return;
  const TakeParam& param = nnvm::get<TakeParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& idxshape = inputs[take_::kIdx].shape_;
  const mxnet::TShape& arrshape = inputs[take_::kArr].shape_;
  const mxnet::TShape& oshape = outputs[take_::kOut].shape_;
  // Empty indices => empty output; nothing to launch.
  if (idxshape.Size() == 0) {
    return;
  }
  Stream<gpu>* s = ctx.get_stream<gpu>();
  // Normalize a negative axis to its non-negative equivalent.
  const int actual_axis = param.axis + ((param.axis < 0) ? arrshape.ndim() : 0);
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[take_::kOut].type_flag_, DType, { // output data type
    MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[take_::kIdx].type_flag_, IType, { // index data type
      if (param.mode == take_::kRaise) {
        // check out-of-bound indices (fails hard with CHECK below)
        IType min = 0;
        IType max = static_cast<IType>(arrshape[actual_axis] - 1);
        IType* idx_ptr = inputs[take_::kIdx].dptr<IType>();
        size_t idx_size = idxshape.Size();
        // One device byte used as an "invalid seen" flag.
        Tensor<gpu, 1, char> workspace =
            ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
        char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
        bool is_valid = CheckIndexOutOfBound(s, idx_ptr, idx_size, min, max, is_valid_ptr);
        CHECK(is_valid) << "Take indices contains indices out of bound";
      }
      if (actual_axis == 0) {
        // Fast path: gathering whole leading rows.
        if (param.mode == take_::kClip) {
          Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(s,
                                                     oshape.Size(),
                                                     outputs[take_::kOut].dptr<DType>(),
                                                     inputs[take_::kArr].dptr<DType>(),
                                                     inputs[take_::kIdx].dptr<IType>(),
                                                     oshape.Size() / idxshape.Size(),
                                                     arrshape[0]);
        } else {
          Kernel<TakeZeroAxisGPU<false>, gpu>::Launch(s,
                                                      oshape.Size(),
                                                      outputs[take_::kOut].dptr<DType>(),
                                                      inputs[take_::kArr].dptr<DType>(),
                                                      inputs[take_::kIdx].dptr<IType>(),
                                                      oshape.Size() / idxshape.Size(),
                                                      arrshape[0]);
        }
      } else {
        // General path: precompute element strides for input and output shapes
        // so the kernel can map flat output positions back to input positions.
        mshadow::Shape<10> in_strides;
        int stride = 1;
        for (int i = arrshape.ndim() - 1; i >= 0; stride *= arrshape[i], --i) {
          in_strides[i] = stride;
        }
        mshadow::Shape<10> out_strides;
        stride = 1;
        for (int i = oshape.ndim() - 1; i >= 0; stride *= oshape[i], --i) {
          out_strides[i] = stride;
        }
        if (param.mode == take_::kClip) {
          Kernel<TakeNonzeroAxis<true>, gpu>::Launch(s,
                                                     oshape.Size(),
                                                     outputs[take_::kOut].dptr<DType>(),
                                                     inputs[take_::kArr].dptr<DType>(),
                                                     inputs[take_::kIdx].dptr<IType>(),
                                                     out_strides[actual_axis - 1],
                                                     in_strides[actual_axis - 1],
                                                     in_strides[actual_axis],
                                                     arrshape.ndim(),
                                                     oshape.ndim(),
                                                     idxshape.ndim(),
                                                     arrshape[actual_axis],
                                                     actual_axis);
        } else {
          Kernel<TakeNonzeroAxis<false>, gpu>::Launch(s,
                                                      oshape.Size(),
                                                      outputs[take_::kOut].dptr<DType>(),
                                                      inputs[take_::kArr].dptr<DType>(),
                                                      inputs[take_::kIdx].dptr<IType>(),
                                                      out_strides[actual_axis - 1],
                                                      in_strides[actual_axis - 1],
                                                      in_strides[actual_axis],
                                                      arrshape.ndim(),
                                                      oshape.ndim(),
                                                      idxshape.ndim(),
                                                      arrshape[actual_axis],
                                                      actual_axis);
        }
      }
    });
  });
}
namespace {
/*
 * \brief returns the number of bits needed to represent a, i.e.
 *        floor(log2(a)) + 1 for a > 0; returns 1 for a == 0.
 *        (Used to bound the number of radix-sort bits.)
 */
inline int ilog2(unsigned int a) {
  int bits = 1;
  for (unsigned int v = a >> 1; v != 0; v >>= 1) {
    ++bits;
  }
  return bits;
}
}  // namespace
/*
 * \brief finds the lower and upper-bound positions of each unique element within
 * a sorted input array. One thread per possible vocabulary id.
 *
 * \param sorted_data input elements previously sorted
 * \param bounds output containing all lower-bound followed by all upper-bound positions
 * \param data_dim total number of elements in the input array
 * \param vocab_dim maximum number of unique elements
 */
template <typename IType>
__global__ void EmbeddingFindBounds(const IType* sorted_data,
                                    IType* bounds,
                                    const index_t data_dim,
                                    const index_t vocab_dim) {
  const index_t id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id >= vocab_dim)
    return;
  // Binary search to find lower bound: stored at bounds[0..vocab_dim-1]
  IType lower_bound = 0;
  IType upper_bound = data_dim - 1;
  IType mean;
  while (lower_bound < upper_bound) {
    mean = (lower_bound + upper_bound) / 2;
    if (id <= sorted_data[mean])
      upper_bound = mean;
    else
      lower_bound = mean + 1;
  }
  bool found_row = (sorted_data[lower_bound] == id);
  if (!found_row) {
    // Sentinels chosen so a consumer computing (upper - lower + 1)
    // sees zero occurrences for ids absent from sorted_data.
    bounds[id] = -1;
    bounds[vocab_dim + id] = -2;
    return;
  } else {
    bounds[id] = lower_bound;
  }
  // Binary search to find upper bound: stored at bounds[vocab_dim..2*vocab_dim-1]
  lower_bound = 0;
  upper_bound = data_dim - 1;
  while (lower_bound < upper_bound) {
    // Bias the midpoint up so the loop converges onto the LAST occurrence.
    mean = (lower_bound + upper_bound + 1) / 2;
    if (id >= sorted_data[mean])
      lower_bound = mean;
    else
      upper_bound = mean - 1;
  }
  bounds[vocab_dim + id] = upper_bound;
}
/*
 * \brief kernel to compute gradient of EmbeddingOp. One block per vocabulary
 * row; threads stride over the (vector-load-aligned) embedding dimension and
 * accumulate, in shared memory, every grad_out row whose index maps here.
 * \param grad_in input gradient data
 * \param original_index reference to the position at original input data for each index
 * \param index_bounds lower and upper-bounds positions of each unique index
 * \param grad_out output gradient data
 * \param embbedding_dim dimension of the dense embedding
 * \param vocab_dim maximum number of unique indices in the data array: tokens vocabulary size
 * \param nelems_per_load number of elements per each load based on (LType / DType)
 * \param req write/add/null
 */
template <typename AType, typename LType, typename DType, typename IType>
__global__ void EmbeddingGradKernel(DType* grad_in,
                                    const IType* original_index,
                                    const IType* index_bounds,
                                    const DType* grad_out,
                                    const index_t embbedding_dim,
                                    const index_t vocab_dim,
                                    const int nelems_per_load,
                                    const int req) {
  // Dynamic shared memory holds this block's partial row, in accumulation
  // type AType; laid out as [val_id * blockDim.x + threadIdx.x].
  extern __shared__ int sharedmem[];
  AType* grad_in_row = reinterpret_cast<AType*>(sharedmem);
  // Reinterpret global buffers for wide (LType) vector loads/stores.
  const LType* aligned_grad_out = reinterpret_cast<const LType*>(grad_out);
  LType* aligned_grad_in = reinterpret_cast<LType*>(grad_in);
  const index_t aligned_emb_dim = embbedding_dim / nelems_per_load;
  // Staging register: one LType viewed as nelems_per_load DType values.
  LType load_value[1];
  DType* data_values = reinterpret_cast<DType*>(load_value);
  IType my_row = blockIdx.x;
  if (my_row < vocab_dim) {
    // Read lower and upper bounds for current row
    IType lower_bound = index_bounds[my_row];
    IType upper_bound = index_bounds[vocab_dim + my_row];
    // Sentinel bounds (-1, -2) for absent rows yield zero occurrences.
    int nOccurrences = upper_bound - lower_bound + 1;
    for (index_t emb_id = threadIdx.x; emb_id < aligned_emb_dim; emb_id += blockDim.x) {
      // Initialize grad_in: seed with the existing gradient for kAddTo,
      // otherwise start from zero.
      if (req == kAddTo) {
        *load_value = aligned_grad_in[my_row * aligned_emb_dim + emb_id];
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(data_values[val_id]);
        }
      } else {
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(0.0);
        }
      }
      // Add all rows from grad_out according to indices in data
      for (index_t data_idx = lower_bound; data_idx < (lower_bound + nOccurrences); ++data_idx) {
        *load_value = aligned_grad_out[original_index[data_idx] * aligned_emb_dim + emb_id];
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] += static_cast<AType>(data_values[val_id]);
        }
      }
      // Save results (cast back down from AType to DType, then wide store).
      for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
        data_values[val_id] = static_cast<DType>(grad_in_row[val_id * blockDim.x + threadIdx.x]);
      }
      aligned_grad_in[my_row * aligned_emb_dim + emb_id] = *load_value;
    }
  }
}
// Host-side driver for the deterministic Embedding backward pass:
// 1) clip+cast indices, 2) sort them with their original positions,
// 3) find each vocabulary id's occurrence range, 4) launch one block per
// vocabulary row to accumulate gradients (accumulation type AType).
template <typename AType, typename IType, typename DType>
void EmbeddingGradKernelCaller(const OpContext& ctx,
                               mshadow::Tensor<gpu, 2, DType> grad_in,
                               const mshadow::Tensor<gpu, 1, IType>& index,
                               const mshadow::Tensor<gpu, 2, DType>& grad_out,
                               const std::vector<OpReqType>& req) {
  using namespace mxnet_op;
  using namespace mshadow::expr;
  Stream<gpu>* s = ctx.get_stream<gpu>();
  const index_t data_dim = index.shape_[0];
  const index_t vocab_dim = grad_in.shape_[0];
  const index_t embbedding_dim = grad_in.shape_[1];
  // Calculate amount of temporary storage
  size_t sort_workspace_size = mxnet::op::SortByKeyWorkspaceSize<int, int, gpu>(data_dim);
  size_t workspace_size =
      2 * data_dim * sizeof(int) + 2 * vocab_dim * sizeof(int) + sort_workspace_size;
  // Request temporary storage
  Tensor<gpu, 1, char> workspace =
      ctx.requested[embedding::kTempSpace].get_space_typed<gpu, 1, char>(Shape1(workspace_size), s);
  // Create tensors (manual sub-allocation from the single workspace)
  size_t pos = 0;
  Tensor<gpu, 1, int> sorted_data(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
  pos += data_dim * sizeof(int);
  // Reference to input data positions for each element of sorted_data
  Tensor<gpu, 1, int> original_index(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
  pos += data_dim * sizeof(int);
  // lower and upper bound positions of each index within sorted_data
  Tensor<gpu, 1, int> bounds_index(
      reinterpret_cast<int*>(&workspace[pos]), Shape1(2 * vocab_dim), s);
  pos += 2 * vocab_dim * sizeof(int);
  Tensor<gpu, 1, char> Sort_temp_storage(&workspace[pos], Shape1(sort_workspace_size), s);
  // Clip indices [0, vocab_dim-1]
  Kernel<tcast_clip, gpu>::Launch(
      s, data_dim, sorted_data.dptr_, index.dptr_, static_cast<int>(vocab_dim));
  // original_index = [0, 1, ..., data_dim-1]
  Kernel<range_fwd, gpu>::Launch(s, data_dim, 1, 0, 1, kWriteTo, original_index.dptr_);
  // Sort indices array; only the bits needed to represent vocab_dim-1.
  int num_bits = ilog2((vocab_dim - 1));
  mxnet::op::SortByKey(sorted_data, original_index, true, &Sort_temp_storage, 0, num_bits);
  // Find lower & upper bounds of each possible index
  const int threads_block_bounds = 128;
  const int nblocks_bounds = (vocab_dim + threads_block_bounds - 1) / threads_block_bounds;
  hipLaunchKernelGGL(( EmbeddingFindBounds), dim3(nblocks_bounds), dim3(threads_block_bounds), 0, Stream<gpu>::GetStream(s),
      sorted_data.dptr_, bounds_index.dptr_, data_dim, vocab_dim);
  // Compute Gradient: pick the widest aligned vector-load type for the row.
  int ltype = mxnet::common::cuda::get_load_type(embbedding_dim * sizeof(DType));
  MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
    CHECK_LE(sizeof(DType), sizeof(LType));
    int nelems_per_load = sizeof(LType) / sizeof(DType);
    // Grow block size in warp-sized steps until it covers the aligned row
    // (capped at maxThreads).
    int threads_block_grad = 32;
    int maxThreads = 1024;
    while (threads_block_grad < (embbedding_dim / nelems_per_load) &&
           (threads_block_grad < maxThreads))
      threads_block_grad += 32;
    size_t required_shared = threads_block_grad * nelems_per_load * sizeof(AType);
    dim3 blocks(vocab_dim, 1);
    hipLaunchKernelGGL(( EmbeddingGradKernel<AType, LType>)
        , dim3(blocks), dim3(threads_block_grad), required_shared, Stream<gpu>::GetStream(s),
        grad_in.dptr_,
        original_index.dptr_,
        bounds_index.dptr_,
        grad_out.dptr_,
        embbedding_dim,
        vocab_dim,
        nelems_per_load,
        req[embedding::kWeight]);
  });
}
// GPU backward for Embedding: only the weight gradient is computed (data
// gradient is unsupported). With MXNET_SAFE_ACCUMULATION (default on),
// accumulation happens in a wider type AType; otherwise in DType itself.
template <>
void EmbeddingOpBackward<gpu>(const nnvm::NodeAttrs& attrs,
                              const OpContext& ctx,
                              const std::vector<TBlob>& inputs,
                              const std::vector<OpReqType>& req,
                              const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req[embedding::kData], kNullOp)
      << "Embedding layer doesn't support calculate data gradient";
  if (req[embedding::kWeight] == kNullOp) {
    return;
  }
  CHECK_EQ(outputs[1].type_flag_, inputs[0].type_flag_);
  // inputs[0]: head gradient; inputs[1]: indices; outputs[1]: weight gradient.
  const mxnet::TShape& ishape = inputs[1].shape_;
  const mxnet::TShape& oshape = inputs[0].shape_;
  Stream<gpu>* s = ctx.get_stream<gpu>();
  CHECK_NE(req[embedding::kWeight], kWriteInplace)
      << "Backward of Embedding does not support writing in place.";
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  if (!safe_acc && outputs[1].type_flag_ == mshadow::kFloat16) {
    common::LogOnce(
        "MXNET_SAFE_ACCUMULATION=1 is recommended for EmbeddingOpBackward "
        "with float16 inputs. "
        "See https://mxnet.apache.org/api/faq/env_var "
        "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(outputs[1].type_flag_, DType, AType, {
    MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, {
      // Flatten indices to 1-D and the head gradient to (num_indices, emb_dim).
      Tensor<gpu, 1, IType> data =
          inputs[1].get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
      Tensor<gpu, 2, DType> grad_out = inputs[0].get_with_shape<gpu, 2, DType>(
          Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
      Tensor<gpu, 2, DType> grad_in = outputs[1].get<gpu, 2, DType>(s);
      if (req[embedding::kWeight] == kWriteTo || req[embedding::kWeight] == kAddTo) {
        if (safe_acc)
          EmbeddingGradKernelCaller<AType>(ctx, grad_in, data, grad_out, req);
        else
          EmbeddingGradKernelCaller<DType>(ctx, grad_in, data, grad_out, req);
      } else {
        LOG(FATAL) << "wrong req";
      }
    });
  });
}
// Register the GPU implementations of the indexing operators.
NNVM_REGISTER_OP(Embedding).set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
    .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
    .set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take).set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take).set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take).set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot).set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
// gather_nd is marked incompatible with CUDA graph capture — presumably
// because its forward path performs a host-synchronizing bound check
// (TODO confirm against FIsCUDAGraphsCompatible semantics).
NNVM_REGISTER_OP(gather_nd)
    .set_attr<FIsCUDAGraphsCompatible>("FIsCUDAGraphsCompatible",
                                       [](const NodeAttrs&, const bool) { return false; })
    .set_attr<FCompute>("FCompute<gpu>", GatherNDForwardGPU);
NNVM_REGISTER_OP(scatter_nd).set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd).set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
| c20d13f6caba62a5840f18ee5300c01d25ebd0c7.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file indexing_op.cu
* \brief GPU implementation of indexing operator
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
#include "./util/tensor_util-inl.h"
namespace mxnet {
namespace op {
/*! \brief Flags out-of-range indices: sets *out to 1 when data[i] falls
 *         outside [min, max]; leaves *out untouched otherwise (callers
 *         zero the flag before launching).
 */
struct is_valid_check {
  template <typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  char* out,
                                  const DType* data,
                                  const DType min,
                                  const DType max) {
    const DType value = data[i];
    const bool out_of_range = (value < min) || (value > max);
    if (out_of_range) {
      *out = 1;
    }
  }
};
// Scatter-adds take/embedding head gradients into a row-sparse gradient.
// One thread per (input element, feature column) pair.
struct AddTakeGradRspGPUKernel {
  template <typename DType, typename IType>
  __device__ __forceinline__ static void Map(int tid,
                                             DType* out,
                                             const nnvm::dim_t* prefix_sum,
                                             const IType* data,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length) {
    using nnvm::dim_t;
    const dim_t data_i = tid / row_length;  // which input index element
    const dim_t grad_i = tid % row_length;  // which feature column
    const dim_t irow = static_cast<dim_t>(data[data_i]);
    // prefix_sum is an inclusive scan over row-occupancy flags, so
    // prefix_sum[irow] - 1 is irow's position in the compacted output.
    const dim_t rsp_row = prefix_sum[irow] - 1;
    const DType val = ograd[data_i * row_length + grad_i];
    // Several input elements may reference the same row: accumulate atomically.
    atomicAdd(static_cast<DType*>(&(out[rsp_row * row_length + grad_i])), val);
  }
};
/*
 * \brief kernel for backward computation for take, executed with deterministic order
 * \param thread_id the thread id
 * \param out the output gradient data
 * \param lookup_table the table to lookup the position of an id in gradient array
 * \param sorted_data the sorted data input
 * \param original_idx the original indices of the sorted data input
 * \param ograd head gradient
 * \param row_length the output dimension
 * \param num_threads_per_row the number of threads to process a row together
 * \param SZ the number of features a thread is responsible for
 */
template <int SZ>
struct AddTakeGradRspDeterministicKernel {
  template <typename DType>
  __device__ __forceinline__ static void Map(int thread_id,
                                             DType* out,
                                             const nnvm::dim_t* lookup_table,
                                             const nnvm::dim_t* sorted_data,
                                             const nnvm::dim_t data_size,
                                             const nnvm::dim_t* original_idx,
                                             const DType* ograd,
                                             const nnvm::dim_t row_length,
                                             const nnvm::dim_t num_threads_per_row) {
    using nnvm::dim_t;
    // Each group of num_threads_per_row threads covers one sorted element;
    // each thread within the group handles an SZ-wide feature segment.
    int tid = thread_id / num_threads_per_row;
    const int feature_start = thread_id % num_threads_per_row * SZ;
    int num_features = SZ;
    // Clamp the last segment of the row.
    if (feature_start + num_features > row_length) {
      num_features = row_length - feature_start;
    }
    // Only the first thread group of each run of equal sorted ids does work,
    // walking the whole run sequentially — this fixes the summation order
    // and makes the result deterministic (no atomics needed).
    if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) {
      DType acc[SZ];
#pragma unroll
      for (int i = 0; i < SZ; i++) {
        acc[i] = 0;
      }
      const dim_t data = sorted_data[tid];
      const dim_t row_id = lookup_table[data];
      const dim_t out_offset = row_id * row_length + feature_start;
      do {
        // original_idx maps the sorted position back to the ograd row.
        const dim_t idx = original_idx[tid];
        const dim_t ograd_offset = idx * row_length + feature_start;
        for (int i = 0; i < num_features; i++) {
          acc[i] += ograd[ograd_offset + i];
        }
        tid++;
      } while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]);
      for (int i = 0; i < num_features; i++) {
        out[out_offset + i] += acc[i];
      }
    }
  }
};
// Gathers rows of a 2-D input at the positions given by idx (take with axis 0).
// The `clip` template flag selects clip semantics; otherwise indices wrap
// modulo K (Python-style negative indexing).
template <bool clip = true>
struct TakeZeroAxisGPU {
  // assume that idx have been flattened to a 1-D tensor (N,)
  // assume that out_data and in_data have been flattened to 2-D tensors, (N, M) and (K, M)
  // M is the number of columns of in_data and out_data
  // K is the number of rows of in_data
  // i is the index of out_data
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out_data,
                                  const DType* in_data,
                                  const IType* idx,
                                  const int64_t M,
                                  const int64_t K) {
    int64_t j = static_cast<int64_t>(idx[i / M]);
    if (clip) {
      // Clip mode: clamp out-of-range (including negative) indices to the ends.
      if (j <= 0)
        j = 0;
      else if (j >= K)
        j = K - 1;
    } else {
      // Wrap mode: reduce modulo K, then shift negatives into [0, K).
      j = j % K;
      j += (j < 0) ? K : 0;
    }
    out_data[i] = in_data[j * M + i % M];
  }
};
/*
 * \brief returns true if all indices are between [min, max]
 * \param s the stream
 * \param data_ptr the indices on the stream
 * \param data_size the number of indices to examine
 * \param min the expected min value for indices
 * \param max the expected max value for indices
 * \param is_valid_ptr the temporary workspace (one device byte used as a flag)
 */
template <typename DType>
bool CheckIndexOutOfBound(mshadow::Stream<gpu>* s,
                          const DType* data_ptr,
                          size_t data_size,
                          const DType min,
                          const DType max,
                          char* is_valid_ptr) {
  using namespace mxnet_op;
  // The device flag is a single char, so mirror it with a char on the host.
  // (Previously this was an int32_t receiving only sizeof(char) bytes, which
  // worked only because of zero-initialization and byte ordering.)
  char is_valid = 0;
  Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
  Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
  CUDA_CALL(cudaMemcpyAsync(&is_valid,
                            is_valid_ptr,
                            sizeof(char),
                            cudaMemcpyDeviceToHost,
                            mshadow::Stream<gpu>::GetStream(s)));
  // Block until the flag has landed on the host before reading it.
  CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  return is_valid == 0;
}
// Embedding forward implementation with dense weight: a row gather from the
// weight matrix with clip semantics on the indices. Note: `req` is accepted
// for interface parity but not consulted — the gather always overwrites the
// output (kWriteTo semantics).
template <>
void EmbeddingOpForwardDnsImpl<gpu>(mshadow::Stream<gpu>* s,
                                    const TBlob& data,
                                    const TBlob& weight,
                                    const OpReqType req,
                                    const TBlob& output) {
  using namespace mxnet_op;
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& oshape = output.shape_;
  MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
    MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
      // Flatten indices to 1-D and output to (num_indices, emb_dim).
      Tensor<gpu, 1, IType> idx =
          data.get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
      Tensor<gpu, 2, DType> wmat = weight.get<gpu, 2, DType>(s);
      Tensor<gpu, 2, DType> out = output.get_with_shape<gpu, 2, DType>(
          Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
      Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(
          s, oshape.Size(), out.dptr_, wmat.dptr_, idx.dptr_, wmat.shape_[1], wmat.shape_[0]);
    });
  });
}
// Forward for SparseEmbedding with a row-sparse weight. Zero-fills the output
// if the weight has no allocated rows, validates indices against the weight's
// row count, then dispatches to the dense or row-sparse gather path.
template <>
void SparseEmbeddingOpForwardRspImpl<gpu>(const OpContext& ctx,
                                          const TBlob& data,
                                          const NDArray& weight,
                                          const OpReqType req,
                                          const TBlob& output) {
  if (req == kNullOp)
    return;
  using namespace rowsparse;
  using namespace mxnet_op;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  // zeros weight: uninitialized storage means every gathered row is zero.
  if (req == kWriteTo && !weight.storage_initialized()) {
    size_t out_size = output.shape_.Size();
    MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
      Fill<false>(
          s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size), gpu::kDevMask), kWriteTo, 0);
    })
    return;
  }
  // check out-of-bound indices (synchronizes with the host inside the helper)
  MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
    DType min = 0;
    DType max = static_cast<DType>(weight.shape()[0] - 1);
    DType* data_ptr = data.dptr<DType>();
    size_t data_size = data.shape_.Size();
    Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
    char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
    bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, is_valid_ptr);
    CHECK(is_valid) << "SparseEmbedding input contains data out of bound";
  })
  // the weight is actually dense (all rows present): use the dense fast path.
  if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
    EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
  } else {
    EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
  }
}
/*
 * \brief deterministic backward for SparseEmbedding: sorts the indices with
 * their original positions, extracts the unique rows, and accumulates
 * gradients per unique row in a fixed order so results are bitwise
 * reproducible across runs (no atomics).
 */
template <typename IType, typename DType, typename RType>
void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx,
                                              const TBlob& ograd,
                                              const TBlob& data,
                                              const OpReqType req,
                                              const NDArray& output) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const dim_t num_rows = output.shape()[0];
  const dim_t row_length = output.shape()[1];
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // temp resource declarations
  dim_t* lookup_table = nullptr;
  void* temp_storage = nullptr;
  dim_t* sorted_data = nullptr;
  dim_t* original_idx = nullptr;
  // calculate number of bytes for temp resources
  size_t lookup_table_bytes = num_rows * sizeof(dim_t);
  size_t sorted_data_storage_bytes = data_size * sizeof(dim_t);
  size_t original_idx_storage_bytes = data_size * sizeof(dim_t);
  size_t sort_workspace_size = SortByKeyWorkspaceSize<dim_t, dim_t, gpu>(data_size);
  size_t unique_workspace_bytes = 0;
  // estimate unique temp space (size query only: the null first argument makes
  // CUB report the workspace size without doing any work)
  size_t* null_ptr = nullptr;
  // unique operations will be applied on sorted data
  cub::DeviceSelect::Unique(nullptr,
                            unique_workspace_bytes,
                            sorted_data,
                            sorted_data,
                            null_ptr,
                            data_size,
                            Stream<gpu>::GetStream(s));
  // One more space reserved for unique count
  size_t temp_workspace_bytes = std::max(unique_workspace_bytes, sort_workspace_size);
  size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes +
                               original_idx_storage_bytes + temp_workspace_bytes;
  // request resource and split it. layout is:
  // lookup_table, sorted_data, original_idx, temp_storage
  Tensor<gpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(total_storage_bytes), s);
  lookup_table = reinterpret_cast<dim_t*>(workspace.dptr_);
  sorted_data = reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes);
  original_idx =
      reinterpret_cast<dim_t*>(workspace.dptr_ + lookup_table_bytes + sorted_data_storage_bytes);
  temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes;
  // check out-of-bound indices
  {
    IType min = 0;
    IType max = static_cast<IType>(output.shape()[0] - 1);
    IType* data_ptr = data.dptr<IType>();
    size_t data_size = data.shape_.Size();
    bool is_valid = CheckIndexOutOfBound(
        s, data_ptr, data_size, min, max, reinterpret_cast<char*>(temp_storage));
    CHECK(is_valid) << "Embedding input contains data out of bound";
  }
  // make a copy of the data, to be sorted
  TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask);
  auto sorted_data_tensor = sorted_data_blob.FlatTo1D<gpu, dim_t>(s);
  mxnet_op::copy(s, sorted_data_blob, data);
  // generate original idx: [0, 1, ..., data_size-1]
  Tensor<gpu, 1, dim_t> original_idx_tensor(original_idx, Shape1(data_size), s);
  Kernel<range_fwd, gpu>::Launch(
      s, data_size, 1, static_cast<dim_t>(0), static_cast<dim_t>(1), kWriteTo, original_idx);
  // sort data with its original idx; only the bits needed for row ids
  int num_bits = common::ilog2ui(num_rows - 1);
  char* temp_storage_ptr = reinterpret_cast<char*>(temp_storage);
  Tensor<gpu, 1, char> temp_storage_tensor(temp_storage_ptr, Shape1(sort_workspace_size), s);
  SortByKey(sorted_data_tensor, original_idx_tensor, true, &temp_storage_tensor, 0, num_bits);
  // compute unique row ids based on sorted values. The extra slot at the end
  // of the aux array receives the unique count.
  output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1));
  // fill row_idx array of output matrix, using the row_flg values
  RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
  cub::DeviceSelect::Unique(temp_storage_ptr,
                            unique_workspace_bytes,
                            sorted_data,
                            grad_row_idx,
                            grad_row_idx + data_size,
                            data_size,
                            Stream<gpu>::GetStream(s));
  // Read back the unique-row count. Use an RType host variable so exactly
  // sizeof(RType) bytes are copied into a matching type (the previous dim_t
  // local received only part of its bytes).
  RType nnr_count = 0;
  CUDA_CALL(cudaMemcpyAsync(&nnr_count,
                            grad_row_idx + data_size,
                            sizeof(RType),
                            cudaMemcpyDeviceToHost,
                            mshadow::Stream<gpu>::GetStream(s)));
  CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  const dim_t nnr = static_cast<dim_t>(nnr_count);
  CHECK_EQ(output.shape().ndim(), 2) << "Unexpected ndim";
  output.CheckAndAllocData(Shape2(nnr, output.shape()[1]));
  output.set_aux_shape(kIdx, Shape1(nnr));
  // generate lookup table (row id -> compacted output row)
  Kernel<MarkLookupTable, gpu>::Launch(s, nnr, lookup_table, grad_row_idx);
  // accumulate gradients deterministically (see the kernel for ordering)
  DType* grad_data = output.data().dptr<DType>();
  Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
  const int SZ = 4;
  const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ;
  Kernel<AddTakeGradRspDeterministicKernel<SZ>, gpu>::Launch(s,
                                                             data_size * num_threads_per_row,
                                                             grad_data,
                                                             lookup_table,
                                                             sorted_data,
                                                             data_size,
                                                             original_idx,
                                                             ograd.dptr<DType>(),
                                                             row_length,
                                                             num_threads_per_row);
}
// Type-dispatch wrapper for the deterministic SparseEmbedding backward:
// handles the trivial cases (null req, empty indices) on the host and
// forwards to SparseEmbeddingDeterministicKernelLaunch otherwise.
inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx,
                                                          const TBlob& ograd,
                                                          const TBlob& data,
                                                          const OpReqType req,
                                                          const NDArray& output) {
  using nnvm::dim_t;
  if (req == kNullOp)
    return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  // No indices => the gradient is all zeros.
  if (data_size == 0) {
    FillZerosRspImpl(s, output);
    return;
  }
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, {
        SparseEmbeddingDeterministicKernelLaunch<IType, DType, RType>(
            ctx, ograd, data, req, output);
      });
    });
  });
}
// Backward for SparseEmbedding with a row-sparse weight gradient. The
// non-deterministic path below marks occupied rows, prefix-sums the flags to
// compact them, then scatter-adds gradients with atomics; the deterministic
// path is delegated to SparseEmbeddingOpBackwardDeterministicRspImpl.
template <>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const bool deterministic,
                                                  const OpContext& ctx,
                                                  const TBlob& ograd,
                                                  const TBlob& data,
                                                  const OpReqType req,
                                                  const NDArray& output) {
  if (deterministic) {
    SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output);
    return;
  }
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace mshadow::expr;
  using namespace rowsparse;
  using nnvm::dim_t;
  if (req == kNullOp)
    return;
  CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
                          << "weight gradient calculation with req != write";
  // Request temporary storage for marking non-zero rows and prefix sum
  Stream<gpu>* s = ctx.get_stream<gpu>();
  dim_t num_rows = output.shape()[0];
  dim_t row_length = output.shape()[1];
  dim_t data_size = static_cast<dim_t>(data.shape_.Size());
  dim_t num_threads;
  MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
    MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
        dim_t* prefix_sum = nullptr;
        void* d_temp_storage = nullptr;
        size_t temp_storage_bytes = 0;
        // Size query: null temp storage makes CUB report the bytes needed.
        cub::DeviceScan::InclusiveSum(d_temp_storage,
                                      temp_storage_bytes,
                                      prefix_sum,
                                      prefix_sum,
                                      num_rows,
                                      Stream<gpu>::GetStream(s));
        Tensor<gpu, 1, char> workspace = ctx.requested[0].get_space_typed<gpu, 1, char>(
            Shape1(num_rows * sizeof(dim_t) + temp_storage_bytes), s);
        prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
        d_temp_storage = workspace.dptr_ + num_rows * sizeof(dim_t);
        num_threads = num_rows;
        // Flag every row referenced by an index, then scan so that
        // prefix_sum[row] - 1 is the row's compacted position.
        Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
        Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
        cub::DeviceScan::InclusiveSum(d_temp_storage,
                                      temp_storage_bytes,
                                      prefix_sum,
                                      prefix_sum,
                                      num_rows,
                                      mshadow::Stream<gpu>::GetStream(s));
        // The last scan entry is the number of distinct non-zero rows.
        dim_t nnr = 0;
        CUDA_CALL(cudaMemcpyAsync(&nnr,
                                  &prefix_sum[num_rows - 1],
                                  sizeof(dim_t),
                                  cudaMemcpyDeviceToHost,
                                  mshadow::Stream<gpu>::GetStream(s)));
        CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
        if (nnr == 0) {
          FillZerosRspImpl(s, output);
          return;
        }
        output.CheckAndAlloc({Shape1(nnr)});
        RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
        // fill row_idx array of output matrix, using the row_flg values
        Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows, grad_row_idx, prefix_sum, num_rows);
        // prefill with zeros
        DType* grad_data = output.data().dptr<DType>();
        Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), kWriteTo, 0);
        // add the final gradients (atomic scatter-add; order-dependent rounding)
        num_threads = row_length * data_size;
        Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s,
                                                     num_threads,
                                                     grad_data,
                                                     prefix_sum,
                                                     data.dptr<IType>(),
                                                     ograd.dptr<DType>(),
                                                     row_length);
      });
    });
  });
}
/*
 * \brief check if any of the indices is out of bound
 * \param s the stream
 * \param idx_ptr the indices on the stream
 * \param N the number of indices in an axis
 * \param M the number of axes to examine
 * \param mshape the array that stores shape for each dimension
 * \param is_valid_dim_ptr the temporary workspace that contains out-of-bound indices
 */
template <typename DType>
void GatherNDCheckBoundGPU(mshadow::Stream<gpu>* s,
                           const DType* idx_ptr,
                           index_t N,
                           index_t M,
                           const mshadow::Shape<10> mshape,
                           DType* is_valid_dim_ptr) {
  using namespace mxnet_op;
  // Each of the M workspace slots is zeroed, then the kernel deposits an
  // offending index value for its axis if one exists.
  Kernel<set_zero, gpu>::Launch(s, M, is_valid_dim_ptr);
  Kernel<is_valid_check_gather_nd, gpu>::Launch(s, M, is_valid_dim_ptr, idx_ptr, N, mshape);
  std::vector<DType> is_valid_dim(M);
  CUDA_CALL(cudaMemcpyAsync(is_valid_dim.data(),
                            is_valid_dim_ptr,
                            sizeof(DType) * M,
                            cudaMemcpyDeviceToHost,
                            mshadow::Stream<gpu>::GetStream(s)));
  // Synchronize before inspecting the copied flags on the host.
  CUDA_CALL(cudaStreamSynchronize(mshadow::Stream<gpu>::GetStream(s)));
  for (int m = 0; m < M; m++) {
    // Valid range per axis is [-mshape[m], mshape[m] - 1] (negative indexing).
    if (is_valid_dim[m] > mshape[m] - 1 || is_valid_dim[m] < -mshape[m]) {
      LOG(FATAL) << "IndexError: index " << is_valid_dim[m] << " is out of bounds for axis " << m
                 << " with size " << mshape[m];
    }
  }
}
// GPU forward for gather_nd: interprets the first dimension of the indices
// tensor as M coordinate axes into the data tensor, gathering K-element
// trailing slices. Bound-checks indices (host-synchronizing) before launch.
void GatherNDForwardGPU(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp)
    return;
  mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
  const mxnet::TShape& dshape = inputs[0].shape_;
  const mxnet::TShape& ishape = inputs[1].shape_;
  // M: indexed dims; N: number of index tuples; K: trailing slice size.
  int M = ishape[0];
  int N = ishape.Size() / M;
  int K = dshape.ProdShape(M, dshape.ndim());
  mshadow::Shape<10> strides;
  mshadow::Shape<10> mshape;
  // Element strides of the M leading data dimensions (in units of K).
  for (int i = M - 1, stride = K; i >= 0; stride *= dshape[i], --i) {
    strides[i] = stride;
    mshape[i] = dshape[i];
  }
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, { // output data type switch
    MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, { // indices data type switch
      // check whether indices are out of bound (LOG(FATAL) inside on failure)
      IType* idx_ptr = inputs[1].dptr<IType>();
      Tensor<gpu, 1, IType> workspace =
          ctx.requested[0].get_space_typed<gpu, 1, IType>(Shape1(M), s);
      IType* is_valid_dim_ptr = reinterpret_cast<IType*>(workspace.dptr_);
      GatherNDCheckBoundGPU(s, idx_ptr, N, M, mshape, is_valid_dim_ptr);
      Kernel<gather_nd, gpu>::Launch(s,
                                     N,
                                     req[0],
                                     N,
                                     M,
                                     K,
                                     strides,
                                     mshape,
                                     outputs[0].dptr<DType>(),
                                     inputs[0].dptr<DType>(),
                                     inputs[1].dptr<IType>());
    });
  });
}
// Backward kernel for gather_nd: one thread per index tuple scatter-adds a
// K-element slice of the head gradient (`data`) into the data gradient (`out`).
// Multiple tuples may address the same slice, hence the atomicAdd.
struct backward_gather_nd_gpu {
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  index_t N,
                                  index_t M,
                                  index_t K,
                                  const mshadow::Shape<10> strides,
                                  DType* out,
                                  const DType* data,
                                  const IType* indices) {
    index_t offset = 0;
    for (index_t j = 0; j < M; ++j) {
      // Cast to index_t (not int) so indices are not truncated to 32 bits on
      // large-tensor (64-bit index) builds; identical for in-range values.
      offset += strides[j] * static_cast<index_t>(indices[j * N + i]);
    }
    for (index_t j = 0; j < K; ++j) {
      atomicAdd(out + (offset + j), data[i * K + j]);
    }
  }
};
// Launches the gather_nd backward kernel: N index tuples, M indexed dims,
// K trailing slice elements. Accumulation into `out` goes through atomicAdd
// (see backward_gather_nd_gpu), so floating-point summation order — and thus
// rounding — is not deterministic across runs.
template <typename DType, typename IType>
inline void GatherNDBackwardImpl(index_t N,
                                 index_t M,
                                 index_t K,
                                 const mshadow::Shape<10> strides,
                                 DType* out,
                                 const DType* data,
                                 const IType* indices,
                                 mshadow::Stream<gpu>* s) {
  mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
/*
 * \brief GPU forward of the take operator: gathers slices of the input
 *        array along `param.axis` according to the index tensor.
 *        kRaise mode validates indices on device first; kClip clamps
 *        out-of-range indices inside the kernels (template flag true/false).
 */
template <>
void TakeOpForward<gpu>(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[take_::kOut] == kNullOp)
    return;
  const TakeParam& param = nnvm::get<TakeParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& idxshape = inputs[take_::kIdx].shape_;
  const mxnet::TShape& arrshape = inputs[take_::kArr].shape_;
  const mxnet::TShape& oshape = outputs[take_::kOut].shape_;
  // Nothing to gather for an empty index tensor.
  if (idxshape.Size() == 0) {
    return;
  }
  Stream<gpu>* s = ctx.get_stream<gpu>();
  // Normalize a negative axis into [0, ndim).
  const int actual_axis = param.axis + ((param.axis < 0) ? arrshape.ndim() : 0);
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[take_::kOut].type_flag_, DType, {    // output data type
    MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[take_::kIdx].type_flag_, IType, {  // index data type
      if (param.mode == take_::kRaise) {
        // check out-of-bound indices and fail loudly before gathering
        IType min = 0;
        IType max = static_cast<IType>(arrshape[actual_axis] - 1);
        IType* idx_ptr = inputs[take_::kIdx].dptr<IType>();
        size_t idx_size = idxshape.Size();
        Tensor<gpu, 1, char> workspace =
            ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(1), s);
        char* is_valid_ptr = reinterpret_cast<char*>(workspace.dptr_);
        bool is_valid = CheckIndexOutOfBound(s, idx_ptr, idx_size, min, max, is_valid_ptr);
        CHECK(is_valid) << "Take indices contains indices out of bound";
      }
      if (actual_axis == 0) {
        // Fast path: gathering whole rows from the leading axis.
        if (param.mode == take_::kClip) {
          Kernel<TakeZeroAxisGPU<true>, gpu>::Launch(s,
                                                     oshape.Size(),
                                                     outputs[take_::kOut].dptr<DType>(),
                                                     inputs[take_::kArr].dptr<DType>(),
                                                     inputs[take_::kIdx].dptr<IType>(),
                                                     oshape.Size() / idxshape.Size(),
                                                     arrshape[0]);
        } else {
          Kernel<TakeZeroAxisGPU<false>, gpu>::Launch(s,
                                                      oshape.Size(),
                                                      outputs[take_::kOut].dptr<DType>(),
                                                      inputs[take_::kArr].dptr<DType>(),
                                                      inputs[take_::kIdx].dptr<IType>(),
                                                      oshape.Size() / idxshape.Size(),
                                                      arrshape[0]);
        }
      } else {
        // General path: build element strides for input and output shapes
        // so the kernel can address arbitrary-axis gathers.
        mshadow::Shape<10> in_strides;
        int stride = 1;
        for (int i = arrshape.ndim() - 1; i >= 0; stride *= arrshape[i], --i) {
          in_strides[i] = stride;
        }
        mshadow::Shape<10> out_strides;
        stride = 1;
        for (int i = oshape.ndim() - 1; i >= 0; stride *= oshape[i], --i) {
          out_strides[i] = stride;
        }
        if (param.mode == take_::kClip) {
          Kernel<TakeNonzeroAxis<true>, gpu>::Launch(s,
                                                     oshape.Size(),
                                                     outputs[take_::kOut].dptr<DType>(),
                                                     inputs[take_::kArr].dptr<DType>(),
                                                     inputs[take_::kIdx].dptr<IType>(),
                                                     out_strides[actual_axis - 1],
                                                     in_strides[actual_axis - 1],
                                                     in_strides[actual_axis],
                                                     arrshape.ndim(),
                                                     oshape.ndim(),
                                                     idxshape.ndim(),
                                                     arrshape[actual_axis],
                                                     actual_axis);
        } else {
          Kernel<TakeNonzeroAxis<false>, gpu>::Launch(s,
                                                      oshape.Size(),
                                                      outputs[take_::kOut].dptr<DType>(),
                                                      inputs[take_::kArr].dptr<DType>(),
                                                      inputs[take_::kIdx].dptr<IType>(),
                                                      out_strides[actual_axis - 1],
                                                      in_strides[actual_axis - 1],
                                                      in_strides[actual_axis],
                                                      arrshape.ndim(),
                                                      oshape.ndim(),
                                                      idxshape.ndim(),
                                                      arrshape[actual_axis],
                                                      actual_axis);
        }
      }
    });
  });
}
namespace {
/*
 * \brief Number of binary digits needed to represent `a`:
 *        floor(log2(a)) + 1 for a > 0, and 1 for a == 0.
 *        Used below to size the radix-sort bit range.
 */
inline int ilog2(unsigned int a) {
  int bits = 1;
  for (unsigned int v = a >> 1; v != 0; v >>= 1) {
    ++bits;
  }
  return bits;
}
}  // namespace
/*
* \brief finds the lower and upper-bound positions of each unique element within
* a sorted input array
*
* \param sorted_data input elements previously sorted
* \param bounds output containing all lower-bound followed by all upper-bound positions
* \param data_dim total number of elements in the input array
* \param vocab_dim maximum number of unique elements
*/
// One thread per vocabulary entry: binary-searches the sorted index array
// for the first and last occurrence of `id`. Entries that never occur get
// bounds (-1, -2) so that (upper - lower + 1) == 0 in the gradient kernel.
template <typename IType>
__global__ void EmbeddingFindBounds(const IType* sorted_data,
                                    IType* bounds,
                                    const index_t data_dim,
                                    const index_t vocab_dim) {
  const index_t id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id >= vocab_dim)
    return;
  // Binary search to find lower bound: stored at bounds[0..vocab_dim-1]
  IType lower_bound = 0;
  IType upper_bound = data_dim - 1;
  IType mean;
  while (lower_bound < upper_bound) {
    mean = (lower_bound + upper_bound) / 2;
    if (id <= sorted_data[mean])
      upper_bound = mean;
    else
      lower_bound = mean + 1;
  }
  bool found_row = (sorted_data[lower_bound] == id);
  if (!found_row) {
    bounds[id] = -1;              // sentinel: id absent from the data
    bounds[vocab_dim + id] = -2;  // makes the occurrence count zero
    return;
  } else {
    bounds[id] = lower_bound;
  }
  // Binary search to find upper bound: stored at bounds[vocab_dim..2*vocab_dim-1]
  lower_bound = 0;
  upper_bound = data_dim - 1;
  while (lower_bound < upper_bound) {
    // Bias the midpoint up so the loop converges onto the last occurrence.
    mean = (lower_bound + upper_bound + 1) / 2;
    if (id >= sorted_data[mean])
      lower_bound = mean;
    else
      upper_bound = mean - 1;
  }
  bounds[vocab_dim + id] = upper_bound;
}
/*
* \brief kernel to compute gradient of EmbeddingOp
* \param grad_in input gradient data
* \param original_index reference to the position at original input data for each index
* \param index_bounds lower and upper-bounds positions of each unique index
* \param grad_out output gradient data
* \param embbedding_dim dimension of the dense embedding
* \param vocab_dim maximum number of unique indices in the data array: tokens vocabulary size
* \param nelems_per_load number of elements per each load based on (LType / DType)
* \param req write/add/null
*/
// One block per vocabulary row; threads stride over the embedding dimension
// in LType-wide vector words (nelems_per_load DType values per word) and
// accumulate in AType precision in shared memory before storing back.
template <typename AType, typename LType, typename DType, typename IType>
__global__ void EmbeddingGradKernel(DType* grad_in,
                                    const IType* original_index,
                                    const IType* index_bounds,
                                    const DType* grad_out,
                                    const index_t embbedding_dim,
                                    const index_t vocab_dim,
                                    const int nelems_per_load,
                                    const int req) {
  // Shared scratch: one AType accumulator per (vector lane, thread).
  extern __shared__ int sharedmem[];
  AType* grad_in_row = reinterpret_cast<AType*>(sharedmem);
  // Reinterpret global buffers as wide LType words for vectorized access;
  // assumes embbedding_dim is divisible by nelems_per_load (chosen by caller
  // via get_load_type) — TODO confirm against get_load_type's contract.
  const LType* aligned_grad_out = reinterpret_cast<const LType*>(grad_out);
  LType* aligned_grad_in = reinterpret_cast<LType*>(grad_in);
  const index_t aligned_emb_dim = embbedding_dim / nelems_per_load;
  LType load_value[1];
  DType* data_values = reinterpret_cast<DType*>(load_value);
  IType my_row = blockIdx.x;
  if (my_row < vocab_dim) {
    // Read lower and upper bounds for current row
    IType lower_bound = index_bounds[my_row];
    IType upper_bound = index_bounds[vocab_dim + my_row];
    int nOccurrences = upper_bound - lower_bound + 1;
    for (index_t emb_id = threadIdx.x; emb_id < aligned_emb_dim; emb_id += blockDim.x) {
      // Initialize grad_in: start from the existing gradient for kAddTo,
      // otherwise from zero.
      if (req == kAddTo) {
        *load_value = aligned_grad_in[my_row * aligned_emb_dim + emb_id];
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(data_values[val_id]);
        }
      } else {
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] = static_cast<AType>(0.0);
        }
      }
      // Add all rows from grad_out according to indices in data
      for (index_t data_idx = lower_bound; data_idx < (lower_bound + nOccurrences); ++data_idx) {
        *load_value = aligned_grad_out[original_index[data_idx] * aligned_emb_dim + emb_id];
        for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
          grad_in_row[val_id * blockDim.x + threadIdx.x] += static_cast<AType>(data_values[val_id]);
        }
      }
      // Save results: narrow back to DType and store one vector word.
      for (index_t val_id = 0; val_id < nelems_per_load; val_id++) {
        data_values[val_id] = static_cast<DType>(grad_in_row[val_id * blockDim.x + threadIdx.x]);
      }
      aligned_grad_in[my_row * aligned_emb_dim + emb_id] = *load_value;
    }
  }
}
// Drives the embedding backward pass: clip + sort the indices, locate the
// occurrence range of every vocabulary entry, then launch the per-row
// gradient accumulation kernel (one block per vocabulary row).
template <typename AType, typename IType, typename DType>
void EmbeddingGradKernelCaller(const OpContext& ctx,
                               mshadow::Tensor<gpu, 2, DType> grad_in,
                               const mshadow::Tensor<gpu, 1, IType>& index,
                               const mshadow::Tensor<gpu, 2, DType>& grad_out,
                               const std::vector<OpReqType>& req) {
  using namespace mxnet_op;
  using namespace mshadow::expr;
  Stream<gpu>* s = ctx.get_stream<gpu>();
  const index_t data_dim = index.shape_[0];
  const index_t vocab_dim = grad_in.shape_[0];
  const index_t embbedding_dim = grad_in.shape_[1];
  // Calculate amount of temporary storage
  size_t sort_workspace_size = mxnet::op::SortByKeyWorkspaceSize<int, int, gpu>(data_dim);
  size_t workspace_size =
      2 * data_dim * sizeof(int) + 2 * vocab_dim * sizeof(int) + sort_workspace_size;
  // Request temporary storage
  Tensor<gpu, 1, char> workspace =
      ctx.requested[embedding::kTempSpace].get_space_typed<gpu, 1, char>(Shape1(workspace_size), s);
  // Create tensors carved out of the single workspace allocation
  size_t pos = 0;
  Tensor<gpu, 1, int> sorted_data(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
  pos += data_dim * sizeof(int);
  // Reference to input data positions for each element of sorted_data
  Tensor<gpu, 1, int> original_index(reinterpret_cast<int*>(&workspace[pos]), Shape1(data_dim), s);
  pos += data_dim * sizeof(int);
  // lower and upper bound positions of each index within sorted_data
  Tensor<gpu, 1, int> bounds_index(
      reinterpret_cast<int*>(&workspace[pos]), Shape1(2 * vocab_dim), s);
  pos += 2 * vocab_dim * sizeof(int);
  Tensor<gpu, 1, char> Sort_temp_storage(&workspace[pos], Shape1(sort_workspace_size), s);
  // Clip indices [0, vocab_dim-1]
  Kernel<tcast_clip, gpu>::Launch(
      s, data_dim, sorted_data.dptr_, index.dptr_, static_cast<int>(vocab_dim));
  Kernel<range_fwd, gpu>::Launch(s, data_dim, 1, 0, 1, kWriteTo, original_index.dptr_);
  // Sort indices array (radix sort over just enough bits for the vocab range)
  int num_bits = ilog2((vocab_dim - 1));
  mxnet::op::SortByKey(sorted_data, original_index, true, &Sort_temp_storage, 0, num_bits);
  // Find lower & upper bounds of each possible index
  const int threads_block_bounds = 128;
  const int nblocks_bounds = (vocab_dim + threads_block_bounds - 1) / threads_block_bounds;
  EmbeddingFindBounds<<<nblocks_bounds, threads_block_bounds, 0, Stream<gpu>::GetStream(s)>>>(
      sorted_data.dptr_, bounds_index.dptr_, data_dim, vocab_dim);
  // Compute Gradient
  int ltype = mxnet::common::cuda::get_load_type(embbedding_dim * sizeof(DType));
  MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
    CHECK_LE(sizeof(DType), sizeof(LType));
    int nelems_per_load = sizeof(LType) / sizeof(DType);
    // Grow the block (in warp increments) until it covers the vectorized
    // embedding width, capped at maxThreads.
    int threads_block_grad = 32;
    int maxThreads = 1024;
    while (threads_block_grad < (embbedding_dim / nelems_per_load) &&
           (threads_block_grad < maxThreads))
      threads_block_grad += 32;
    size_t required_shared = threads_block_grad * nelems_per_load * sizeof(AType);
    dim3 blocks(vocab_dim, 1);
    EmbeddingGradKernel<AType, LType>
        <<<blocks, threads_block_grad, required_shared, Stream<gpu>::GetStream(s)>>>(
            grad_in.dptr_,
            original_index.dptr_,
            bounds_index.dptr_,
            grad_out.dptr_,
            embbedding_dim,
            vocab_dim,
            nelems_per_load,
            req[embedding::kWeight]);
  });
}
// GPU backward of Embedding: validates req/shapes, flattens the index and
// output-gradient tensors, and dispatches to EmbeddingGradKernelCaller with
// either a safe (higher-precision) or plain accumulation type.
template <>
void EmbeddingOpBackward<gpu>(const nnvm::NodeAttrs& attrs,
                              const OpContext& ctx,
                              const std::vector<TBlob>& inputs,
                              const std::vector<OpReqType>& req,
                              const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 2U);
  // Only the weight gradient is computed; data has no gradient.
  CHECK_EQ(req[embedding::kData], kNullOp)
      << "Embedding layer doesn't support calculate data gradient";
  if (req[embedding::kWeight] == kNullOp) {
    return;
  }
  CHECK_EQ(outputs[1].type_flag_, inputs[0].type_flag_);
  const mxnet::TShape& ishape = inputs[1].shape_;
  const mxnet::TShape& oshape = inputs[0].shape_;
  Stream<gpu>* s = ctx.get_stream<gpu>();
  CHECK_NE(req[embedding::kWeight], kWriteInplace)
      << "Backward of Embedding does not support writing in place.";
  // With fp16 weights, accumulation in fp16 loses precision; recommend
  // MXNET_SAFE_ACCUMULATION (accumulate in a wider type).
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  if (!safe_acc && outputs[1].type_flag_ == mshadow::kFloat16) {
    common::LogOnce(
        "MXNET_SAFE_ACCUMULATION=1 is recommended for EmbeddingOpBackward "
        "with float16 inputs. "
        "See https://mxnet.apache.org/api/faq/env_var "
        "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(outputs[1].type_flag_, DType, AType, {
    MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, {
      // Flatten indices to 1-D and the output gradient to 2-D
      // (rows x embedding width).
      Tensor<gpu, 1, IType> data =
          inputs[1].get_with_shape<gpu, 1, IType>(Shape1(ishape.ProdShape(0, ishape.ndim())), s);
      Tensor<gpu, 2, DType> grad_out = inputs[0].get_with_shape<gpu, 2, DType>(
          Shape2(oshape.ProdShape(0, oshape.ndim() - 1), oshape[oshape.ndim() - 1]), s);
      Tensor<gpu, 2, DType> grad_in = outputs[1].get<gpu, 2, DType>(s);
      if (req[embedding::kWeight] == kWriteTo || req[embedding::kWeight] == kAddTo) {
        if (safe_acc)
          EmbeddingGradKernelCaller<AType>(ctx, grad_in, data, grad_out, req);
        else
          EmbeddingGradKernelCaller<DType>(ctx, grad_in, data, grad_out, req);
      } else {
        LOG(FATAL) << "wrong req";
      }
    });
  });
}
// Register GPU implementations for the indexing operators.
NNVM_REGISTER_OP(Embedding).set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
    .set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>)
    .set_attr<FComputeEx>("FComputeEx<gpu>", EmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take).set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take).set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take).set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot).set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
// gather_nd is marked CUDA-graphs incompatible — presumably due to its
// host-visible device-side bounds check; confirm before changing.
NNVM_REGISTER_OP(gather_nd)
    .set_attr<FIsCUDAGraphsCompatible>("FIsCUDAGraphsCompatible",
                                       [](const NodeAttrs&, const bool) { return false; })
    .set_attr<FCompute>("FCompute<gpu>", GatherNDForwardGPU);
NNVM_REGISTER_OP(scatter_nd).set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd).set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd).set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
|
11aa3689236f8d4116b9f819ecd60a6ed80f325c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
/*
#include "cuPrintf.hip"`
*/
using namespace std;
// Abort with a file:line diagnostic when a HIP API call returned an error.
// Compiles to a no-op unless CUDA_CHECK_ERROR is defined.
inline void __cudaSafeCall( hipError_t err,
                            const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
  do
  {
    if ( hipSuccess != err )
    {
      fprintf( stderr,
               "cudaSafeCall() failed at %s:%i : %s\n",
               file, line, hipGetErrorString( err ) );
      exit( -1 );
    }
  } while ( 0 );
#pragma warning( pop )
#endif
  // CUDA_CHECK_ERROR
  return;
}//end function
// Report any latched HIP error, then synchronize so asynchronous kernel
// failures surface too. Compiles to a no-op unless CUDA_CHECK_ERROR is
// defined. Fix: the original nested a second, identical
// `if (hipSuccess != err)` inside the synchronize branch — redundant and
// confusing; the message/exit behavior is unchanged.
inline void __cudaCheckError( const char *file, const int line ) {
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
  do
  {
    // Error already latched by a prior API call or kernel launch.
    hipError_t err = hipGetLastError();
    if( hipSuccess != err )
    {
      fprintf( stderr,
               "cudaCheckError() with sync failed at %s:%i : %s.\n",
               file, line, hipGetErrorString( err ) );
      exit( -1 );
    }
    // More careful checking. However, this will affect performance.
    // Comment if not needed.
    err = hipDeviceSynchronize();
    if( hipSuccess != err )
    {
      fprintf( stderr,
               "cudaCheckError() failed at %s:%i : %s.\n",
               file, line, hipGetErrorString( err ) );
      exit( -1 );
    }
  } while ( 0 );
#pragma warning( pop )
#endif // CUDA_CHECK_ERROR
  return;
}
// In-place ascending bubble sort: every one of the `size` passes sweeps
// the whole array, swapping adjacent out-of-order pairs so the largest
// remaining value sinks toward the end.
void bubble_sort(int * array, int size)
{
  for (int pass = 0; pass < size; ++pass)
  {
    for (int k = 1; k < size; ++k)
    {
      if (array[k - 1] > array[k])
      {
        const int tmp = array[k];
        array[k] = array[k - 1];
        array[k - 1] = tmp;
      }
    }
  }
}
// Print all elements as "<value>, " on one line, followed by a newline.
void print_array(int * array, int size)
{
  const int * cursor = array;
  const int * end = array + size;
  while (cursor != end)
  {
    printf("%d, ", *cursor);
    ++cursor;
  }
  printf("\n");
}
// Seed the C PRNG with `seed` and return a freshly new[]-allocated array of
// `size` pseudo-random values in [0, 999999]. Caller owns the buffer.
int * makeRandArray( const int size, const int seed ) {
  srand( seed );
  int * values = new int[ size ];
  for ( int k = 0; k < size; ++k ) {
    values[ k ] = std::rand() % 1000000;
  }
  return values;
}
/*
Kernel is fuction to run on GPU.
*/
/*
 * Kernel: sequential in-place bubble sort of `array`.
 * NOTE(review): every launched thread runs the FULL sort over the same
 * array, so this is only safe with a <<<1, 1>>> launch (as done in main);
 * launching more threads would race on the same elements.
 */
__global__ void matavgKernel(int * array, int size ) {
  //array[0] = 5;
  for(int i = 0; i <= size - 1; i ++)
  {
    //cuPrintf("Value is: %d\n", i);
    for(int j = 1; j <= size - 1; j ++)
    {
      if(array[j] < array[j - 1])
      {
        // swap adjacent out-of-order elements
        int c = array[j - 1];
        array[j - 1] = array[j];
        array[j] = c;
      }//end if
    }//end for j
  }//end for i
  //return array;
}//end function
// Entry point: parse size/seed, generate a random array, sort it on the GPU
// with a single-thread kernel, time the round trip, and print the result.
// Fixes vs. original: buffer sizes use sizeof(int) instead of the literal 4,
// and both host buffers are released (they were leaked).
int main( int argc, char* argv[] ) {
  int * array;     // host copy of the random input (new[]-allocated)
  int size, seed;  // element count and RNG seed from the command line
  bool printSorted = false;
  // check the command line args
  if( argc < 3 ){
    std::cerr << "usage: "
              << argv[0]
              << " [amount of random nums to generate] [seed value for rand]" << " [1 to print sorted array, 0 otherwise]"
              << std::endl;
    exit( -1 ); }
  // convert cstrings to ints
  {
    std::stringstream ss1( argv[1] );
    ss1 >> size;
  } {
    std::stringstream ss1( argv[2] );
    ss1 >> seed; }
  // get the random numbers
  array = makeRandArray( size, seed );
  // duplicate into a plain-malloc buffer that will round-trip the GPU
  int * host_array = (int*)malloc(size * sizeof(int));
  for(int i = 0; i <= size - 1; i ++)
  {
    host_array[i] = array[i];
  }//end for i
  print_array(array, size);
  printf("host_array\n");
  print_array(host_array, size);
  hipEvent_t startTotal, stopTotal; float timeTotal; hipEventCreate(&startTotal); hipEventCreate(&stopTotal); hipEventRecord( startTotal, 0 );
  // copy to device, sort with a single-thread kernel, copy back
  int * cuda_array;
  hipMalloc(&cuda_array, size * sizeof(int));
  hipMemcpy(cuda_array, host_array, size * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( matavgKernel) , dim3(1), dim3(1) , 0, 0, cuda_array, size);
  hipMemcpy(host_array, cuda_array, size * sizeof(int), hipMemcpyDeviceToHost);
  hipFree(cuda_array);
  /***********************************
   Stop and destroy the cuda timer
   **********************************/
  hipEventRecord( stopTotal, 0 );
  hipEventSynchronize( stopTotal );
  hipEventElapsedTime( &timeTotal, startTotal, stopTotal );
  hipEventDestroy( startTotal );
  hipEventDestroy( stopTotal );
  std::cerr << "Total time in seconds: "
            << timeTotal / 1000.0 << std::endl;
  printSorted = true;
  if( printSorted ){
    for(int i = 0; i <= size - 1; i ++)
    {
      printf("%d, ", host_array[i]);
    }//end for i
    printf("\n");
  }
  // release host buffers (previously leaked)
  free(host_array);
  delete [] array;
}
| 11aa3689236f8d4116b9f819ecd60a6ed80f325c.cu |
#include <iostream>
#include <sstream>
#include <fstream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda.h>
/*
#include "cuPrintf.cu"`
*/
using namespace std;
// Abort with a file:line diagnostic when a CUDA API call returned an error.
// Compiles to a no-op unless CUDA_CHECK_ERROR is defined.
inline void __cudaSafeCall( cudaError err,
                            const char *file, const int line )
{
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
  do
  {
    if ( cudaSuccess != err )
    {
      fprintf( stderr,
               "cudaSafeCall() failed at %s:%i : %s\n",
               file, line, cudaGetErrorString( err ) );
      exit( -1 );
    }
  } while ( 0 );
#pragma warning( pop )
#endif
  // CUDA_CHECK_ERROR
  return;
}//end function
// Report any latched CUDA error, then synchronize so asynchronous kernel
// failures surface too. Compiles to a no-op unless CUDA_CHECK_ERROR is
// defined. Fixes: cudaThreadSynchronize() is a deprecated alias of
// cudaDeviceSynchronize() (identical semantics) and is replaced; the
// original also nested a second identical `if (cudaSuccess != err)` inside
// the synchronize branch, which is removed. Messages/exit codes unchanged.
inline void __cudaCheckError( const char *file, const int line ) {
#ifdef CUDA_CHECK_ERROR
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
  do
  {
    // Error already latched by a prior API call or kernel launch.
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err )
    {
      fprintf( stderr,
               "cudaCheckError() with sync failed at %s:%i : %s.\n",
               file, line, cudaGetErrorString( err ) );
      exit( -1 );
    }
    // More careful checking. However, this will affect performance.
    // Comment if not needed.
    err = cudaDeviceSynchronize();
    if( cudaSuccess != err )
    {
      fprintf( stderr,
               "cudaCheckError() failed at %s:%i : %s.\n",
               file, line, cudaGetErrorString( err ) );
      exit( -1 );
    }
  } while ( 0 );
#pragma warning( pop )
#endif // CUDA_CHECK_ERROR
  return;
}
// In-place ascending bubble sort: every one of the `size` passes sweeps
// the whole array, swapping adjacent out-of-order pairs so the largest
// remaining value sinks toward the end.
void bubble_sort(int * array, int size)
{
  for (int pass = 0; pass < size; ++pass)
  {
    for (int k = 1; k < size; ++k)
    {
      if (array[k - 1] > array[k])
      {
        const int tmp = array[k];
        array[k] = array[k - 1];
        array[k - 1] = tmp;
      }
    }
  }
}
// Print all elements as "<value>, " on one line, followed by a newline.
void print_array(int * array, int size)
{
  const int * cursor = array;
  const int * end = array + size;
  while (cursor != end)
  {
    printf("%d, ", *cursor);
    ++cursor;
  }
  printf("\n");
}
// Seed the C PRNG with `seed` and return a freshly new[]-allocated array of
// `size` pseudo-random values in [0, 999999]. Caller owns the buffer.
int * makeRandArray( const int size, const int seed ) {
  srand( seed );
  int * values = new int[ size ];
  for ( int k = 0; k < size; ++k ) {
    values[ k ] = std::rand() % 1000000;
  }
  return values;
}
/*
Kernel is fuction to run on GPU.
*/
/*
 * Kernel: sequential in-place bubble sort of `array`.
 * NOTE(review): every launched thread runs the FULL sort over the same
 * array, so this is only safe with a <<<1, 1>>> launch (as done in main);
 * launching more threads would race on the same elements.
 */
__global__ void matavgKernel(int * array, int size ) {
  //array[0] = 5;
  for(int i = 0; i <= size - 1; i ++)
  {
    //cuPrintf("Value is: %d\n", i);
    for(int j = 1; j <= size - 1; j ++)
    {
      if(array[j] < array[j - 1])
      {
        // swap adjacent out-of-order elements
        int c = array[j - 1];
        array[j - 1] = array[j];
        array[j] = c;
      }//end if
    }//end for j
  }//end for i
  //return array;
}//end function
// Entry point: parse size/seed, generate a random array, sort it on the GPU
// with a single-thread kernel, time the round trip, and print the result.
// Fixes vs. original: buffer sizes use sizeof(int) instead of the literal 4,
// and both host buffers are released (they were leaked).
int main( int argc, char* argv[] ) {
  int * array;     // host copy of the random input (new[]-allocated)
  int size, seed;  // element count and RNG seed from the command line
  bool printSorted = false;
  // check the command line args
  if( argc < 3 ){
    std::cerr << "usage: "
              << argv[0]
              << " [amount of random nums to generate] [seed value for rand]" << " [1 to print sorted array, 0 otherwise]"
              << std::endl;
    exit( -1 ); }
  // convert cstrings to ints
  {
    std::stringstream ss1( argv[1] );
    ss1 >> size;
  } {
    std::stringstream ss1( argv[2] );
    ss1 >> seed; }
  // get the random numbers
  array = makeRandArray( size, seed );
  // duplicate into a plain-malloc buffer that will round-trip the GPU
  int * host_array = (int*)malloc(size * sizeof(int));
  for(int i = 0; i <= size - 1; i ++)
  {
    host_array[i] = array[i];
  }//end for i
  print_array(array, size);
  printf("host_array\n");
  print_array(host_array, size);
  cudaEvent_t startTotal, stopTotal; float timeTotal; cudaEventCreate(&startTotal); cudaEventCreate(&stopTotal); cudaEventRecord( startTotal, 0 );
  // copy to device, sort with a single-thread kernel, copy back
  int * cuda_array;
  cudaMalloc(&cuda_array, size * sizeof(int));
  cudaMemcpy(cuda_array, host_array, size * sizeof(int), cudaMemcpyHostToDevice);
  matavgKernel <<< 1, 1 >>> (cuda_array, size);
  cudaMemcpy(host_array, cuda_array, size * sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(cuda_array);
  /***********************************
   Stop and destroy the cuda timer
   **********************************/
  cudaEventRecord( stopTotal, 0 );
  cudaEventSynchronize( stopTotal );
  cudaEventElapsedTime( &timeTotal, startTotal, stopTotal );
  cudaEventDestroy( startTotal );
  cudaEventDestroy( stopTotal );
  std::cerr << "Total time in seconds: "
            << timeTotal / 1000.0 << std::endl;
  printSorted = true;
  if( printSorted ){
    for(int i = 0; i <= size - 1; i ++)
    {
      printf("%d, ", host_array[i]);
    }//end for i
    printf("\n");
  }
  // release host buffers (previously leaked)
  free(host_array);
  delete [] array;
}
|
955fc71f2f1b8b4c012382de6e822f74beed90ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalReplicationPadding.cu"
#else
// Forward pass of temporal (1-D) replication padding: grows the last
// dimension by padL + padR. Accepts a 2D (plane x width) or 3D
// (batch x plane x width) input; the 2D case is treated as batch of one.
void THNN_(TemporalReplicationPadding_updateOutput)(
    THCState *state,
    THCTensor *input,
    THCTensor *output,
    int padL, int padR) {
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  int planeDim = 0;
  int dimw = 1;
  int numBatch = 1;
  int numInputDims = THCTensor_(nDimension)(state, input);
  THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 2 || numInputDims == 3), 2, input,
                  "2D or 3D (batch mode) tensor expected for input, but got: %s")
  if (numInputDims == 3) {
    // Batched input: shift the plane/width dimension indices right by one.
    numBatch = THCTensor_(size)(state, input, 0);
    planeDim++;
    dimw++;
  }
  int numPlanes = THCTensor_(size)(state, input, planeDim);
  int inputW = THCTensor_(size)(state, input, dimw);
  int outputW = inputW + padL + padR;
  THArgCheck(outputW >= 1, 2,
             "input (W: %d)is too small."
             " Calculated output W: %d",
             inputW, outputW);
  THCDeviceTensor<real, 3> devInput;
  THCDeviceTensor<real, 3> devOutput;
  if (numInputDims == 2) {
    // Upcast the outer dimension so both cases run the same 3D kernel.
    THCTensor_(resize2d)(state, output, numPlanes, outputW);
    devInput = toDeviceTensor<real, 2>(state, input).upcastOuter<3>();
    devOutput = toDeviceTensor<real, 2>(state, output).upcastOuter<3>();
  } else {
    THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW);
    devInput = toDeviceTensor<real, 3>(state, input);
    devOutput = toDeviceTensor<real, 3>(state, output);
  }
  // Up to 256 threads along the padded width; grid y/z cover plane/batch.
  int outputPlaneSize = devOutput.getSize(2);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devOutput.getSize(1),
                devOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
  hipLaunchKernelGGL(( TemporalReplicationPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
      devInput, devOutput, padL, padR);
}
// Backward pass of temporal replication padding: validates that gradOutput
// has the padded width, zero-initializes gradInput, and launches the
// gradient kernel (replicated border positions accumulate into the edges).
void THNN_(TemporalReplicationPadding_updateGradInput)(
    THCState *state,
    THCTensor *input,
    THCTensor *gradOutput,
    THCTensor *gradInput,
    int padL, int padR) {
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
             "output gradient tensor must fit into 32-bit index math");
  int planeDim = 0;
  int dimw = 1;
  int numInputDims = THCTensor_(nDimension)(state, input);
  if (numInputDims == 3) {
    // Batched input: shift the plane/width dimension indices right by one.
    planeDim++;
    dimw++;
  }
  int iwidth = input->size(dimw);
  int owidth = iwidth + padL + padR;
  THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THCTensor_(size)(state, gradOutput, dimw));
  // Gradients accumulate, so start from zero.
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);
  THCDeviceTensor<real, 3> devGradInput;
  THCDeviceTensor<real, 3> devGradOutput;
  if (numInputDims == 2) {
    devGradInput = toDeviceTensor<real, 2>(state, gradInput).upcastOuter<3>();
    devGradOutput = toDeviceTensor<real, 2>(state, gradOutput).upcastOuter<3>();
  } else {
    devGradInput = toDeviceTensor<real, 3>(state, gradInput);
    devGradOutput = toDeviceTensor<real, 3>(state, gradOutput);
  }
  // Up to 256 threads along the padded width; grid y/z cover plane/batch.
  int outputPlaneSize = devGradOutput.getSize(2);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devGradOutput.getSize(1),
                devGradOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
  hipLaunchKernelGGL(( TemporalReplicationPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
      devGradInput, devGradOutput, padL, padR);
}
#endif
| 955fc71f2f1b8b4c012382de6e822f74beed90ed.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalReplicationPadding.cu"
#else
// Forward pass of temporal (1-D) replication padding: grows the last
// dimension by padL + padR. Accepts a 2D (plane x width) or 3D
// (batch x plane x width) input; the 2D case is treated as batch of one.
void THNN_(TemporalReplicationPadding_updateOutput)(
    THCState *state,
    THCTensor *input,
    THCTensor *output,
    int padL, int padR) {
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  int planeDim = 0;
  int dimw = 1;
  int numBatch = 1;
  int numInputDims = THCTensor_(nDimension)(state, input);
  THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 2 || numInputDims == 3), 2, input,
                  "2D or 3D (batch mode) tensor expected for input, but got: %s")
  if (numInputDims == 3) {
    // Batched input: shift the plane/width dimension indices right by one.
    numBatch = THCTensor_(size)(state, input, 0);
    planeDim++;
    dimw++;
  }
  int numPlanes = THCTensor_(size)(state, input, planeDim);
  int inputW = THCTensor_(size)(state, input, dimw);
  int outputW = inputW + padL + padR;
  THArgCheck(outputW >= 1, 2,
             "input (W: %d)is too small."
             " Calculated output W: %d",
             inputW, outputW);
  THCDeviceTensor<real, 3> devInput;
  THCDeviceTensor<real, 3> devOutput;
  if (numInputDims == 2) {
    // Upcast the outer dimension so both cases run the same 3D kernel.
    THCTensor_(resize2d)(state, output, numPlanes, outputW);
    devInput = toDeviceTensor<real, 2>(state, input).upcastOuter<3>();
    devOutput = toDeviceTensor<real, 2>(state, output).upcastOuter<3>();
  } else {
    THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW);
    devInput = toDeviceTensor<real, 3>(state, input);
    devOutput = toDeviceTensor<real, 3>(state, output);
  }
  // Up to 256 threads along the padded width; grid y/z cover plane/batch.
  int outputPlaneSize = devOutput.getSize(2);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devOutput.getSize(1),
                devOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
  TemporalReplicationPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
      devInput, devOutput, padL, padR);
}
// Backward pass of temporal replication padding: validates that gradOutput
// has the padded width, zero-initializes gradInput, and launches the
// gradient kernel (replicated border positions accumulate into the edges).
void THNN_(TemporalReplicationPadding_updateGradInput)(
    THCState *state,
    THCTensor *input,
    THCTensor *gradOutput,
    THCTensor *gradInput,
    int padL, int padR) {
  THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
             "input tensor must fit into 32-bit index math");
  THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
             "output gradient tensor must fit into 32-bit index math");
  int planeDim = 0;
  int dimw = 1;
  int numInputDims = THCTensor_(nDimension)(state, input);
  if (numInputDims == 3) {
    // Batched input: shift the plane/width dimension indices right by one.
    planeDim++;
    dimw++;
  }
  int iwidth = input->size(dimw);
  int owidth = iwidth + padL + padR;
  THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THCTensor_(size)(state, gradOutput, dimw));
  // Gradients accumulate, so start from zero.
  THCTensor_(resizeAs)(state, gradInput, input);
  THCTensor_(zero)(state, gradInput);
  THCDeviceTensor<real, 3> devGradInput;
  THCDeviceTensor<real, 3> devGradOutput;
  if (numInputDims == 2) {
    devGradInput = toDeviceTensor<real, 2>(state, gradInput).upcastOuter<3>();
    devGradOutput = toDeviceTensor<real, 2>(state, gradOutput).upcastOuter<3>();
  } else {
    devGradInput = toDeviceTensor<real, 3>(state, gradInput);
    devGradOutput = toDeviceTensor<real, 3>(state, gradOutput);
  }
  // Up to 256 threads along the padded width; grid y/z cover plane/batch.
  int outputPlaneSize = devGradOutput.getSize(2);
  dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
                devGradOutput.getSize(1),
                devGradOutput.getSize(0));
  dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
  TemporalReplicationPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
      devGradInput, devGradOutput, padL, padR);
}
|
9e3e2fb31a50163fb759276a461ba66731c88433.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/transform_reduce.h>
#include "kernels/cuda_helpers.h"
#include "kernels/tensor_operators.h"
#include "3rd_party/reduce_all.h"
namespace marian {
#define CUDA_FLT_MAX 1.70141e+38
// Unary predicate: true when the float is NaN (usable on host and device).
struct isnan_test {
  __host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
// Numerically stable logistic sigmoid: for x >= 0 computes 1/(1+e^-x),
// otherwise e^x/(1+e^x), so expf never receives a large positive argument.
// NOTE(review): the 1.0 literals are double, forcing double-precision
// division in device code — 1.0f would be cheaper; confirm ulp tolerance
// before changing.
__device__ inline float stableLogit(float x) {
  if(x >= 0) {
    float z = expf(-x);
    return 1.0 / (1.0 + z);
  } else {
    float z = expf(x);
    return z / (1.0 + z);
  }
}
// NaN check over a tensor. The thrust-based implementation is commented out;
// this currently always reports "no NaNs" (check deliberately disabled).
bool IsNan(Tensor in) {
  //hipSetDevice(in->getDevice());
  //thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
  //thrust::device_ptr<float> end
  //    = thrust::device_pointer_cast(in->data() + in->size());
  //return thrust::transform_reduce(
  //    begin, end, isnan_test(), 0, thrust::plus<bool>());
  return false;
}
// Concatenate |inputs| into |out| along |axis| for the case where each
// concatenated slice is contiguous in memory: the result is built from a
// sequence of device-to-device async copies, one per input per outer step.
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
  hipSetDevice(out->getDevice());

  // Number of outer slices preceding the concatenation axis.
  int outerSteps = 1;
  for(int i = 0; i < axis; ++i)
    outerSteps *= out->shape()[i];

  size_t dstOffset = 0;
  for(int step = 0; step < outerSteps; ++step) {
    for(auto input : inputs) {
      size_t chunk = input->shape().elements() / outerSteps;
      hipMemcpyAsync(out->data() + dstOffset,
                     input->data() + step * chunk,
                     chunk * sizeof(float),
                     hipMemcpyDeviceToDevice);
      dstOffset += chunk;
    }
  }
  hipStreamSynchronize(0);
}
// Copy a rows x cols sub-block from |in| (row stride cols_in, column
// offset offset_in) into |out| (row stride cols_out, column offset
// offset_out). Rows are distributed over blocks, columns over threads,
// both via grid/block-stride loops.
__global__ void gInsertCols(float* out,
                            const float* in,
                            size_t rows,
                            size_t cols,
                            size_t cols_out,
                            size_t cols_in,
                            size_t offset_out,
                            size_t offset_in) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int row = rowBase + blockIdx.x;
    if(row >= rows)
      continue;
    float* dst = out + row * cols_out + offset_out;
    const float* src = in + row * cols_in + offset_in;
    for(int colBase = 0; colBase < cols; colBase += blockDim.x) {
      int col = colBase + threadIdx.x;
      if(col < cols)
        dst[col] = src[col];
    }
  }
}
// Concatenate along the last (fastest) axis: paste each input as a block
// of columns into the [rows x cols_out] matrix view of |out|.
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
  hipSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols_out = out->shape().back();

  size_t colOffset = 0;
  for(auto in : inputs) {
    ABORT_IF(rows != in->shape().elements() / in->shape().back(),
             "First dimension must be equal");
    int cols_in = in->shape().back();

    int blocks = ::min(MAX_BLOCKS, rows);
    int threads = ::min(MAX_THREADS, cols_in);

    hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0,
      out->data(), in->data(), rows, cols_in, cols_out, cols_in, colOffset, 0);
    colOffset += cols_in;
  }
  hipStreamSynchronize(0);
}
// Dispatch: concatenation on the last axis uses the column-paste kernel,
// any other axis reduces to contiguous chunk copies.
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
  bool lastAxis = (ax == out->shape().size() - 1);
  if(lastAxis)
    Concatenate1(out, inputs);
  else
    ConcatCont(out, inputs, ax);
}
// Split |in| along the last axis: extract consecutive column blocks of the
// [rows x cols_in] matrix view into each output tensor. Mirror of
// Concatenate1, reusing gInsertCols with swapped source/target strides.
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
  hipSetDevice(in->getDevice());

  int rows = in->shape().elements() / in->shape().back();
  int cols_in = in->shape().back();

  size_t colOffset = 0;
  for(auto out : outputs) {
    ABORT_IF(rows != out->shape().elements() / out->shape().back(),
             "First dimension must be equal");
    int cols_out = out->shape().back();

    int blocks = ::min(MAX_BLOCKS, rows);
    int threads = ::min(MAX_THREADS, cols_out);

    hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0,
      out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, colOffset);
    colOffset += cols_out;
  }
  hipStreamSynchronize(0);
}
// Split |in| along |axis| for the contiguous case: inverse of ConcatCont,
// walking the source once and scattering consecutive chunks to the outputs.
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
  hipSetDevice(in->getDevice());

  // Number of outer slices preceding the split axis.
  int outerSteps = 1;
  for(int i = 0; i < axis; ++i)
    outerSteps *= in->shape()[i];

  size_t srcOffset = 0;
  for(int step = 0; step < outerSteps; ++step) {
    for(auto out : outputs) {
      size_t chunk = out->shape().elements() / outerSteps;
      hipMemcpyAsync(out->data() + step * chunk,
                     in->data() + srcOffset,
                     chunk * sizeof(float),
                     hipMemcpyDeviceToDevice);
      srcOffset += chunk;
    }
  }
  hipStreamSynchronize(0);
}
// Dispatch: splitting on the last axis uses the column-extract kernel,
// any other axis reduces to contiguous chunk copies.
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
  bool lastAxis = (ax == in->shape().size() - 1);
  if(lastAxis)
    Split1(outputs, in);
  else
    SplitCont(outputs, in, ax);
}
// Axis permutation: for each output element, gather the input element whose
// coordinate vector is the output coordinate vector routed through |permute|.
__global__ void gTransposeND(gpu::Tensor<float> out,
                             const gpu::Tensor<float> in,
                             const gpu::Array<int, gpu::Shape::size()> permute) {
  constexpr size_t N = gpu::Shape::size();
  gpu::Array<int, N> outCoords;
  gpu::Array<int, N> srcCoords;

  int total = out.shape().elements();
  for(int base = 0; base < total; base += blockDim.x * gridDim.x) {
    int idx = base + blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < total) {
      out.shape().dims(idx, outCoords);
      for(int i = 0; i < N; ++i)
        srcCoords[permute[i]] = outCoords[i];
      out[idx] = in[srcCoords];
    }
  }
}
// Host wrapper for gTransposeND. The user-supplied axis list |vAxis| is
// right-aligned into the fixed-rank gpu::Shape convention: leading axes
// that the caller did not mention map to themselves.
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
  hipSetDevice(out->getDevice());

  gpu::Array<int, gpu::Shape::size()> axes;
  int pad = gpu::Shape::size() - vAxis.size();
  for(int i = 0; i < axes.size(); ++i)
    if(i < pad)
      axes[i] = i;  // untouched leading axis
    else
      axes[i] = vAxis[i - pad] + pad;

  int total = out->shape().elements();
  int threads = ::min(MAX_THREADS, total);
  int blocks = ::min(MAX_BLOCKS, total / threads + (total % threads != 0));

  hipLaunchKernelGGL(( gTransposeND), dim3(blocks), dim3(threads), 0, 0, out, in, axes);
}
// Row-wise masked softmax over the last axis of |outShape|.
// One block processes one (or more, via grid-stride) row; the dynamic
// shared allocation holds 2*blockDim.x floats, of which the second half
// is used as the reduction scratch (first the max, then — after the max
// has been saved into a register — reused for the sum).
// Masked-out entries (mask value 0) contribute neither to the max nor to
// the sum and end up with probability 0.
__global__ void gSoftmax(float* out,
gpu::Shape outShape,
const float* in,
const float* mask,
const gpu::Shape maskShape) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
// Mask may have a smaller shape than the output; then indices are
// translated via bindex (broadcasting).
bool broadcast = outShape != maskShape;
gpu::Array<int, gpu::Shape::size()> dims;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
// Reduction scratch lives in the second half of the shared buffer.
float* _max = _share + blockDim.x;
_max[threadIdx.x] = -CUDA_FLT_MAX; // identity for max under masking
// Phase 1: per-thread partial max over a block-strided column range.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
if(mVal && sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
// Tree reduction of the partial maxima in shared memory.
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
// Phase 2: exponentiate (shifted by max for stability) and sum.
// _sum aliases _max; safe because max is already held in a register.
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
float ex = 0;
if(mVal)
ex = __expf(sp[id] - max);
so[id] = ex;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
// Tree reduction of the partial sums.
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// Phase 3: normalize the stored exponentials.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
so[id] = so[id] / _sum[0];
}
}
}
}
}
// Row-wise softmax over the last axis, optionally restricted by |mask|.
void Softmax(Tensor out, Tensor in, Tensor mask) {
  hipSetDevice(out->getDevice());

  size_t numRows = out->shape().elements() / out->shape().back();
  size_t numCols = out->shape().back();

  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  int threads = ::min(MAX_THREADS, (int)numCols);
  // Two per-thread floats of shared scratch: max and sum reduction buffers.
  int shared = sizeof(float) * threads * 2;

  if(mask)
    hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
      out->data(), out->shape(), in->data(), mask->data(), mask->shape());
  else
    hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
      out->data(), out->shape(), in->data(), 0, out->shape());
}
// Row-wise log-softmax over the last axis: so[id] = sp[id] - max - log(sum).
// Same block-per-row / shared-memory tree-reduction structure as gSoftmax.
// The initial seed sp[threadIdx.x] is in range because the host launcher
// caps blockDim.x at the column count.
__global__ void gLogSoftmax(float* out,
const gpu::Shape outShape,
const float* in) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
// Per-thread partial max over a block-strided column range.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
// Tree-reduce the partial maxima.
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
// _sum aliases _max; safe, max is already in a register.
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
// Store the shifted logits; accumulate exp for the normalizer.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sm = sp[id] - max;
float ex = __expf(sm);
so[id] = sm;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
// Tree-reduce the partial sums.
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// Subtract log of the normalizer from the shifted logits.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
so[id] -= __logf(_sum[0]);
}
}
}
}
// Row-wise log-softmax over the last axis.
void LogSoftmax(Tensor out, Tensor in) {
  hipSetDevice(out->getDevice());

  size_t numRows = out->shape().elements() / out->shape().back();
  size_t numCols = out->shape().back();

  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  int threads = ::min(MAX_THREADS, (int)numCols);
  // Shared scratch for the max and sum reductions.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gLogSoftmax), dim3(blocks), dim3(threads), shared, 0,
    out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
// Softmax backward: grad += val * (adj - sum(val * adj)) per row, where
// val holds the forward softmax output. One block per row; the dot
// product sum(val * adj) is computed with a shared-memory tree reduction.
__global__ void gSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
// Per-thread partial dot product of val and adj.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += valRow[id] * adjRow[id];
}
}
__syncthreads();
// Tree-reduce the partials into _sum[0].
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
// Local float shadows the |val| pointer parameter here.
float val = valRow[id] * (adjRow[id] - _sum[0]);
// Skip zero contributions to avoid a redundant read-modify-write.
if(val)
gradRow[id] += val;
}
}
}
}
}
// Backward step of Softmax: accumulates into |grad| using the forward
// output |val| and the incoming adjoint |adj| (all row-major matrices
// over the last axis).
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
  hipSetDevice(adj->getDevice());

  int numRows = grad->shape().elements() / grad->shape().back();
  int numCols = grad->shape().back();

  int blocks = ::min(MAX_BLOCKS, numRows);
  int threads = ::min(MAX_THREADS, numCols);
  // Shared scratch for the per-row dot-product reduction.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
    grad->data(), adj->data(), val->data(), numRows, numCols);
}
// Log-softmax backward: grad += adj - exp(val) * sum(adj) per row, where
// val holds the forward log-softmax output (so exp(val) recovers the
// probabilities). One block per row; sum(adj) via shared-memory reduction.
__global__ void gLogSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
// Per-thread partial sum of the adjoint row.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
// Tree-reduce the partials into _sum[0].
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
// Backward step of LogSoftmax: accumulates into |grad| from the forward
// output |val| and the incoming adjoint |adj|.
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
  hipSetDevice(adj->getDevice());

  int numRows = grad->shape().elements() / grad->shape().back();
  int numCols = grad->shape().back();

  int blocks = ::min(MAX_BLOCKS, numRows);
  int threads = ::min(MAX_THREADS, numCols);
  // Shared scratch for the per-row adjoint-sum reduction.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gLogSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
    grad->data(), adj->data(), val->data(), numRows, numCols);
}
///////////////////////////////////////////////////////
// One block per row: scan the row and write the column index of its
// maximum value (as a float) to out[row].
//
// Fixes versus the previous version:
//  - maxInd was uninitialized, so any row whose values were all below the
//    hard-coded sentinel (-99999) wrote an indeterminate value;
//  - the sentinel is replaced by -CUDA_FLT_MAX so arbitrarily negative
//    scores are handled correctly;
//  - a guard on |row| makes the kernel safe for grids larger than |rows|.
__global__ void gArgmax(float* out,
                        const float* data,
                        size_t rows,
                        size_t cols) {
  size_t row = blockIdx.x;
  if(row >= rows)
    return;
  size_t startInd = row * cols;
  float maxScore = -CUDA_FLT_MAX;
  size_t maxInd = 0;
  for(size_t col = 0; col < cols; ++col) {
    size_t ind = startInd + col;
    float score = data[ind];
    if(score > maxScore) {
      maxScore = score;
      maxInd = col;
    }
  }
  out[row] = maxInd;
}
///////////////////////////////////////////////////////
// C = alpha * op(A) * op(B) + beta * C on row-major tensors via hipBLAS.
// BLAS gemm is column-major, so the call computes C^T = op(B)^T * op(A)^T
// by swapping the operand order and the m/n extents; the row-major result
// then lands in C with the expected layout.
void Prod(hipblasHandle_t handle,
Tensor C,
const Tensor A,
const Tensor B,
bool transA,
bool transB,
float beta,
float scalar) {
hipSetDevice(C->getDevice());
float alpha = scalar;
// Logical (post-transpose) extents: op(A) is m x k, op(B) is l x n.
size_t m = A->shape().elements() / A->shape().back();
size_t k = A->shape().back();
if(transA)
std::swap(m, k);
size_t l = B->shape().elements() / B->shape().back();
size_t n = B->shape().back();
if(transB)
std::swap(l, n);
// Leading dimensions refer to the stored (untransposed) row-major layout.
size_t lda = A->shape().back();
size_t ldb = B->shape().back();
size_t ldc = B->shape().back();
if(transB)
ldc = B->shape().elements() / B->shape().back();
hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
#if TORCH_HIP_VERSION >= 9000
//cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
#endif
// Note the swapped operand order (B first) — see header comment.
hipblasSgemm(handle,
opB,
opA,
n,
m,
k,
&alpha,
B->data(),
ldb,
A->data(),
lda,
&beta,
C->data(),
ldc);
#if TORCH_HIP_VERSION >= 9000
//cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
#endif
}
// Batched C = alpha * op(A) * op(B) + beta * C over the leading axes,
// using the same column-major operand-swap trick as Prod. A stride of 0
// broadcasts a single-batch operand across all batches.
void ProdBatched(hipblasHandle_t handle,
Tensor C,
const Tensor A,
const Tensor B,
bool transA,
bool transB,
float beta,
float scalar) {
hipSetDevice(C->getDevice());
float alpha = scalar;
// Batch counts = product of all axes before the trailing matrix dims.
size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]);
size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]);
// Logical (post-transpose) extents: op(A) is m x k, op(B) is l x n.
size_t m = A->shape()[-2];
size_t k = A->shape()[-1];
if(transA)
std::swap(m, k);
size_t l = B->shape()[-2];
size_t n = B->shape()[-1];
if(transB)
std::swap(l, n);
// Leading dimensions of the stored row-major matrices.
size_t lda = A->shape()[-1];
size_t ldb = B->shape()[-1];
size_t ldc = B->shape()[-1];
if(transB)
ldc = B->shape()[-2];
hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
#if TORCH_HIP_VERSION >= 9000
//cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
#endif
// Operands swapped (B first); per-batch strides are the element counts
// of one matrix, or 0 to broadcast a singleton batch.
hipblasSgemmStridedBatched(handle,
opB,
opA,
n,
m,
k,
&alpha,
B->data(),
ldb,
batchB == 1 ? 0 : n * k,
A->data(),
lda,
batchA == 1 ? 0 : m * k,
&beta,
C->data(),
ldc,
n * m,
::max(batchA, batchB));
#if TORCH_HIP_VERSION >= 9000
//cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
#endif
}
// Row gather: out[j] = in[sourceRowIdx[j]] for j in [0, rows).
// Rows over blocks, columns over threads, grid/block-stride loops.
__global__ void gCopyRows(float* out,
                          const float* in,
                          size_t cols,
                          const size_t* sourceRowIdx,
                          size_t rows) {
  for(int base = 0; base < rows; base += gridDim.x) {
    int dstRow = base + blockIdx.x;
    if(dstRow >= rows)
      continue;
    const float* src = in + sourceRowIdx[dstRow] * cols;
    float* dst = out + dstRow * cols;
    for(int cBase = 0; cBase < cols; cBase += blockDim.x) {
      int c = cBase + threadIdx.x;
      if(c < cols)
        dst[c] = src[c];
    }
  }
}
// Copy the rows of |in| listed in |indices| into consecutive rows of |out|.
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
  hipSetDevice(out->getDevice());

  size_t cols = in->shape().back();
  size_t numRows = indices.size();

  // Stage the row indices in a temporary device buffer.
  size_t* d_indices;
  CUDA_CHECK(hipMalloc(&d_indices, numRows * sizeof(size_t)));
  CUDA_CHECK(hipMemcpy(d_indices,
                       indices.data(),
                       numRows * sizeof(size_t),
                       hipMemcpyHostToDevice));

  int threads = ::min(MAX_THREADS, (int)cols);
  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, 0,
    out->data(), in->data(), cols, d_indices, numRows);

  CUDA_CHECK(hipFree(d_indices));
}
// Row scatter-add: out[targetRowIdx[j]] += in[j]. Uses atomicAdd because
// several source rows may map to the same target row.
__global__ void gPasteRows(float* out,
                           const float* in,
                           size_t cols,
                           const size_t* targetRowIdx,
                           size_t rows) {
  for(int base = 0; base < rows; base += gridDim.x) {
    int srcRow = base + blockIdx.x;
    if(srcRow >= rows)
      continue;
    float* dst = out + targetRowIdx[srcRow] * cols;
    const float* src = in + srcRow * cols;
    for(int cBase = 0; cBase < cols; cBase += blockDim.x) {
      int c = cBase + threadIdx.x;
      if(c < cols)
        atomicAdd(dst + c, src[c]);
    }
  }
}
// Accumulate consecutive rows of |in| into the rows of |out| named by
// |indices| (inverse of CopyRows; additive on collisions).
void PasteRows(Tensor out,
               const Tensor in,
               const std::vector<size_t>& indices) {
  hipSetDevice(out->getDevice());

  size_t cols = in->shape().back();
  size_t numRows = indices.size();

  // @TODO: turn into tensor
  size_t* d_indices;
  CUDA_CHECK(hipMalloc(&d_indices, numRows * sizeof(size_t)));
  CUDA_CHECK(hipMemcpy(d_indices,
                       indices.data(),
                       numRows * sizeof(size_t),
                       hipMemcpyHostToDevice));

  int threads = ::min(MAX_THREADS, (int)cols);
  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  hipLaunchKernelGGL(( gPasteRows), dim3(blocks), dim3(threads), 0, 0,
    out->data(), in->data(), cols, d_indices, numRows);

  CUDA_CHECK(hipFree(d_indices));
}
/////////////
// Column gather: for every row, out[j][i] = in[j][sourceColIdx[i]].
__global__ void gCopyCols(float* out,
                          const float* in,
                          size_t rows,
                          size_t colsIn,
                          const size_t* sourceColIdx,
                          size_t colsOut) {
  for(int base = 0; base < rows; base += gridDim.x) {
    int row = base + blockIdx.x;
    if(row >= rows)
      continue;
    const float* src = in + row * colsIn;
    float* dst = out + row * colsOut;
    for(int cBase = 0; cBase < colsOut; cBase += blockDim.x) {
      int c = cBase + threadIdx.x;
      if(c < colsOut)
        dst[c] = src[sourceColIdx[c]];
    }
  }
}
// Copy the columns of |in| listed in |indices| into consecutive columns
// of |out|, row by row.
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
  hipSetDevice(out->getDevice());

  size_t numRows = in->shape().elements() / in->shape().back();
  size_t numCols = in->shape().back();
  size_t numPicked = indices.size();

  // Stage the column indices in a temporary device buffer.
  size_t* d_indices;
  CUDA_CHECK(hipMalloc(&d_indices, numPicked * sizeof(size_t)));
  CUDA_CHECK(hipMemcpy(d_indices,
                       indices.data(),
                       numPicked * sizeof(size_t),
                       hipMemcpyHostToDevice));

  int threads = ::min(MAX_THREADS, (int)numPicked);
  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  hipLaunchKernelGGL(( gCopyCols), dim3(blocks), dim3(threads), 0, 0,
    out->data(), in->data(), numRows, numCols, d_indices, numPicked);

  CUDA_CHECK(hipFree(d_indices));
}
// Column scatter: for every row, out[j][targetColIdx[i]] = in[j][i].
// Plain stores (no atomics): within one row each source column writes a
// distinct target slot per index entry.
__global__ void gPasteCols(float* out,
                           const float* in,
                           size_t rows,
                           size_t colsOut,
                           const size_t* targetColIdx,
                           size_t colsIn) {
  for(int base = 0; base < rows; base += gridDim.x) {
    int row = base + blockIdx.x;
    if(row >= rows)
      continue;
    const float* src = in + row * colsIn;
    float* dst = out + row * colsOut;
    for(int cBase = 0; cBase < colsIn; cBase += blockDim.x) {
      int c = cBase + threadIdx.x;
      if(c < colsIn)
        dst[targetColIdx[c]] = src[c];
    }
  }
}
// Write consecutive columns of |in| into the columns of |out| named by
// |indices| (inverse of CopyCols).
void PasteCols(Tensor out,
               const Tensor in,
               const std::vector<size_t>& indices) {
  hipSetDevice(out->getDevice());

  size_t numRows = in->shape().elements() / in->shape().back();
  size_t numCols = in->shape().back();
  size_t numPicked = indices.size();

  // Stage the column indices in a temporary device buffer.
  size_t* d_indices;
  CUDA_CHECK(hipMalloc(&d_indices, numPicked * sizeof(size_t)));
  CUDA_CHECK(hipMemcpy(d_indices,
                       indices.data(),
                       numPicked * sizeof(size_t),
                       hipMemcpyHostToDevice));

  int threads = ::min(MAX_THREADS, (int)numPicked);
  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  hipLaunchKernelGGL(( gPasteCols), dim3(blocks), dim3(threads), 0, 0,
    out->data(), in->data(), numRows, numCols, d_indices, numPicked);

  CUDA_CHECK(hipFree(d_indices));
}
// Gather along |axis|: each output element takes its value from the input
// position whose |axis| coordinate has been routed through d_indices.
__global__ void gSelect(float* out,
                        gpu::Shape outShape,
                        const float* in,
                        const gpu::Shape inShape,
                        int axis,
                        size_t* d_indices) {
  int total = outShape.elements();
  gpu::Array<int, gpu::Shape::size()> coords;
  for(int base = 0; base < total; base += blockDim.x * gridDim.x) {
    int idx = base + blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= total)
      continue;
    outShape.dims(idx, coords);
    coords[axis] = d_indices[coords[axis]];
    out[idx] = in[inShape.index(coords)];
  }
}
// Scatter along |axis| (inverse of gSelect): element |index| of |in| is
// written to the output position whose |axis| coordinate is
// d_indices[original axis coordinate].
//
// Fix: the previous code computed d_indices[dims[index]], indexing the
// fixed-size coordinate array with the flat element index — an
// out-of-bounds read for index >= gpu::Shape::size() — instead of the
// axis coordinate used by the mirror gather in gSelect.
__global__ void gInsert(float* out,
                        gpu::Shape outShape,
                        const float* in,
                        const gpu::Shape inShape,
                        int axis,
                        size_t* d_indices) {
  int length = inShape.elements();
  gpu::Array<int, gpu::Shape::size()> dims;
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      inShape.dims(index, dims);
      dims[axis] = d_indices[dims[axis]];
      int outIndex = outShape.index(dims);
      out[outIndex] = in[index];
    }
  }
}
// Gather along |axis| of |in| into |out| using host-side |indices|.
void Select(Ptr<Allocator<DeviceGPU>> allocator,
            Tensor out,
            const Tensor in,
            int axis,
            const std::vector<size_t>& indices) {
  hipSetDevice(out->getDevice());

  int total = out->shape().elements();
  int threads = ::min(MAX_THREADS, total);
  int blocks = ::min(MAX_BLOCKS, total / threads + (total % threads != 0));

  // Move the gather indices into allocator-managed device memory.
  auto deviceIndices = allocator->alloc<size_t>(indices.size());
  deviceIndices->insert(indices.data(), indices.size());

  // Right-align the user-visible axis into the fixed-rank shape convention.
  int axisGPU = axis + gpu::Shape::size() - out->shape().size();

  hipLaunchKernelGGL(( gSelect), dim3(blocks), dim3(threads), 0, 0,
    out->data(), out->shape(), in->data(), in->shape(),
    axisGPU, deviceIndices->data<size_t>());

  allocator->free(deviceIndices);
}
// Scatter along |axis|: writes |in| into |out| at the positions named by
// host-side |indices| (inverse of Select).
void Insert(Ptr<Allocator<DeviceGPU>> allocator,
            Tensor out,
            const Tensor in,
            int axis,
            const std::vector<size_t>& indices) {
  hipSetDevice(in->getDevice());

  int total = in->shape().elements();
  int threads = ::min(MAX_THREADS, total);
  int blocks = ::min(MAX_BLOCKS, total / threads + (total % threads != 0));

  // Move the scatter indices into allocator-managed device memory.
  auto deviceIndices = allocator->alloc<size_t>(indices.size());
  deviceIndices->insert(indices.data(), indices.size());

  // Right-align the user-visible axis into the fixed-rank shape convention.
  int axisGPU = axis + gpu::Shape::size() - out->shape().size();

  hipLaunchKernelGGL(( gInsert), dim3(blocks), dim3(threads), 0, 0,
    out->data(), out->shape(), in->data(), in->shape(),
    axisGPU, deviceIndices->data<size_t>());

  allocator->free(deviceIndices);
}
// Fused GRU cell forward pass. Per row j and unit i:
//   r = sigmoid(xW[i] + sU[i] + b[i])              (reset gate)
//   z = sigmoid(xW[k] + sU[k] + b[k])              (update gate, k = i+cols)
//   h = tanh(xW[l] + sU[l]*r + b[l])  or, when |final| is set,
//       tanh(xW[l] + (sU[l] + b[l])*r)             (l = i+2*cols)
//   out = (1-z)*h + z*state, blended with the previous state by the
//   per-row mask m (m==0 keeps the old state unchanged).
// xW and sU are packed [r|z|h] with 3*cols values per row.
__global__ void gGRUFastForward(float* out,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
// Missing mask means all rows are active (m == 1).
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowState = state + j * cols;
const float* xWrow = xW + j * cols * 3;
const float* sUrow = sU + j * cols * 3;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float h;
if(final)
h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
else
h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
// Local float shadows the |out| pointer parameter here.
float out = (1.0f - z) * h + z * rowState[i];
rowOut[i] = m * out + (1 - m) * rowState[i];
}
}
}
}
}
// Launch the fused GRU forward cell. inputs = {state, xW, sU, b[, mask]}.
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
  hipSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();

  int blocks = ::min(MAX_BLOCKS, rows);
  int threads = ::min(MAX_THREADS, cols);

  hipLaunchKernelGGL(( gGRUFastForward), dim3(blocks), dim3(threads), 0, 0,
    out->data(),
    inputs[0]->data(),                          // previous state
    inputs[1]->data(),                          // x * W
    inputs[2]->data(),                          // s * U
    inputs[3]->data(),                          // bias
    inputs.size() > 4 ? inputs[4]->data() : 0,  // optional mask
    rows,
    cols,
    final);
}
// Fused GRU cell backward pass: recomputes the forward gates r, z, h and
// accumulates gradients into any of outState / outXW / outSU / outB that
// are non-null. Layout matches gGRUFastForward: xW/sU packed [r|z|h] with
// 3*cols values per row; |final| selects the alternative candidate form
// tanh(xW + (sU + b) * r). outB is shared across rows, hence atomicAdd.
__global__ void gGRUFastBackward(float* outState,
float* outXW,
float* outSU,
float* outB,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
// Missing mask means all rows are active (m == 1).
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
// Offsets of the z and candidate (h) slices in the packed layout.
int k = i + cols;
int l = i + 2 * cols;
// Recompute forward gates (cheaper than storing them).
float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
float h;
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
// Local float shadows the |adj| pointer parameter here.
float adj = rowAdj[i];
// t = (1-z) * dtanh(h); common factor for the r and candidate paths.
float t = (1 - z) * (1 - h * h);
// df/ds
if(outState)
rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final)
dfdxW_r *= rowSU[l] + b[l];
else
dfdxW_r *= rowSU[l];
if(outXW)
rowOutXW[i] += dfdxW_r;
if(outSU)
rowOutSU[i] += dfdxW_r;
if(outB)
atomicAdd(outB + i, dfdxW_r);
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW)
rowOutXW[k] += dfdxW_z;
if(outSU)
rowOutSU[k] += dfdxW_z;
if(outB)
atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW)
rowOutXW[l] += dfdxW_x;
if(outSU)
rowOutSU[l] += dfdxW_x * r;
if(outB)
// In the |final| form the bias sits inside the r-product.
if(final)
atomicAdd(outB + l, dfdxW_x * r);
else
atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
// Launch the fused GRU backward cell. outputs = adjoints of
// {state, xW, sU, b} (any may be null); inputs = {state, xW, sU, b[, mask]}.
void GRUFastBackward(std::vector<Tensor> outputs,
                     std::vector<Tensor> inputs,
                     Tensor adj,
                     bool final) {
  hipSetDevice(adj->getDevice());

  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();

  int blocks = ::min(MAX_BLOCKS, rows);
  int threads = ::min(MAX_THREADS, cols);

  hipLaunchKernelGGL(( gGRUFastBackward), dim3(blocks), dim3(threads), 0, 0,
    outputs[0] ? outputs[0]->data() : 0,        // d state
    outputs[1] ? outputs[1]->data() : 0,        // d xW
    outputs[2] ? outputs[2]->data() : 0,        // d sU
    outputs[3] ? outputs[3]->data() : 0,        // d b
    inputs[0]->data(),                          // state
    inputs[1]->data(),                          // xW
    inputs[2]->data(),                          // sU
    inputs[3]->data(),                          // b
    inputs.size() > 4 ? inputs[4]->data() : 0,  // optional mask
    adj->data(),
    rows,
    cols,
    final);
}
// Per-row cross-entropy against the picked class:
//   out[j] = log(sum_i exp(sp[i] - max)) - (sp[pick[j]] - max)
// i.e. -log softmax(sp)[pick[j]], computed with the usual max-shift for
// numerical stability. One block per row; shared-memory tree reductions.
__global__ void gCrossEntropyPick(float* out,
const gpu::Shape outShape,
const float* in,
const gpu::Shape inShape,
const float* pick) {
int rows = inShape.elements() / inShape.back();
int cols = inShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _max = _share + blockDim.x;
// Seed with element threadIdx.x (in range: launcher caps blockDim.x
// at cols); the strided loop below starts at 1 and covers the rest.
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
// Tree-reduce the partial maxima.
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
// _sum aliases _max; safe, max is already in a register.
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += __expf(sp[id] - max);
}
}
__syncthreads();
// Tree-reduce the partial sums.
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy: only the thread that owns the picked column writes.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id == (int)pick[j]) {
out[j] = __logf(_sum[0]) - sp[id] + max;
}
}
}
}
}
// Per-row cross-entropy of |in| against the class index in |pick|.
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
  hipSetDevice(out->getDevice());

  int numRows = in->shape().elements() / in->shape().back();
  int numCols = in->shape().back();

  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  int threads = ::min(MAX_THREADS, (int)numCols);
  // Shared scratch for the max and sum reductions.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gCrossEntropyPick), dim3(blocks), dim3(threads), shared, 0,
    out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
// Backward of gCrossEntropyPick:
//   so[id] += adj[j] * (softmax(sp)[id] - [id == pick[j]])
// The softmax is recomputed from |in| with the max-shift; one block per
// row with the same shared-memory reduction structure as the forward pass.
__global__ void gCrossEntropyPickBackward(float* out,
const gpu::Shape outShape,
const float* adj,
const float* in,
const float* pick) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
float* so = out + j * cols;
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _max = _share + blockDim.x;
// Seed with element threadIdx.x; strided loop from 1 covers the rest.
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
// Tree-reduce the partial maxima.
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
// _sum aliases _max; safe, max is already in a register.
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = __expf(sp[id] - max);
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
// Tree-reduce the partial sums.
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy gradient: softmax probability minus one-hot target.
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sub = (float)(id == (int)pick[j]);
so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
}
}
}
}
}
// Backward of CrossEntropyPick: accumulates into |out| from the forward
// input |a|, adjoint |adj| and picked class indices |pick|.
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
  hipSetDevice(out->getDevice());

  int numRows = out->shape().elements() / out->shape().back();
  int numCols = out->shape().back();

  int blocks = ::min(MAX_BLOCKS, (int)numRows);
  int threads = ::min(MAX_THREADS, (int)numCols);
  // Shared scratch for the max and sum reductions.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gCrossEntropyPickBackward), dim3(blocks), dim3(threads), shared, 0,
    out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
// Euclidean norm sqrt(sum(x^2)) of |in|, computed on the device.
// A temporary device buffer receives the per-block partial results of
// ReduceAll; the scalar is then fetched to the host via out->get(0).
//
// Fix: the raw hipMalloc/hipFree calls were unchecked, unlike every
// other allocation in this file; they are now wrapped in CUDA_CHECK.
float L2Norm(Tensor in) {
  using namespace functional;
  hipSetDevice(in->getDevice());
  int size = in->shape().elements();
  int threads = ::min(MAX_THREADS, size);
  int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0));
  uint8_t* data;
  CUDA_CHECK(hipMalloc(&data, blocks * sizeof(float)));
  Tensor out(new TensorBase(
      New<MemoryPiece>(data, blocks * sizeof(float)), {1, blocks}, in->getDevice()));
  ReduceAll(_1 * _1, out, in);
  float dataCpu = sqrtf(out->get(0));
  out.reset();
  CUDA_CHECK(hipFree(data));
  return dataCpu;
}
// Additive (Bahdanau-style) attention score:
//   out[j] = sum_i va[i] * tanh(ctx[j,:] + state[j',:])[i]
// where j runs over batch*time*beam rows; the context row repeats every
// b*t rows and the matching state row is derived from the beam/batch
// decomposition of j. One block per row; shared-memory sum reduction.
__global__ void gAtt(float* out,
const float* va,
const float* ctx,
const float* state,
int m, // total rows (batch x time x beam)
int k, // depth
int b, // batch size
int t // time of ctx
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* vaRow = va;
// Context cycles over batch*time; state is indexed per beam and batch.
const float* ctxRow = ctx + (j % (b * t)) * cols;
const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
extern __shared__ float _share[];
// Reduction scratch in the second half of the shared allocation.
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
// Per-thread partial sum of va * tanh(ctx + state).
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = ctxRow[id] + stateRow[id];
float ex = tanhf(z) * vaRow[id];
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
// Tree-reduce the partials into _sum[0].
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
out[j] = _sum[0];
__syncthreads();
}
}
}
// Additive attention scores over all (batch, time, beam) rows.
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
  hipSetDevice(out->getDevice());

  size_t totalRows = out->shape().elements() / out->shape().back();
  size_t depth = context->shape()[-1];
  size_t batch = context->shape()[-2];
  size_t ctxTime = context->shape()[-3];

  int blocks = ::min(MAX_BLOCKS, (int)totalRows);
  int threads = ::min(MAX_THREADS, (int)depth);
  // Shared scratch for the per-row sum reduction.
  int shared = sizeof(float) * threads * 2;

  hipLaunchKernelGGL(( gAtt), dim3(blocks), dim3(threads), shared, 0,
    out->data(), va->data(), context->data(), state->data(),
    totalRows, depth, batch, ctxTime);
}
// Backward of the additive attention score. With z = ctx + state,
// t = tanh(z) and r = va * (1 - t^2):
//   gContext += r * adj ;  gState += r * adj ;  gVa += t * adj.
// gState and gVa are accumulated with plain += / atomicAdd respectively;
// state rows repeat every n rows, so gState updates may collide across
// blocks — NOTE(review): this += relies on the launch using one block per
// state row (grid = n blocks); verify against the host wrapper.
__global__ void gAttBack(float* gVa,
float* gContext,
float* gState,
const float* va,
const float* context,
const float* state,
const float* adj,
int m, // rows
int k, // cols
int n // batch size
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < m; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* gcRow = gContext + j * cols;
float* gsRow = gState + (j % n) * cols;
const float* cRow = context + j * cols;
const float* sRow = state + (j % n) * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = cRow[id] + sRow[id];
float t = tanhf(z);
// Derivative of tanh times the attention vector weight.
float r = va[id] * (1.f - t * t);
gcRow[id] += r * adj[j];
gsRow[id] += r * adj[j];
// gVa is shared by all rows — atomic accumulation required.
atomicAdd(gVa + id, t * adj[j]);
}
}
}
}
}
// Backward pass of the additive attention score computation; fills the
// adjoints of va, context and state from |adj|.
void AttBack(Tensor gVa,
             Tensor gContext,
             Tensor gState,
             Tensor va,
             Tensor context,
             Tensor state,
             Tensor adj) {
  hipSetDevice(adj->getDevice());

  size_t m = adj->shape().elements() / adj->shape().back();

  size_t rank = context->shape().size();
  size_t k = context->shape()[rank - 1];
  size_t n = context->shape()[rank - 2];

  int blocks = ::min(MAX_BLOCKS, (int)n);
  int threads = ::min(MAX_THREADS, (int)k);

  hipLaunchKernelGGL(( gAttBack), dim3(blocks), dim3(threads), 0, 0,
    gVa->data(), gContext->data(), gState->data(),
    va->data(), context->data(), state->data(),
    adj->data(),
    m, k, n);
}
// Row-wise layer normalization:
//   out[j][i] = alpha[i] * (in[j][i] - mean_j) / sqrt(eps + var_j) (+ beta[i])
// One block handles one row at a time (grid-stride over rows); threads
// cooperate via a shared-memory tree reduction, first for the mean and then
// for the variance. `beta` may be nullptr (no shift). Requires the launcher
// to provide at least 2 * blockDim.x floats of dynamic shared memory.
__global__ void gLNormalization(float* out,
                                const float* in,
                                const float* alpha,
                                const float* beta,
                                int rows,
                                int cols,
                                float eps = 1e-9) {
  extern __shared__ float _share[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      // Reduction scratch lives in the second half of the dynamic shared
      // buffer (offset blockDim.x floats); the first half is unused here.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0f;
      // Per-thread partial sums over the row.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += sp[id];
        }
      }
      __syncthreads();
      // Tree reduction; `skip` rounds up so odd lengths fold correctly.
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = _sum[0] / cols;
      __syncthreads();
      // Second pass: sum of squared deviations. Reuses the same scratch
      // region; `mean` was already copied into a register above.
      float* _sqSum = _share + blockDim.x;
      _sqSum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = sp[id] - mean;
          _sqSum[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // eps inside the sqrt guards against zero variance.
      float sigma = sqrtf(eps + (_sqSum[0] / cols));
      __syncthreads();
      // Normalize, scale by alpha and optionally shift by beta.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float t = alpha[id] * ((sp[id] - mean) / sigma);
          if(beta != nullptr)
            t += beta[id];
          so[id] = t;
        }
      }
    }
  }
}
// Launch the row-wise layer-normalization kernel.
// `beta` is optional; when null, no shift is applied after scaling by gamma.
void LayerNormalization(Tensor out,
                        Tensor in,
                        Tensor gamma,
                        Tensor beta,
                        float eps) {
  hipSetDevice(out->getDevice());

  int rowCount = in->shape().elements() / in->shape().back();
  int colCount = in->shape().back();

  int gridSize = ::min(MAX_BLOCKS, rowCount);
  int blockSize = ::min(MAX_THREADS, colCount);
  // The kernel expects two float buffers per thread in shared memory.
  int sharedBytes = 2 * blockSize * sizeof(float);

  hipLaunchKernelGGL(( gLNormalization), dim3(gridSize), dim3(blockSize), sharedBytes, 0,
                     out->data(),
                     in->data(),
                     gamma->data(),
                     beta ? beta->data() : nullptr,
                     rowCount,
                     colCount,
                     eps);
}
// Backward pass of layer normalization.
// Recovers the normalized value x_hat from the forward output y as
// (y - beta) / gamma, reduces three row statistics (sum of adj, sum of
// adj * x_hat, sum of x) plus the variance in shared memory, then applies
// the standard layer-norm gradient formula. The per-element gradient w.r.t.
// x is clamped to [-1000, 1000]. gradGamma/gradBeta are accumulated with
// atomicAdd because every row contributes to the same parameter slots.
// Requires 4 * blockDim.x floats of dynamic shared memory.
__global__ void gLayerNormalizationGrad(float* gradX,
                                        float* gradGamma,
                                        float* gradBeta,
                                        float* adj,
                                        float* y,
                                        float* x,
                                        float* gamma,
                                        float* beta,
                                        int rows,
                                        int cols,
                                        float eps = 1e-9) {
  extern __shared__ float shared[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      // Four disjoint reduction buffers carved out of the shared allocation.
      float* sum_adj = shared;
      float* sum_adj_x = shared + blockDim.x;
      float* sum_x = shared + 2 * blockDim.x;
      float* sum_sqr = shared + 3 * blockDim.x;
      const float* xRow = x + j * cols;
      const float* yRow = y + j * cols;
      const float* adjRow = adj + j * cols;
      float* gradXRow = gradX + j * cols;
      sum_x[threadIdx.x] = 0.0f;
      sum_adj[threadIdx.x] = 0.0f;
      sum_adj_x[threadIdx.x] = 0.0f;
      sum_sqr[threadIdx.x] = 0.0f;
      // Partial sums: x, adj, and adj * x_hat (x_hat recovered from y).
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          sum_x[threadIdx.x] += xRow[id];
          sum_adj_x[threadIdx.x]
              += adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          sum_adj[threadIdx.x] += adjRow[id];
        }
      }
      __syncthreads();
      // Tree-reduce all three statistics simultaneously.
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
          sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
          sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = sum_x[0] / cols;
      __syncthreads();
      // Second pass: variance reduction.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = xRow[id] - mean;
          sum_sqr[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float sigma = sqrtf(eps + (sum_sqr[0] / cols));
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float grad_x = 0.0f;
          float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          // grad_x = (N * adj - sum(adj) - x_hat * sum(adj * x_hat)) / (N * sigma)
          grad_x += cols * adjRow[id];
          grad_x -= sum_adj[0];
          grad_x -= sum_adj_x[0] * x_hat;
          grad_x /= (cols * sigma);
          float valX = gamma[id] * grad_x;
          // Clamp the gradient magnitude to 1000 to limit blow-ups.
          float sign = (0.f < valX) - (valX < 0.f);
          valX = fabs(valX) > 1000 ? sign * 1000 : valX;
          gradXRow[id] += valX;
          atomicAdd(gradGamma + id, adjRow[id] * x_hat);
          if(beta) {
            atomicAdd(gradBeta + id, adjRow[id]);
          }
        }
      }
    }
  }
}
// Launch the layer-normalization backward kernel. gradBeta/beta are optional
// and passed through as null when absent.
void LayerNormalizationGrad(Tensor gradX,
                            Tensor gradGamma,
                            Tensor gradBeta,
                            Tensor adj,
                            Tensor y,
                            Tensor x,
                            Tensor gamma,
                            Tensor beta,
                            float eps) {
  hipSetDevice(adj->getDevice());

  int rowCount = y->shape().elements() / y->shape().back();
  int colCount = y->shape().back();

  int blockSize = ::min(MAX_THREADS, colCount);
  int gridSize = ::min(MAX_BLOCKS, rowCount);
  // Kernel needs four reduction buffers of blockDim.x floats each.
  int sharedBytes = 4 * blockSize * sizeof(float);

  hipLaunchKernelGGL(( gLayerNormalizationGrad), dim3(gridSize), dim3(blockSize), sharedBytes, 0,
                     gradX->data(),
                     gradGamma->data(),
                     (gradBeta) ? gradBeta->data() : nullptr,
                     adj->data(),
                     y->data(),
                     x->data(),
                     gamma->data(),
                     (beta) ? beta->data() : nullptr,
                     rowCount,
                     colCount,
                     eps);
}
// Shift the flattened input by `offset` elements: out[i] = in[i - offset],
// zero-filling positions whose source falls outside [0, length).
__global__ void gShift(float* out, const float* in, int length, int offset) {
  for(int start = 0; start < length; start += blockDim.x * gridDim.x) {
    int i = start + blockDim.x * blockIdx.x + threadIdx.x;
    if(i < length) {
      int src = i - offset;
      out[i] = (src < 0 || src >= length) ? 0 : in[src];
    }
  }
}
// Shift `in` into `out` by the per-axis offsets in `shift` (zero-filled at
// the boundaries). `invert` negates the shift direction. The per-axis shift
// is collapsed into a single linear element offset via the input strides.
void Shift(Tensor out, Tensor in, Shape shift, bool invert) {
  UTIL_THROW_IF2(in->shape().size() != shift.size(), "bad dimensions");

  // Flatten the per-axis shift into one linear offset.
  int offset = 0;
  for(int i = 0; i < shift.size(); ++i)
    offset += in->shape().stride(i) * shift[i];

  if(invert)
    offset = -offset;

  hipSetDevice(out->getDevice());

  int length = out->shape().elements();
  // Guard empty tensors: avoids a zero-thread launch and the division by
  // zero in the block-count computation below.
  if(length == 0)
    return;

  int threads = ::min(MAX_THREADS, length);
  int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  hipLaunchKernelGGL(( gShift), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset);
}
// Scatter assignment: out[indices[i]] = values[i] for every i < length.
__global__ void gSetSparse(float* out,
                           const size_t* indices,
                           const float* values,
                           int length) {
  for(int start = 0; start < length; start += blockDim.x * gridDim.x) {
    int i = start + blockDim.x * blockIdx.x + threadIdx.x;
    if(i < length)
      out[indices[i]] = values[i];
  }
}
// Copy sparse (index, value) pairs to the device and scatter them into the
// device buffer `out`. `indices` and `values` must have equal length.
void SetSparse(float* out,
               const std::vector<size_t>& indices,
               const std::vector<float>& values) {
  int length = indices.size();
  // Empty update: previously length == 0 produced threads == 0 and a
  // division by zero in the block count below; nothing to do in that case.
  if(length == 0)
    return;

  int threads = ::min(MAX_THREADS, length);
  int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));

  size_t* d_indices;
  CUDA_CHECK(hipMalloc(&d_indices, length * sizeof(size_t)));
  CUDA_CHECK(hipMemcpy(d_indices,
                       indices.data(),
                       length * sizeof(size_t),
                       hipMemcpyHostToDevice));

  float* d_values;
  CUDA_CHECK(hipMalloc(&d_values, length * sizeof(float)));
  CUDA_CHECK(hipMemcpy(
      d_values, values.data(), length * sizeof(float), hipMemcpyHostToDevice));

  hipLaunchKernelGGL(( gSetSparse), dim3(blocks), dim3(threads), 0, 0, out, d_indices, d_values, length);

  hipFree(d_indices);
  hipFree(d_values);
}
/******************************************************************************/
// Fused LSTM cell-state update:
//   out = m * (gf * cell + gi * gc) + (1 - m) * cell
// where gf/gi/gc are forget/input/candidate gates computed from the packed
// pre-activations xW + sU + b (4 blocks of `cols` per row; slot 3, the
// output gate, is consumed by gLSTMOutputForward). A zero mask entry keeps
// the previous cell state for that row.
__global__ void gLSTMCellForward(float* out,
                                 const float* cell,
                                 const float* xW,
                                 const float* sU,
                                 const float* b,
                                 const float* mask,
                                 size_t rows,
                                 size_t cols) {
  for(int rowStart = 0; rowStart < rows; rowStart += gridDim.x) {
    int row = rowStart + blockIdx.x;
    if(row < rows) {
      float m = !mask || mask[row];

      float* outRow = out + row * cols;
      const float* cellRow = cell + row * cols;
      const float* xWRow = xW + row * cols * 4;
      const float* sURow = sU + row * cols * 4;

      for(int colStart = 0; colStart < cols; colStart += blockDim.x) {
        int col = colStart + threadIdx.x;
        if(col < cols) {
          int iIdx = col + cols;      // input-gate slot
          int cIdx = col + 2 * cols;  // candidate slot

          float gf = stableLogit(xWRow[col] + sURow[col] + b[col]);
          float gi = stableLogit(xWRow[iIdx] + sURow[iIdx] + b[iIdx]);
          float gc = tanhf(xWRow[cIdx] + sURow[cIdx] + b[cIdx]);

          float updated = gf * cellRow[col] + gi * gc;
          outRow[col] = m * updated + (1 - m) * cellRow[col];
        }
      }
    }
  }
}
// Launch the fused LSTM cell-state kernel.
// inputs: [0] previous cell state, [1] x*W, [2] s*U, [3] bias,
//         [4] (optional) per-row mask.
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
  hipSetDevice(out->getDevice());

  int rowCount = out->shape().elements() / out->shape().back();
  int colCount = out->shape().back();

  int gridSize = ::min(MAX_BLOCKS, rowCount);
  int blockSize = ::min(MAX_THREADS, colCount);

  const float* maskData = inputs.size() > 4 ? inputs[4]->data() : 0;

  hipLaunchKernelGGL(( gLSTMCellForward), dim3(gridSize), dim3(blockSize), 0, 0,
                     out->data(),
                     inputs[0]->data(),
                     inputs[1]->data(),
                     inputs[2]->data(),
                     inputs[3]->data(),
                     maskData,
                     rowCount,
                     colCount);
}
// LSTM hidden-state output: out = sigmoid(o) * tanh(cell), where the output
// gate pre-activation lives in slot 3 (offset 3 * cols) of the packed
// xW / sU / b rows.
__global__ void gLSTMOutputForward(float* out,
                                   const float* cell,
                                   const float* xW,
                                   const float* sU,
                                   const float* b,
                                   size_t rows,
                                   size_t cols) {
  for(int rowStart = 0; rowStart < rows; rowStart += gridDim.x) {
    int row = rowStart + blockIdx.x;
    if(row < rows) {
      float* outRow = out + row * cols;
      const float* cellRow = cell + row * cols;
      const float* xWRow = xW + row * cols * 4;
      const float* sURow = sU + row * cols * 4;

      for(int colStart = 0; colStart < cols; colStart += blockDim.x) {
        int col = colStart + threadIdx.x;
        if(col < cols) {
          int oIdx = col + 3 * cols;  // output-gate slot
          float go = stableLogit(xWRow[oIdx] + sURow[oIdx] + b[oIdx]);
          outRow[col] = go * tanhf(cellRow[col]);
        }
      }
    }
  }
}
// Launch the LSTM hidden-state output kernel.
// inputs: [0] cell state, [1] x*W, [2] s*U, [3] bias.
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
  hipSetDevice(out->getDevice());

  int rowCount = out->shape().elements() / out->shape().back();
  int colCount = out->shape().back();

  int gridSize = ::min(MAX_BLOCKS, rowCount);
  int blockSize = ::min(MAX_THREADS, colCount);

  hipLaunchKernelGGL(( gLSTMOutputForward), dim3(gridSize), dim3(blockSize), 0, 0,
                     out->data(),
                     inputs[0]->data(),
                     inputs[1]->data(),
                     inputs[2]->data(),
                     inputs[3]->data(),
                     rowCount,
                     colCount);
}
// Backward pass of gLSTMCellForward.
// Recomputes the forget/input/candidate gate activations from the packed
// pre-activations and distributes the incoming gradient `adj` into the
// optional outputs (any of which may be null): previous cell state, xW, sU
// and the bias. The bias gradient is shared across rows and therefore
// accumulated with atomicAdd. A zero mask entry routes the gradient straight
// through to the previous cell state.
__global__ void gLSTMCellBackward(float* outCell,
                                  float* outXW,
                                  float* outSU,
                                  float* outB,
                                  const float* cell,
                                  const float* xW,
                                  const float* sU,
                                  const float* b,
                                  const float* mask,
                                  const float* adj,
                                  size_t rows,
                                  size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          // Recompute the forward gate activations.
          float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          int k = i + cols;
          float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          int l = i + 2 * cols;
          float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
          // Renamed from `adj`: the original local shadowed the kernel
          // parameter of the same name.
          float adjVal = rowAdj[i];
          // dc/dc_{t-1}: masked rows (m == 0) pass the gradient through.
          if(outCell)
            rowOutCell[i] += (m * gf - m + 1) * adjVal;
          // dc/d(b_f) = dc/d(xW_f) = dc/d(sU_f)  (sigmoid' = gf * (1 - gf))
          float dcdxf = m * rowCell[i] * gf * (1 - gf) * adjVal;
          if(outXW)
            rowOutXW[i] += dcdxf;
          if(outSU)
            rowOutSU[i] += dcdxf;
          if(outB)
            atomicAdd(outB + i, dcdxf);
          // Input gate.
          float dcdb_i = m * gc * gi * (1 - gi) * adjVal;
          if(outXW)
            rowOutXW[k] += dcdb_i;
          if(outSU)
            rowOutSU[k] += dcdb_i;
          if(outB)
            atomicAdd(outB + k, dcdb_i);
          // Candidate cell (tanh' = 1 - gc^2).
          float dcdxc = m * gi * (1 - gc * gc) * adjVal;
          if(outXW)
            rowOutXW[l] += dcdxc;
          if(outSU)
            rowOutSU[l] += dcdxc;
          if(outB)
            atomicAdd(outB + l, dcdxc);
        }
      }
    }
  }
}
// Launch the LSTM cell backward kernel. Null entries in `outputs` skip the
// corresponding gradient accumulation inside the kernel.
// outputs: [0] cell-state grad, [1] xW grad, [2] sU grad, [3] bias grad.
// inputs:  [0] cell state, [1] x*W, [2] s*U, [3] bias, [4] optional mask.
void LSTMCellBackward(std::vector<Tensor> outputs,
                      std::vector<Tensor> inputs,
                      Tensor adj) {
  hipSetDevice(adj->getDevice());

  int rowCount = adj->shape().elements() / adj->shape().back();
  int colCount = adj->shape().back();

  int gridSize = ::min(MAX_BLOCKS, rowCount);
  int blockSize = ::min(MAX_THREADS, colCount);

  const float* maskData = inputs.size() > 4 ? inputs[4]->data() : 0;

  hipLaunchKernelGGL(( gLSTMCellBackward), dim3(gridSize), dim3(blockSize), 0, 0,
                     outputs[0] ? outputs[0]->data() : 0,
                     outputs[1] ? outputs[1]->data() : 0,
                     outputs[2] ? outputs[2]->data() : 0,
                     outputs[3] ? outputs[3]->data() : 0,
                     inputs[0]->data(),
                     inputs[1]->data(),
                     inputs[2]->data(),
                     inputs[3]->data(),
                     maskData,
                     adj->data(),
                     rowCount,
                     colCount);
}
// Backward pass of gLSTMOutputForward (out = sigmoid(o) * tanh(cell)).
// Recomputes the output gate from slot 3 of the packed pre-activations and
// distributes the incoming gradient into the optional outputs (any of which
// may be null): cell state, xW, sU and bias. Bias gradients use atomicAdd
// because they are shared across rows.
__global__ void gLSTMOutputBackward(float* outCell,
                                    float* outXW,
                                    float* outSU,
                                    float* outB,
                                    const float* cell,
                                    const float* xW,
                                    const float* sU,
                                    const float* b,
                                    const float* adj,
                                    size_t rows,
                                    size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + 3 * cols;  // output-gate slot
          float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          float t = tanhf(rowCell[i]);
          // Renamed from `adj`: the original local shadowed the kernel
          // parameter of the same name.
          float adjVal = rowAdj[i];
          // dc/dc: go * tanh'(cell) = go * (1 - t^2)
          if(outCell)
            rowOutCell[i] += go * (1 - t * t) * adjVal;
          // dc/d(b_o) = dc/d(xW_o) = dc/d(sU_o)  (sigmoid' = go * (1 - go))
          float dcdxo = t * go * (1 - go) * adjVal;
          if(outXW)
            rowOutXW[k] += dcdxo;
          if(outSU)
            rowOutSU[k] += dcdxo;
          if(outB)
            atomicAdd(outB + k, dcdxo);
        }
      }
    }
  }
}
// Launch the LSTM output backward kernel. Null entries in `outputs` skip the
// corresponding gradient accumulation inside the kernel.
void LSTMOutputBackward(std::vector<Tensor> outputs,
                        std::vector<Tensor> inputs,
                        Tensor adj) {
  hipSetDevice(adj->getDevice());

  int rowCount = adj->shape().elements() / adj->shape().back();
  int colCount = adj->shape().back();

  int gridSize = ::min(MAX_BLOCKS, rowCount);
  int blockSize = ::min(MAX_THREADS, colCount);

  hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(gridSize), dim3(blockSize), 0, 0,
                     outputs[0] ? outputs[0]->data() : 0,
                     outputs[1] ? outputs[1]->data() : 0,
                     outputs[2] ? outputs[2]->data() : 0,
                     outputs[3] ? outputs[3]->data() : 0,
                     inputs[0]->data(),
                     inputs[1]->data(),
                     inputs[2]->data(),
                     inputs[3]->data(),
                     adj->data(),
                     rowCount,
                     colCount);
}
// Highway layer: out = sigmoid(t) * in1 + (1 - sigmoid(t)) * in2,
// element-wise over a flat buffer of `length` floats.
__global__ void gHighwayForward(float* out,
                                const float* in1,
                                const float* in2,
                                const float* t,
                                size_t length) {
  for(int start = 0; start < length; start += blockDim.x * gridDim.x) {
    int i = start + blockDim.x * blockIdx.x + threadIdx.x;
    if(i < length) {
      float gate = stableLogit(t[i]);
      out[i] = gate * in1[i] + (1.f - gate) * in2[i];
    }
  }
}
// Launch the element-wise highway forward kernel.
void HighwayForward(Tensor out,
                    const Tensor in1,
                    const Tensor in2,
                    const Tensor t) {
  hipSetDevice(out->getDevice());

  int elementCount = out->shape().elements();
  int blockSize = ::min(MAX_THREADS, elementCount);
  int gridSize = ::min(MAX_BLOCKS,
                       elementCount / blockSize + (elementCount % blockSize != 0));

  hipLaunchKernelGGL(( gHighwayForward), dim3(gridSize), dim3(blockSize), 0, 0,
                     out->data(), in1->data(), in2->data(), t->data(), elementCount);
}
// Gradients of the highway layer w.r.t. both inputs and the gate
// pre-activation:
//   out1 = s * adj,  out2 = (1 - s) * adj,
//   outt = s * (1 - s) * (in1 - in2) * adj,   with s = sigmoid(t).
__global__ void gHighwayBackward(float* out1,
                                 float* out2,
                                 float* outt,
                                 const float* in1,
                                 const float* in2,
                                 const float* t,
                                 const float* adj,
                                 size_t length) {
  for(int start = 0; start < length; start += blockDim.x * gridDim.x) {
    int i = start + blockDim.x * blockIdx.x + threadIdx.x;
    if(i < length) {
      float gate = stableLogit(t[i]);
      out1[i] = gate * adj[i];
      out2[i] = (1.f - gate) * adj[i];
      outt[i] = gate * (1.f - gate) * (in1[i] - in2[i]) * adj[i];
    }
  }
}
// Launch the highway backward kernel.
void HighwayBackward(Tensor out1,
                     Tensor out2,
                     Tensor outt,
                     const Tensor in1,
                     const Tensor in2,
                     const Tensor t,
                     const Tensor adj) {
  hipSetDevice(out1->getDevice());

  int elementCount = out1->shape().elements();
  int blockSize = ::min(MAX_THREADS, elementCount);
  int gridSize = ::min(MAX_BLOCKS,
                       elementCount / blockSize + (elementCount % blockSize != 0));

  hipLaunchKernelGGL(( gHighwayBackward), dim3(gridSize), dim3(blockSize), 0, 0,
                     out1->data(),
                     out2->data(),
                     outt->data(),
                     in1->data(),
                     in2->data(),
                     t->data(),
                     adj->data(),
                     elementCount);
}
// Masked max pooling: one thread per output element takes the maximum of a
// `width`-wide window of masked input values. The last window of each row is
// shortened to `lastWidth`. Note: `inRows` is unused here.
// NOTE(review): for the last window, `localMask`'s column offset is computed
// with the already-shortened `width` (= lastWidth) while `b` was offset with
// the full `width` — confirm this matches the intended mask layout.
// NOTE(review): the output is written as out[rowId + colId * outCols], i.e.
// transposed relative to the input traversal — presumably intentional;
// verify against the shapes set up in PoolingWithMaskingForward.
__global__ void gMaxPoolingForward(float* out,
                                   int outRows,
                                   int outCols,
                                   float* in,
                                   int inRows,
                                   int inCols,
                                   float* mask,
                                   int numKernels,
                                   int maskCols,
                                   int width,
                                   int lastWidth) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  if (tid >= outRows * outCols) return;

  int rowId = tid / outRows;
  int colId = tid % outRows;

  float* b = in + (rowId * inCols) + (colId * width);

  // Shorten the final pooling window.
  if (colId == outRows - 1) {
    width = lastWidth;
  }

  float* localMask = mask  + (rowId / numKernels) * maskCols + colId * width;

  float currentMax = b[0] * localMask[0];
  for (int i = 1; i < width; ++i) {
    if (b[i] * localMask[i] > currentMax) {
      currentMax = b[i] * localMask[i];
    }
  }

  out[rowId + (colId * outCols)] = currentMax;
}
// Launch masked max pooling over windows of `width` columns; the final
// window may be shorter. `isEven` drops one trailing column from the
// window-size computation.
void PoolingWithMaskingForward(Tensor out,
                               Tensor in,
                               Tensor mask,
                               int width,
                               bool isEven) {
  // One thread per output element.
  int total = out->shape().elements();
  int blockSize = ::min(total, MAX_THREADS);
  int gridSize = total / blockSize + (total % blockSize != 0);

  Shape& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];

  Shape& outShape = out->shape();
  int outRows = outShape[2];
  int outCols = outShape[0] * outShape[1];

  // Width of the final (possibly truncated) pooling window.
  int tail = (inCols - isEven) % width;
  int lastWidth = (tail == 0) ? width : tail;

  hipLaunchKernelGGL(( gMaxPoolingForward), dim3(gridSize), dim3(blockSize), 0, 0,
                     out->data(), outRows, outCols,
                     in->data(), inRows, inCols,
                     mask->data(), outShape[1], mask->shape()[2],
                     width, lastWidth);
}
// Backward pass of masked max pooling: re-finds the argmax of each masked
// window and routes the incoming gradient to that single input position.
// Mirrors the indexing of gMaxPoolingForward, including its last-window
// shortening. Note: `inRows` is unused here.
// NOTE(review): as in the forward kernel, `localMask` is offset with the
// possibly-shortened `width` for the last window — confirm the mask layout.
__global__ void gMaxPoolingBackward(float* adj,
                                    int adjRows,
                                    int adjCols,
                                    float* in,
                                    float* adjIn,
                                    int inRows,
                                    int inCols,
                                    float* mask,
                                    int numKernels,
                                    int maskCols,
                                    int width,
                                    int lastWidth)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  if (tid >= adjRows * adjCols) return;

  int rowId = tid / adjRows;
  int colId = tid % adjRows;

  float* b = in + (rowId * inCols) + (colId * width);

  // Shorten the final pooling window.
  if (colId == adjRows - 1) {
    width = lastWidth;
  }

  float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
  // Argmax over the masked window (index 0 is the implicit initial winner).
  size_t currentMaxIdx = 0;
  for (int i = 1; i < width; ++i) {
    if (b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
      currentMaxIdx = i;
    }
  }

  adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx] += adj[rowId + (colId * adjCols)];
}
// Launch the masked max-pooling backward kernel, mirroring the shape setup
// of PoolingWithMaskingForward.
void PoolingWithMaskingBackward(Tensor adj,
                                Tensor adjIn,
                                Tensor in,
                                Tensor mask,
                                int width,
                                bool isEven) {
  int total = adj->shape().elements();
  // Kernel uses a fixed 512-thread cap here (not MAX_THREADS).
  int blockSize = ::min(total, 512);
  int gridSize = total / blockSize + (total % blockSize != 0);

  Shape& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];

  Shape& adjShape = adj->shape();
  int adjRows = adjShape[2];
  int adjCols = adjShape[0] * adjShape[1];

  // Width of the final (possibly truncated) pooling window.
  int tail = (inCols - isEven) % width;
  int lastWidth = (tail == 0) ? width : tail;

  hipLaunchKernelGGL(( gMaxPoolingBackward), dim3(gridSize), dim3(blockSize), 0, 0,
                     adj->data(), adjRows, adjCols,
                     in->data(), adjIn->data(), inRows, inCols,
                     mask->data(), adjShape[1], mask->shape()[2],
                     width, lastWidth);
}
} // namespace marian
| 9e3e2fb31a50163fb759276a461ba66731c88433.cu | #include <thrust/transform_reduce.h>
#include "kernels/cuda_helpers.h"
#include "kernels/tensor_operators.h"
#include "3rd_party/reduce_all.h"
namespace marian {
#define CUDA_FLT_MAX 1.70141e+38
// Unary predicate usable on host and device (e.g. with
// thrust::transform_reduce): true iff the value is NaN.
struct isnan_test {
  __host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
// Numerically stable sigmoid: splits on the sign of x so that expf is only
// ever evaluated on a non-positive argument, avoiding overflow for large |x|.
// Literals are single-precision (1.0f) — the previous 1.0 forced a
// double-precision promotion inside a float device function.
__device__ inline float stableLogit(float x) {
  if(x >= 0) {
    float z = expf(-x);
    return 1.0f / (1.0f + z);
  } else {
    float z = expf(x);
    return z / (1.0f + z);
  }
}
// NaN detection over a tensor. The thrust-based implementation is disabled
// (commented out below), so this stub currently ALWAYS returns false —
// callers get no NaN checking until it is re-enabled.
bool IsNan(Tensor in) {
  //cudaSetDevice(in->getDevice());
  //thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
  //thrust::device_ptr<float> end
  //    = thrust::device_pointer_cast(in->data() + in->size());
  //return thrust::transform_reduce(
  //    begin, end, isnan_test(), 0, thrust::plus<bool>());
  return false;
}
// Concatenate along a non-last axis by copying contiguous slices.
// For each of the `sliceCount` independent slices before the concat axis,
// one device-to-device copy per input is issued, then the stream is synced.
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
  cudaSetDevice(out->getDevice());

  // Number of independent slices preceding the concatenation axis.
  int sliceCount = 1;
  for(int d = 0; d < axis; ++d)
    sliceCount *= out->shape()[d];

  size_t dstOffset = 0;
  for(int s = 0; s < sliceCount; ++s) {
    for(auto input : inputs) {
      size_t sliceSize = input->shape().elements() / sliceCount;

      cudaMemcpyAsync(out->data() + dstOffset,
                      input->data() + s * sliceSize,
                      sliceSize * sizeof(float),
                      cudaMemcpyDeviceToDevice);
      dstOffset += sliceSize;
    }
  }
  cudaStreamSynchronize(0);
}
// Copy a `cols`-wide column slice of every row from `in` (row stride
// cols_in, starting at offset_in) into `out` (row stride cols_out, starting
// at offset_out). Blocks grid-stride over rows, threads stride over columns.
__global__ void gInsertCols(float* out,
                            const float* in,
                            size_t rows,
                            size_t cols,
                            size_t cols_out,
                            size_t cols_in,
                            size_t offset_out,
                            size_t offset_in) {
  for(int rowStart = 0; rowStart < rows; rowStart += gridDim.x) {
    int r = rowStart + blockIdx.x;
    if(r < rows) {
      float* dst = out + r * cols_out + offset_out;
      const float* src = in + r * cols_in + offset_in;
      for(int colStart = 0; colStart < cols; colStart += blockDim.x) {
        int c = colStart + threadIdx.x;
        if(c < cols)
          dst[c] = src[c];
      }
    }
  }
}
// Concatenate along the last axis: each input contributes a column slice to
// every row of the output. All inputs must have the same leading-dimension
// product as the output.
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
  cudaSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int outCols = out->shape().back();

  size_t colOffset = 0;
  for(auto input : inputs) {
    ABORT_IF(rows != input->shape().elements() / input->shape().back(),
             "First dimension must be equal");
    int inCols = input->shape().back();

    int blocks = std::min(MAX_BLOCKS, rows);
    int threads = std::min(MAX_THREADS, inCols);

    gInsertCols<<<blocks, threads>>>(out->data(), input->data(),
                                     rows, inCols, outCols, inCols,
                                     colOffset, 0);
    colOffset += inCols;
  }
  cudaStreamSynchronize(0);
}
// Dispatch concatenation: the last axis has a dedicated row-wise kernel;
// every other axis reduces to contiguous slice copies.
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
  bool lastAxis = (ax == out->shape().size() - 1);
  if(lastAxis)
    Concatenate1(out, inputs);
  else
    ConcatCont(out, inputs, ax);
}
// Split along the last axis: each output receives a column slice of every
// row of `in`. All outputs must share the input's leading-dimension product.
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
  cudaSetDevice(in->getDevice());

  int rows = in->shape().elements() / in->shape().back();
  int srcCols = in->shape().back();

  size_t colOffset = 0;
  for(auto output : outputs) {
    ABORT_IF(rows != output->shape().elements() / output->shape().back(),
             "First dimension must be equal");
    int dstCols = output->shape().back();

    int blocks = std::min(MAX_BLOCKS, rows);
    int threads = std::min(MAX_THREADS, dstCols);

    gInsertCols<<<blocks, threads>>>(output->data(), in->data(),
                                     rows, dstCols, dstCols, srcCols,
                                     0, colOffset);
    colOffset += dstCols;
  }
  cudaStreamSynchronize(0);
}
// Split along a non-last axis by copying contiguous slices out of `in`
// (inverse of ConcatCont).
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
  cudaSetDevice(in->getDevice());

  // Number of independent slices preceding the split axis.
  int sliceCount = 1;
  for(int d = 0; d < axis; ++d)
    sliceCount *= in->shape()[d];

  size_t srcOffset = 0;
  for(int s = 0; s < sliceCount; ++s) {
    for(auto output : outputs) {
      size_t sliceSize = output->shape().elements() / sliceCount;

      cudaMemcpyAsync(output->data() + s * sliceSize,
                      in->data() + srcOffset,
                      sliceSize * sizeof(float),
                      cudaMemcpyDeviceToDevice);
      srcOffset += sliceSize;
    }
  }
  cudaStreamSynchronize(0);
}
// Dispatch splitting (inverse of Concatenate): last axis uses the row-wise
// kernel, other axes use contiguous slice copies.
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
  bool lastAxis = (ax == in->shape().size() - 1);
  if(lastAxis)
    Split1(outputs, in);
  else
    SplitCont(outputs, in, ax);
}
// General N-dimensional transpose. For each output element (grid-stride
// loop over the flat index), the multi-dimensional output index is
// recovered, permuted back into input coordinates via `permute`, and the
// corresponding input element is copied.
__global__ void gTransposeND(gpu::Tensor<float> out,
                             const gpu::Tensor<float> in,
                             const gpu::Array<int, gpu::Shape::size()> permute) {
  constexpr size_t N = gpu::Shape::size();
  gpu::Array<int, N> oDims;  // multi-index into the output
  gpu::Array<int, N> pDims;  // permuted multi-index into the input
  int length = out.shape().elements();
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      out.shape().dims(index, oDims);
      // Output axis i corresponds to input axis permute[i].
      for(int i = 0; i < N; ++i)
        pDims[permute[i]] = oDims[i];
      out[index] = in[pDims];
    }
  }
}
// Launch the N-dimensional transpose kernel. The user-supplied axis order
// may have lower rank than gpu::Shape::size(); leading (unspecified) axes
// map to themselves and the given axes are shifted accordingly.
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
  cudaSetDevice(out->getDevice());

  gpu::Array<int, gpu::Shape::size()> permutation;
  int pad = gpu::Shape::size() - vAxis.size();
  for(int i = 0; i < permutation.size(); ++i)
    permutation[i] = (i < pad) ? i : vAxis[i - pad] + pad;

  int length = out->shape().elements();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));

  gTransposeND<<<blocks, threads>>>(out, in, permutation);
}
// Row-wise masked softmax with max-subtraction for numerical stability.
// One block handles one row at a time (grid-stride over rows); threads
// reduce the row maximum and the exponential sum in shared memory.
// The mask may have a different (broadcastable) shape; in that case each
// mask index is remapped through the shapes. Masked-out entries contribute
// exp-value 0. Requires 2 * blockDim.x floats of dynamic shared memory.
// NOTE(review): a fully-masked row leaves _sum[0] == 0, producing NaN in the
// final division — presumably callers guarantee at least one unmasked entry.
__global__ void gSoftmax(float* out,
                         gpu::Shape outShape,
                         const float* in,
                         const float* mask,
                         const gpu::Shape maskShape) {
  int rows = outShape.elements() / outShape.back();
  int cols = outShape.back();
  bool broadcast = outShape != maskShape;
  gpu::Array<int, gpu::Shape::size()> dims;
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      extern __shared__ float _share[];
      // Reduction scratch lives in the second half of the shared buffer.
      float* _max = _share + blockDim.x;
      _max[threadIdx.x] = -CUDA_FLT_MAX;  // mask
      // Row maximum over unmasked entries only.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float mVal = 1.f;
          if(mask) {
            int mIndex = id + j * cols;
            if(broadcast) {
              outShape.dims(mIndex, dims);
              mIndex = maskShape.bindex(dims);
            }
            mVal = mask[mIndex];
          }
          if(mVal && sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      // Tree reduction of the maximum.
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      // Reuse the same scratch region for the exponential sum; `max` was
      // already saved into a register above.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float mVal = 1.f;
          if(mask) {
            int mIndex = id + j * cols;
            if(broadcast) {
              outShape.dims(mIndex, dims);
              mIndex = maskShape.bindex(dims);
            }
            mVal = mask[mIndex];
          }
          float ex = 0;
          if(mVal)
            ex = __expf(sp[id] - max);
          so[id] = ex;
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // Normalize by the row sum.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          so[id] = so[id] / _sum[0];
        }
      }
    }
  }
}
// Launch the row-wise masked softmax kernel. When no mask is given, the
// output's own shape is passed as a placeholder mask shape.
void Softmax(Tensor out, Tensor in, Tensor mask) {
  cudaSetDevice(out->getDevice());

  size_t rowCount = out->shape().elements() / out->shape().back();
  size_t colCount = out->shape().back();

  int blocks = std::min(MAX_BLOCKS, (int)rowCount);
  int threads = std::min(MAX_THREADS, (int)colCount);
  // Two float buffers per thread for the max/sum reductions.
  int shared = 2 * threads * sizeof(float);

  if(mask)
    gSoftmax<<<blocks, threads, shared>>>(
        out->data(), out->shape(), in->data(), mask->data(), mask->shape());
  else
    gSoftmax<<<blocks, threads, shared>>>(
        out->data(), out->shape(), in->data(), 0, out->shape());
}
// Row-wise log-softmax: so[id] = sp[id] - max - log(sum(exp(sp - max))).
// One block per row at a time; shared-memory tree reductions for the max
// and the exponential sum. Requires 2 * blockDim.x floats of shared memory.
// NOTE(review): the initial `_max[threadIdx.x] = sp[threadIdx.x]` load is
// not bounds-checked — it relies on the launcher capping blockDim.x at
// `cols` (LogSoftmax below does); otherwise it reads past the row.
__global__ void gLogSoftmax(float* out,
                            const gpu::Shape outShape,
                            const float* in) {
  int rows = outShape.elements() / outShape.back();
  int cols = outShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      extern __shared__ float _share[];
      // Reduction scratch in the second half of the shared buffer.
      float* _max = _share + blockDim.x;
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      // Tree reduction of the row maximum.
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      // Reuse the scratch for the exp-sum; shifted values are stored to the
      // output so only the log-sum needs subtracting afterwards.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float sm = sp[id] - max;
          float ex = __expf(sm);
          so[id] = sm;
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols)
          so[id] -= __logf(_sum[0]);
      }
    }
  }
}
// Launch the row-wise log-softmax kernel. Thread count is capped at the
// column count, which the kernel's initial shared-memory load relies on.
void LogSoftmax(Tensor out, Tensor in) {
  cudaSetDevice(out->getDevice());

  size_t rowCount = out->shape().elements() / out->shape().back();
  size_t colCount = out->shape().back();

  int blocks = std::min(MAX_BLOCKS, (int)rowCount);
  int threads = std::min(MAX_THREADS, (int)colCount);
  int shared = 2 * threads * sizeof(float);

  gLogSoftmax<<<blocks, threads, shared>>>(
      out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
// Softmax backward: grad += val * (adj - sum(val * adj)) per row, i.e. the
// Jacobian-vector product of softmax. One block per row at a time, with a
// shared-memory tree reduction of the dot product sum(val * adj).
// Requires 2 * blockDim.x floats of dynamic shared memory (the scratch is
// taken from the second half).
__global__ void gSoftmaxGrad(float* grad,
                             const float* adj,
                             const float* val,
                             const int rows,
                             const int cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      float* gradRow = grad + j * cols;
      const float* adjRow = adj + j * cols;
      const float* valRow = val + j * cols;
      _sum[threadIdx.x] = 0.0;
      // Partial dot products of val and adj.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += valRow[id] * adjRow[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          // NOTE: this local `val` shadows the kernel parameter `val`
          // (already consumed as valRow above, so behavior is unaffected).
          float val = valRow[id] * (adjRow[id] - _sum[0]);
          // Skip writing exact zeros (avoids a redundant read-modify-write).
          if(val)
            gradRow[id] += val;
        }
      }
    }
  }
}
// Launch the softmax backward kernel. `grad` and `val` are row-major
// rows x cols matrices; for each row the weighted average of `adj`
// (weights = val) is subtracted before scaling — the softmax JVP.
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
  cudaSetDevice(adj->getDevice());

  int rowCount = grad->shape().elements() / grad->shape().back();
  int colCount = grad->shape().back();

  int blocks = std::min(MAX_BLOCKS, rowCount);
  int threads = std::min(MAX_THREADS, colCount);
  int shared = 2 * threads * sizeof(float);

  gSoftmaxGrad<<<blocks, threads, shared>>>(
      grad->data(), adj->data(), val->data(), rowCount, colCount);
}
// Log-softmax backward: grad += adj - exp(val) * sum(adj) per row, where
// `val` holds the forward log-softmax output (so exp(val) is the softmax).
// One block per row at a time, shared-memory tree reduction of sum(adj).
// Requires 2 * blockDim.x floats of dynamic shared memory.
__global__ void gLogSoftmaxGrad(float* grad,
                                const float* adj,
                                const float* val,
                                const int rows,
                                const int cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      float* gradRow = grad + j * cols;
      const float* adjRow = adj + j * cols;
      const float* valRow = val + j * cols;
      _sum[threadIdx.x] = 0.0;
      // Partial sums of the incoming gradient.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += adjRow[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols)
          gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
      }
    }
  }
}
// Launch the log-softmax backward kernel (val holds the forward log-softmax
// output; the kernel subtracts softmax * sum(adj) from adj).
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
  cudaSetDevice(adj->getDevice());

  int rowCount = grad->shape().elements() / grad->shape().back();
  int colCount = grad->shape().back();

  int blocks = std::min(MAX_BLOCKS, rowCount);
  int threads = std::min(MAX_THREADS, colCount);
  int shared = 2 * threads * sizeof(float);

  gLogSoftmaxGrad<<<blocks, threads, shared>>>(
      grad->data(), adj->data(), val->data(), rowCount, colCount);
}
///////////////////////////////////////////////////////
// Row-wise argmax: out[row] = index of the largest element in row `row`.
// One block per row (the kernel uses only blockIdx.x; all threads of a block
// redundantly compute the same result). Requires gridDim.x <= rows.
// Fix: the previous version initialized maxScore to the magic sentinel
// -99999 and left maxInd UNINITIALIZED, so a row whose scores were all
// <= -99999 wrote garbage. Initialize from the first element instead; ties
// still resolve to the lowest index (strict '>' comparison).
__global__ void gArgmax(float* out,
                        const float* data,
                        size_t rows,
                        size_t cols) {
  size_t row = blockIdx.x;
  size_t startInd = row * cols;
  // Degenerate empty row: report index 0 rather than reading out of bounds.
  if(cols == 0) {
    out[row] = 0;
    return;
  }
  float maxScore = data[startInd];
  size_t maxInd = 0;
  for(size_t col = 1; col < cols; ++col) {
    float score = data[startInd + col];
    if(score > maxScore) {
      maxScore = score;
      maxInd = col;
    }
  }
  out[row] = maxInd;
}
///////////////////////////////////////////////////////
// C = scalar * op(A) * op(B) + beta * C via cuBLAS SGEMM.
// cuBLAS is column-major while these tensors are row-major, so the call
// computes C^T = op(B)^T * op(A)^T by swapping the operand order and the
// m/n dimensions; leading dimensions are the row-major row strides.
void Prod(cublasHandle_t handle,
          Tensor C,
          const Tensor A,
          const Tensor B,
          bool transA,
          bool transB,
          float beta,
          float scalar) {
  cudaSetDevice(C->getDevice());
  float alpha = scalar;
  // Logical (post-transpose) dimensions: op(A) is m x k, op(B) is l x n.
  size_t m = A->shape().elements() / A->shape().back();
  size_t k = A->shape().back();
  if(transA)
    std::swap(m, k);
  size_t l = B->shape().elements() / B->shape().back();
  size_t n = B->shape().back();
  if(transB)
    std::swap(l, n);
  size_t lda = A->shape().back();
  size_t ldb = B->shape().back();
  size_t ldc = B->shape().back();
  if(transB)
    ldc = B->shape().elements() / B->shape().back();
  cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
  cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
#if CUDA_VERSION >= 9000
  //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
#endif
  // Operands are passed B-first (see column-major note above).
  cublasSgemm(handle,
              opB,
              opA,
              n,
              m,
              k,
              &alpha,
              B->data(),
              ldb,
              A->data(),
              lda,
              &beta,
              C->data(),
              ldc);
#if CUDA_VERSION >= 9000
  //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
#endif
}
// Batched C = alpha * op(A) * op(B) + beta * C via strided-batched SGEMM.
// As in Prod(), row-major tensors are fed to column-major cuBLAS by
// computing C^T = op(B)^T * op(A)^T (B first, n/m exchanged).
// A batch stride of 0 broadcasts a single-batch operand against the
// batches of the other operand.
void ProdBatched(cublasHandle_t handle,
                 Tensor C,
                 const Tensor A,
                 const Tensor B,
                 bool transA,
                 bool transB,
                 float beta,
                 float scalar) {
  cudaSetDevice(C->getDevice());
  float alpha = scalar;
  // Batch count = product of all axes before the trailing matrix dims.
  size_t batchA = A->shape().elements() / (A->shape()[-1] * A->shape()[-2]);
  size_t batchB = B->shape().elements() / (B->shape()[-1] * B->shape()[-2]);
  size_t m = A->shape()[-2];
  size_t k = A->shape()[-1];
  if(transA)
    std::swap(m, k);
  size_t l = B->shape()[-2];
  size_t n = B->shape()[-1];
  if(transB)
    std::swap(l, n);  // after the swap, l must equal k
  // Leading dimensions in the row-major layout (= width of a row).
  size_t lda = A->shape()[-1];
  size_t ldb = B->shape()[-1];
  size_t ldc = B->shape()[-1];
  if(transB)
    ldc = B->shape()[-2];
  cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
  cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
#if CUDA_VERSION >= 9000
  //cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH);
#endif
  // NOTE(review): the per-batch strides n*k and m*k below are written
  // in the post-swap dimensions — confirm they equal the operands'
  // true per-matrix element counts for the transposed cases.
  cublasSgemmStridedBatched(handle,
                            opB,
                            opA,
                            n,
                            m,
                            k,
                            &alpha,
                            B->data(),
                            ldb,
                            batchB == 1 ? 0 : n * k,
                            A->data(),
                            lda,
                            batchA == 1 ? 0 : m * k,
                            &beta,
                            C->data(),
                            ldc,
                            n * m,
                            std::max(batchA, batchB));
#if CUDA_VERSION >= 9000
  //cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH);
#endif
}
// Row gather: out[j, :] = in[sourceRowIdx[j], :] for j in [0, rows).
// One block per destination row (grid-strided), columns striped over
// the threads of the block.
__global__ void gCopyRows(float* out,
                          const float* in,
                          size_t cols,
                          const size_t* sourceRowIdx,
                          size_t rows) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int dstRow = rowBase + blockIdx.x;
    if(dstRow >= rows)
      continue;
    size_t srcRow = sourceRowIdx[dstRow];
    float* dst = out + (size_t)dstRow * cols;
    const float* src = in + srcRow * cols;
    for(int colBase = 0; colBase < cols; colBase += blockDim.x) {
      int col = colBase + threadIdx.x;
      if(col < cols)
        dst[col] = src[col];
    }
  }
}
// Gathers the rows of `in` listed in `indices` into `out` (row j of
// out receives row indices[j] of in).
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
  cudaSetDevice(out->getDevice());

  size_t width = in->shape().back();
  size_t numRows = indices.size();

  int blockSize = std::min(MAX_THREADS, (int)width);
  int gridSize = std::min(MAX_BLOCKS, (int)numRows);

  // Stage the gather indices on the device for the duration of the call.
  size_t* devIndices;
  CUDA_CHECK(cudaMalloc(&devIndices, numRows * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(devIndices,
                        indices.data(),
                        numRows * sizeof(size_t),
                        cudaMemcpyHostToDevice));

  gCopyRows<<<gridSize, blockSize>>>(
      out->data(), in->data(), width, devIndices, numRows);

  CUDA_CHECK(cudaFree(devIndices));
}
// Row scatter-add: out[targetRowIdx[j], :] += in[j, :]. atomicAdd is
// required because several source rows may target the same output row.
__global__ void gPasteRows(float* out,
                           const float* in,
                           size_t cols,
                           const size_t* targetRowIdx,
                           size_t rows) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int srcRow = rowBase + blockIdx.x;
    if(srcRow >= rows)
      continue;
    size_t dstRow = targetRowIdx[srcRow];
    float* dst = out + dstRow * cols;
    const float* src = in + (size_t)srcRow * cols;
    for(int colBase = 0; colBase < cols; colBase += blockDim.x) {
      int col = colBase + threadIdx.x;
      if(col < cols)
        atomicAdd(dst + col, src[col]);
    }
  }
}
// Scatter-adds the rows of `in` into `out` at the positions given by
// `indices` (row indices[j] of out accumulates row j of in).
void PasteRows(Tensor out,
               const Tensor in,
               const std::vector<size_t>& indices) {
  cudaSetDevice(out->getDevice());

  size_t width = in->shape().back();
  size_t numRows = indices.size();

  int blockSize = std::min(MAX_THREADS, (int)width);
  int gridSize = std::min(MAX_BLOCKS, (int)numRows);

  // @TODO: turn into tensor
  size_t* devIndices;
  CUDA_CHECK(cudaMalloc(&devIndices, numRows * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(devIndices,
                        indices.data(),
                        numRows * sizeof(size_t),
                        cudaMemcpyHostToDevice));

  gPasteRows<<<gridSize, blockSize>>>(
      out->data(), in->data(), width, devIndices, numRows);

  CUDA_CHECK(cudaFree(devIndices));
}
/////////////
// Column gather: out[j, i] = in[j, sourceColIdx[i]]. One block per row
// (grid-strided), output columns striped over the block's threads.
__global__ void gCopyCols(float* out,
                          const float* in,
                          size_t rows,
                          size_t colsIn,
                          const size_t* sourceColIdx,
                          size_t colsOut) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int row = rowBase + blockIdx.x;
    if(row >= rows)
      continue;
    const float* src = in + (size_t)row * colsIn;
    float* dst = out + (size_t)row * colsOut;
    for(int colBase = 0; colBase < colsOut; colBase += blockDim.x) {
      int col = colBase + threadIdx.x;
      if(col < colsOut)
        dst[col] = src[sourceColIdx[col]];
    }
  }
}
// Gathers the columns of `in` listed in `indices` into `out`.
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
  cudaSetDevice(out->getDevice());

  size_t numRows = in->shape().elements() / in->shape().back();
  size_t inWidth = in->shape().back();
  size_t numCols = indices.size();

  int blockSize = std::min(MAX_THREADS, (int)numCols);
  int gridSize = std::min(MAX_BLOCKS, (int)numRows);

  // Stage the gather indices on the device for the duration of the call.
  size_t* devIndices;
  CUDA_CHECK(cudaMalloc(&devIndices, numCols * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(devIndices,
                        indices.data(),
                        numCols * sizeof(size_t),
                        cudaMemcpyHostToDevice));

  gCopyCols<<<gridSize, blockSize>>>(
      out->data(), in->data(), numRows, inWidth, devIndices, numCols);

  CUDA_CHECK(cudaFree(devIndices));
}
// Column scatter: out[j, targetColIdx[i]] = in[j, i]. Plain stores —
// distinct input columns are assumed to map to distinct targets.
__global__ void gPasteCols(float* out,
                           const float* in,
                           size_t rows,
                           size_t colsOut,
                           const size_t* targetColIdx,
                           size_t colsIn) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int row = rowBase + blockIdx.x;
    if(row >= rows)
      continue;
    const float* src = in + (size_t)row * colsIn;
    float* dst = out + (size_t)row * colsOut;
    for(int colBase = 0; colBase < colsIn; colBase += blockDim.x) {
      int col = colBase + threadIdx.x;
      if(col < colsIn)
        dst[targetColIdx[col]] = src[col];
    }
  }
}
// Scatters the columns of `in` into `out` at the positions given by
// `indices` (column indices[i] of out receives column i of in).
void PasteCols(Tensor out,
               const Tensor in,
               const std::vector<size_t>& indices) {
  cudaSetDevice(out->getDevice());

  size_t numRows = in->shape().elements() / in->shape().back();
  size_t inWidth = in->shape().back();
  size_t numCols = indices.size();

  int blockSize = std::min(MAX_THREADS, (int)numCols);
  int gridSize = std::min(MAX_BLOCKS, (int)numRows);

  // Stage the scatter indices on the device for the duration of the call.
  size_t* devIndices;
  CUDA_CHECK(cudaMalloc(&devIndices, numCols * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(devIndices,
                        indices.data(),
                        numCols * sizeof(size_t),
                        cudaMemcpyHostToDevice));

  gPasteCols<<<gridSize, blockSize>>>(
      out->data(), in->data(), numRows, inWidth, devIndices, numCols);

  CUDA_CHECK(cudaFree(devIndices));
}
// Gather along `axis`: each output element copies the input element
// whose coordinate on `axis` is remapped through d_indices.
__global__ void gSelect(float* out,
                        gpu::Shape outShape,
                        const float* in,
                        const gpu::Shape inShape,
                        int axis,
                        size_t* d_indices) {
  int total = outShape.elements();
  gpu::Array<int, gpu::Shape::size()> coord;
  int stride = blockDim.x * gridDim.x;
  for(int base = 0; base < total; base += stride) {
    int outIndex = base + blockDim.x * blockIdx.x + threadIdx.x;
    if(outIndex >= total)
      continue;
    // Unflatten, remap the selected axis, then reflatten in the input.
    outShape.dims(outIndex, coord);
    coord[axis] = d_indices[coord[axis]];
    out[outIndex] = in[inShape.index(coord)];
  }
}
// Scatter along `axis` (inverse of gSelect): the input element with
// coordinate c on `axis` is written to output coordinate d_indices[c].
__global__ void gInsert(float* out,
                        gpu::Shape outShape,
                        const float* in,
                        const gpu::Shape inShape,
                        int axis,
                        size_t* d_indices) {
  int length = inShape.elements();
  gpu::Array<int, gpu::Shape::size()> dims;
  for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
    int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
    if(index < length) {
      inShape.dims(index, dims);
      // BUG FIX: remap the coordinate on `axis`. This read
      // d_indices[dims[index]] before — indexing the index table with
      // the flat element index, which is out of bounds whenever
      // index >= the number of indices. Mirrors gSelect's dims[axis].
      dims[axis] = d_indices[dims[axis]];
      int outIndex = outShape.index(dims);
      out[outIndex] = in[index];
    }
  }
}
// Gathers slices of `in` along `axis` according to `indices`,
// writing the result into `out`.
void Select(Ptr<Allocator<DeviceGPU>> allocator,
            Tensor out,
            const Tensor in,
            int axis,
            const std::vector<size_t>& indices) {
  cudaSetDevice(out->getDevice());

  int total = out->shape().elements();
  int blockSize = std::min(MAX_THREADS, total);
  int gridSize
      = std::min(MAX_BLOCKS, total / blockSize + (total % blockSize != 0));

  // Stage the index table in device memory via the allocator.
  auto mp_indices = allocator->alloc<size_t>(indices.size());
  mp_indices->insert(indices.data(), indices.size());

  // Map the user-facing axis onto gpu::Shape's fixed-rank axis numbering.
  int axisGPU = axis + gpu::Shape::size() - out->shape().size();

  gSelect<<<gridSize, blockSize>>>(out->data(),
                                   out->shape(),
                                   in->data(),
                                   in->shape(),
                                   axisGPU,
                                   mp_indices->data<size_t>());

  allocator->free(mp_indices);
}
// Scatters slices of `in` into `out` along `axis` according to
// `indices` (inverse of Select).
void Insert(Ptr<Allocator<DeviceGPU>> allocator,
            Tensor out,
            const Tensor in,
            int axis,
            const std::vector<size_t>& indices) {
  cudaSetDevice(in->getDevice());

  int total = in->shape().elements();
  int blockSize = std::min(MAX_THREADS, total);
  int gridSize
      = std::min(MAX_BLOCKS, total / blockSize + (total % blockSize != 0));

  // Stage the index table in device memory via the allocator.
  auto mp_indices = allocator->alloc<size_t>(indices.size());
  mp_indices->insert(indices.data(), indices.size());

  // Map the user-facing axis onto gpu::Shape's fixed-rank axis numbering.
  int axisGPU = axis + gpu::Shape::size() - out->shape().size();

  gInsert<<<gridSize, blockSize>>>(out->data(),
                                   out->shape(),
                                   in->data(),
                                   in->shape(),
                                   axisGPU,
                                   mp_indices->data<size_t>());

  allocator->free(mp_indices);
}
// Fused GRU forward step.
// xW and sU each hold three gate pre-activations per row, laid out as
// [r | z | candidate] blocks of `cols` each; b matches that layout.
// mask (optional, may be null) holds one float per row; a zero mask
// keeps the previous state unchanged for that row.
// `final` selects the variant h = tanh(x + (s + b) * r) instead of
// h = tanh(x + s * r + b).
__global__ void gGRUFastForward(float* out,
                                const float* state,
                                const float* xW,
                                const float* sU,
                                const float* b,
                                const float* mask,
                                size_t rows,
                                size_t cols,
                                bool final) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];
      float* rowOut = out + j * cols;
      const float* rowState = state + j * cols;
      const float* xWrow = xW + j * cols * 3;
      const float* sUrow = sU + j * cols * 3;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          // Reset gate.
          float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          // Update gate.
          int k = i + cols;
          float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          // Candidate hidden state.
          int l = i + 2 * cols;
          float h;
          if(final)
            h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
          else
            h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
          // Interpolate with the previous state; renamed from `out`,
          // which shadowed the kernel's output parameter.
          float next = (1.0f - z) * h + z * rowState[i];
          rowOut[i] = m * next + (1 - m) * rowState[i];
        }
      }
    }
  }
}
// Launches the fused GRU forward kernel.
// inputs = { state, xW, sU, b [, mask] }; see gGRUFastForward for the
// expected layouts and the meaning of `final`.
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
  cudaSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  const float* maskPtr = inputs.size() > 4 ? inputs[4]->data() : 0;

  gGRUFastForward<<<gridSize, blockSize>>>(out->data(),      // output
                                           inputs[0]->data(),  // state
                                           inputs[1]->data(),  // xW
                                           inputs[2]->data(),  // sU
                                           inputs[3]->data(),  // b
                                           maskPtr,            // mask
                                           rows,
                                           cols,
                                           final);
}
// Backward pass of the fused GRU step (see gGRUFastForward for the
// forward definitions and the [r | z | candidate] gate layout).
// Each non-null output accumulates (+=) the corresponding gradient;
// outB is shared across rows, hence atomicAdd.
__global__ void gGRUFastBackward(float* outState,
                                 float* outXW,
                                 float* outSU,
                                 float* outB,
                                 const float* state,
                                 const float* xW,
                                 const float* sU,
                                 const float* b,
                                 const float* mask,
                                 const float* adj,
                                 size_t rows,
                                 size_t cols,
                                 bool final) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];  // 1 when no mask is supplied
      float* rowOutState = outState + j * cols;
      float* rowOutXW = outXW + j * cols * 3;
      float* rowOutSU = outSU + j * cols * 3;
      const float* rowState = state + j * cols;
      const float* rowXW = xW + j * cols * 3;
      const float* rowSU = sU + j * cols * 3;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + cols;      // offset of the update-gate block
          int l = i + 2 * cols;  // offset of the candidate block
          // Recompute the forward gate activations.
          float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
          float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
          float h;
          if(final)
            h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
          else
            h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
          // Local `adj` (this element's adjoint) shadows the pointer
          // parameter of the same name from here on.
          float adj = rowAdj[i];
          float t = (1 - z) * (1 - h * h);
          // df/ds
          if(outState)
            rowOutState[i] += (m * z - m + 1) * adj;
          // df/d(xW_r) ...
          float dfdxW_r = m * r * (1 - r) * t * adj;
          if(final)
            dfdxW_r *= rowSU[l] + b[l];
          else
            dfdxW_r *= rowSU[l];
          if(outXW)
            rowOutXW[i] += dfdxW_r;
          if(outSU)
            rowOutSU[i] += dfdxW_r;
          if(outB)
            atomicAdd(outB + i, dfdxW_r);
          // df/d(xW_z) ...
          float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
          if(outXW)
            rowOutXW[k] += dfdxW_z;
          if(outSU)
            rowOutSU[k] += dfdxW_z;
          if(outB)
            atomicAdd(outB + k, dfdxW_z);
          // df/d(xW_x) ...
          float dfdxW_x = m * t * adj;
          if(outXW)
            rowOutXW[l] += dfdxW_x;
          if(outSU)
            rowOutSU[l] += dfdxW_x * r;  // sU gradient picks up the reset gate
          if(outB)
            if(final)
              atomicAdd(outB + l, dfdxW_x * r);
            else
              atomicAdd(outB + l, dfdxW_x);
        }
      }
    }
  }
}
// Launches the fused GRU backward kernel.
// outputs = { dState, dxW, dsU, db } (any entry may be null to skip
// that gradient); inputs = { state, xW, sU, b [, mask] }.
void GRUFastBackward(std::vector<Tensor> outputs,
                     std::vector<Tensor> inputs,
                     Tensor adj,
                     bool final) {
  cudaSetDevice(adj->getDevice());

  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  float* dState = outputs[0] ? outputs[0]->data() : 0;  // state - adj
  float* dxW = outputs[1] ? outputs[1]->data() : 0;     // xW - adj
  float* dsU = outputs[2] ? outputs[2]->data() : 0;     // sU - adj
  float* db = outputs[3] ? outputs[3]->data() : 0;      // b - adj
  const float* maskPtr = inputs.size() > 4 ? inputs[4]->data() : 0;

  gGRUFastBackward<<<gridSize, blockSize>>>(dState,
                                            dxW,
                                            dsU,
                                            db,
                                            inputs[0]->data(),  // state
                                            inputs[1]->data(),  // xW
                                            inputs[2]->data(),  // sU
                                            inputs[3]->data(),  // b
                                            maskPtr,            // mask
                                            adj->data(),
                                            rows,
                                            cols,
                                            final);
}
// Per-row cross-entropy against a picked class:
//   out[j] = log(sum_i exp(sp[i] - max)) - sp[pick[j]] + max
// computed with the max-subtraction trick for numerical stability.
// Requires blockDim.x <= cols (the launcher clamps threads to cols)
// and 2 * blockDim.x floats of dynamic shared memory.
__global__ void gCrossEntropyPick(float* out,
                                  const gpu::Shape outShape,
                                  const float* in,
                                  const gpu::Shape inShape,
                                  const float* pick) {
  int rows = inShape.elements() / inShape.back();
  int cols = inShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* sp = in + j * cols;
      extern __shared__ float _share[];
      // _max and _sum both alias the second half of the shared buffer;
      // they are used strictly one after the other, so this is safe.
      float* _max = _share + blockDim.x;
      // Seed with this thread's first element, then fold in the rest
      // of the columns strided by blockDim.x.
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 1; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      // Tree-reduce the per-thread maxima into _max[0].
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      // Sum of shifted exponentials, reduced the same way into _sum[0].
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += __expf(sp[id] - max);
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // cross-entropy: only the thread that owns the picked column
      // writes the row's result.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id == (int)pick[j]) {
          out[j] = __logf(_sum[0]) - sp[id] + max;
        }
      }
    }
  }
}
// Per-row cross-entropy of softmax(in) against the class index in
// `pick`; writes one scalar per row into `out`.
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
  cudaSetDevice(out->getDevice());

  int rows = in->shape().elements() / in->shape().back();
  int cols = in->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);
  // Two shared float buffers of blockSize each (max + sum reductions).
  int sharedBytes = 2 * blockSize * (int)sizeof(float);

  gCrossEntropyPick<<<gridSize, blockSize, sharedBytes>>>(
      out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
// Backward of gCrossEntropyPick:
//   so[id] += adj[j] * (softmax(sp)[id] - [id == pick[j]])
// using the same stable max/sum reductions as the forward kernel.
// Requires blockDim.x <= cols and 2 * blockDim.x shared floats.
__global__ void gCrossEntropyPickBackward(float* out,
                                          const gpu::Shape outShape,
                                          const float* adj,
                                          const float* in,
                                          const float* pick) {
  int rows = outShape.elements() / outShape.back();
  int cols = outShape.back();
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* sp = in + j * cols;
      float* so = out + j * cols;
      extern __shared__ float _share[];
      // _max and _sum alias the second half of shared memory; they are
      // used sequentially, so there is no clash.
      float* _max = _share + blockDim.x;
      // Per-thread row maximum (strided over columns).
      _max[threadIdx.x] = sp[threadIdx.x];
      for(int tid = 1; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          if(sp[id] > _max[threadIdx.x])
            _max[threadIdx.x] = sp[id];
        }
      }
      __syncthreads();
      // Tree-reduce the maxima into _max[0].
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
            _max[threadIdx.x] = _max[threadIdx.x + skip];
          }
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float max = _max[0];
      __syncthreads();
      // Sum of shifted exponentials, reduced into _sum[0].
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = __expf(sp[id] - max);
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      // cross-entropy gradient: softmax minus one-hot at the pick.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float sub = (float)(id == (int)pick[j]);
          so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
        }
      }
    }
  }
}
// Accumulates the gradient of CrossEntropyPick into `out`
// (softmax(a) minus one-hot(pick), scaled by adj per row).
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
  cudaSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);
  // Two shared float buffers of blockSize each (max + sum reductions).
  int sharedBytes = 2 * blockSize * (int)sizeof(float);

  gCrossEntropyPickBackward<<<gridSize, blockSize, sharedBytes>>>(
      out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
// Computes the L2 norm sqrt(sum_i in[i]^2) over all elements of `in`
// and returns it on the host.
float L2Norm(Tensor in) {
  using namespace functional;
  cudaSetDevice(in->getDevice());
  int size = in->shape().elements();
  int threads = std::min(MAX_THREADS, size);
  int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0));
  // Scratch buffer holding the reduction result; wrapped in a Tensor
  // so ReduceAll can use it. CUDA_CHECK added for consistency with the
  // other wrappers in this file (calls were previously unchecked).
  uint8_t* data;
  CUDA_CHECK(cudaMalloc(&data, blocks * sizeof(float)));
  Tensor out(new TensorBase(
      New<MemoryPiece>(data, blocks * sizeof(float)), {1, blocks}, in->getDevice()));
  ReduceAll(_1 * _1, out, in);  // sum of squares into out
  float dataCpu = sqrtf(out->get(0));  // get(0) copies the scalar to host
  out.reset();  // drop the TensorBase before freeing its backing storage
  CUDA_CHECK(cudaFree(data));
  return dataCpu;
}
// Additive attention scores:
//   out[j] = sum_i va[i] * tanh(ctxRow[i] + stateRow[i])
// One block per output row j; the dot product is reduced in shared
// memory (2 * blockDim.x floats; _sum aliases the second half).
__global__ void gAtt(float* out,
                     const float* va,
                     const float* ctx,
                     const float* state,
                     int m,  // total rows (batch x time x beam)
                     int k,  // depth
                     int b,  // batch size
                     int t   // time of ctx
) {
  int rows = m;
  int cols = k;
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      const float* vaRow = va;
      // Row addressing: ctx repeats every b*t rows (per beam), state
      // repeats per batch entry within a beam. NOTE(review): assumes
      // the (beam, time, batch) layout implied by m = batch*time*beam —
      // confirm against the callers.
      const float* ctxRow = ctx + (j % (b * t)) * cols;
      const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
      extern __shared__ float _share[];
      float* _sum = _share + blockDim.x;
      // Per-thread partial dot product over the depth dimension.
      _sum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float z = ctxRow[id] + stateRow[id];
          float ex = tanhf(z) * vaRow[id];
          _sum[threadIdx.x] += ex;
        }
      }
      __syncthreads();
      // Tree-reduce the partials into _sum[0].
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      out[j] = _sum[0];
      __syncthreads();
    }
  }
}
// Launches the additive-attention scoring kernel; one score per row
// of `out`, reduced over the depth axis of `context`/`state`.
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
  cudaSetDevice(out->getDevice());

  size_t totalRows = out->shape().elements() / out->shape().back();
  size_t depth = context->shape()[-1];
  size_t batch = context->shape()[-2];
  size_t ctxTime = context->shape()[-3];

  int gridSize = std::min(MAX_BLOCKS, (int)totalRows);
  int blockSize = std::min(MAX_THREADS, (int)depth);
  int sharedBytes = 2 * blockSize * (int)sizeof(float);

  gAtt<<<gridSize, blockSize, sharedBytes>>>(out->data(),
                                             va->data(),
                                             context->data(),
                                             state->data(),
                                             totalRows,
                                             depth,
                                             batch,
                                             ctxTime);
}
// Backward of gAtt. With z = context + state and t = tanh(z):
//   dContext[j,i] += va[i] * (1 - t^2) * adj[j]
//   dState[j%n,i] += va[i] * (1 - t^2) * adj[j]   (state repeats every n rows)
//   dVa[i]        += t * adj[j]                   (shared — atomicAdd)
__global__ void gAttBack(float* gVa,
                         float* gContext,
                         float* gState,
                         const float* va,
                         const float* context,
                         const float* state,
                         const float* adj,
                         int m,  // rows
                         int k,  // cols
                         int n   // batch size
) {
  int rows = m;
  int cols = k;
  for(int bid = 0; bid < m; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* gcRow = gContext + j * cols;
      // state rows repeat modulo n — several j may hit the same gsRow.
      // NOTE(review): these += on gState are not atomic; safe only if
      // rows with equal j%n never run concurrently — confirm.
      float* gsRow = gState + (j % n) * cols;
      const float* cRow = context + j * cols;
      const float* sRow = state + (j % n) * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float z = cRow[id] + sRow[id];
          float t = tanhf(z);
          float r = va[id] * (1.f - t * t);  // d tanh = 1 - tanh^2
          gcRow[id] += r * adj[j];
          gsRow[id] += r * adj[j];
          atomicAdd(gVa + id, t * adj[j]);
        }
      }
    }
  }
}
// Launches the attention backward kernel; accumulates gradients for
// va, context and state given the upstream adjoint `adj`.
void AttBack(Tensor gVa,
             Tensor gContext,
             Tensor gState,
             Tensor va,
             Tensor context,
             Tensor state,
             Tensor adj) {
  cudaSetDevice(adj->getDevice());

  size_t totalRows = adj->shape().elements() / adj->shape().back();
  size_t rank = context->shape().size();
  size_t depth = context->shape()[rank - 1];
  size_t batch = context->shape()[rank - 2];

  int gridSize = std::min(MAX_BLOCKS, (int)batch);
  int blockSize = std::min(MAX_THREADS, (int)depth);

  gAttBack<<<gridSize, blockSize>>>(gVa->data(),
                                    gContext->data(),
                                    gState->data(),
                                    va->data(),
                                    context->data(),
                                    state->data(),
                                    adj->data(),
                                    totalRows,
                                    depth,
                                    batch);
}
// Layer normalization over the last axis:
//   out = alpha * (x - mean) / sqrt(eps + var) + beta
// One block per row; mean and variance are computed with two shared-
// memory tree reductions (2 * blockDim.x floats of dynamic shared
// memory; _sum and _sqSum alias the second half, used sequentially).
__global__ void gLNormalization(float* out,
                                const float* in,
                                const float* alpha,
                                const float* beta,
                                int rows,
                                int cols,
                                float eps = 1e-9) {
  extern __shared__ float _share[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* so = out + j * cols;
      const float* sp = in + j * cols;
      // Pass 1: row sum -> mean.
      float* _sum = _share + blockDim.x;
      _sum[threadIdx.x] = 0.0f;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          _sum[threadIdx.x] += sp[id];
        }
      }
      __syncthreads();
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = _sum[0] / cols;
      __syncthreads();
      // Pass 2: sum of squared deviations -> standard deviation.
      float* _sqSum = _share + blockDim.x;
      _sqSum[threadIdx.x] = 0.0;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = sp[id] - mean;
          _sqSum[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          _sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float sigma = sqrtf(eps + (_sqSum[0] / cols));
      __syncthreads();
      // Pass 3: normalize, scale by alpha, optionally shift by beta.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float t = alpha[id] * ((sp[id] - mean) / sigma);
          if(beta != nullptr)
            t += beta[id];
          so[id] = t;
        }
      }
    }
  }
}
// Applies layer normalization over the last axis of `in`:
// out = gamma * (in - mean) / sqrt(eps + var) [+ beta].
void LayerNormalization(Tensor out,
                        Tensor in,
                        Tensor gamma,
                        Tensor beta,
                        float eps) {
  cudaSetDevice(out->getDevice());

  int rows = in->shape().elements() / in->shape().back();
  int cols = in->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);
  // Two shared float buffers of blockSize each (sum + squared sum).
  int sharedBytes = 2 * blockSize * (int)sizeof(float);

  const float* betaPtr = beta ? beta->data() : nullptr;

  gLNormalization<<<gridSize, blockSize, sharedBytes>>>(
      out->data(), in->data(), gamma->data(), betaPtr, rows, cols, eps);
}
// Backward pass of layer normalization.
// x_hat is reconstructed from the forward output y (so x need not be
// re-normalized): x_hat = (y - beta) / gamma.
// Uses 4 * blockDim.x floats of dynamic shared memory, split into the
// four per-thread partial buffers below. gradGamma/gradBeta are shared
// across rows, hence atomicAdd.
__global__ void gLayerNormalizationGrad(float* gradX,
                                        float* gradGamma,
                                        float* gradBeta,
                                        float* adj,
                                        float* y,
                                        float* x,
                                        float* gamma,
                                        float* beta,
                                        int rows,
                                        int cols,
                                        float eps = 1e-9) {
  extern __shared__ float shared[];
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      // Partition shared memory into four reduction buffers.
      float* sum_adj = shared;                    // sum of adj
      float* sum_adj_x = shared + blockDim.x;     // sum of adj * x_hat
      float* sum_x = shared + 2 * blockDim.x;     // sum of x
      float* sum_sqr = shared + 3 * blockDim.x;   // sum of (x - mean)^2
      const float* xRow = x + j * cols;
      const float* yRow = y + j * cols;
      const float* adjRow = adj + j * cols;
      float* gradXRow = gradX + j * cols;
      sum_x[threadIdx.x] = 0.0f;
      sum_adj[threadIdx.x] = 0.0f;
      sum_adj_x[threadIdx.x] = 0.0f;
      sum_sqr[threadIdx.x] = 0.0f;
      // Pass 1: per-thread partials of sum_x, sum_adj, sum_adj_x.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          sum_x[threadIdx.x] += xRow[id];
          // x_hat recovered from the forward output.
          sum_adj_x[threadIdx.x]
              += adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          sum_adj[threadIdx.x] += adjRow[id];
        }
      }
      __syncthreads();
      // Tree-reduce the three sums simultaneously.
      int len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1)) {
          sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
          sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
          sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float mean = sum_x[0] / cols;
      __syncthreads();
      // Pass 2: squared deviations -> sigma.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float ex = xRow[id] - mean;
          sum_sqr[threadIdx.x] += ex * ex;
        }
      }
      __syncthreads();
      len = blockDim.x;
      while(len != 1) {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
          sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
        len = (len + 1) >> 1;
      }
      __syncthreads();
      float sigma = sqrtf(eps + (sum_sqr[0] / cols));
      __syncthreads();
      // Pass 3: per-element gradients.
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int id = tid + threadIdx.x;
        if(id < cols) {
          float grad_x = 0.0f;
          float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
          // Standard layer-norm gradient:
          // (N*adj - sum(adj) - x_hat*sum(adj*x_hat)) / (N*sigma)
          grad_x += cols * adjRow[id];
          grad_x -= sum_adj[0];
          grad_x -= sum_adj_x[0] * x_hat;
          grad_x /= (cols * sigma);
          float valX = gamma[id] * grad_x;
          // Clip the gradient magnitude at 1000 for stability.
          float sign = (0.f < valX) - (valX < 0.f);
          valX = fabs(valX) > 1000 ? sign * 1000 : valX;
          gradXRow[id] += valX;
          atomicAdd(gradGamma + id, adjRow[id] * x_hat);
          if(beta) {
            atomicAdd(gradBeta + id, adjRow[id]);
          }
        }
      }
    }
  }
}
// Launches the layer-normalization backward kernel; accumulates
// gradients for x, gamma and (optionally) beta.
void LayerNormalizationGrad(Tensor gradX,
                            Tensor gradGamma,
                            Tensor gradBeta,
                            Tensor adj,
                            Tensor y,
                            Tensor x,
                            Tensor gamma,
                            Tensor beta,
                            float eps) {
  cudaSetDevice(adj->getDevice());

  int rows = y->shape().elements() / y->shape().back();
  int cols = y->shape().back();

  int blockSize = std::min(MAX_THREADS, cols);
  int gridSize = std::min(MAX_BLOCKS, rows);
  // Four shared float buffers of blockSize each (see the kernel).
  int sharedBytes = 4 * blockSize * (int)sizeof(float);

  float* gradBetaPtr = (gradBeta) ? gradBeta->data() : nullptr;
  float* betaPtr = (beta) ? beta->data() : nullptr;

  gLayerNormalizationGrad<<<gridSize, blockSize, sharedBytes>>>(
      gradX->data(),
      gradGamma->data(),
      gradBetaPtr,
      adj->data(),
      y->data(),
      x->data(),
      gamma->data(),
      betaPtr,
      rows,
      cols,
      eps);
}
// Flat shift with zero padding: out[i] = in[i - offset] where the
// source index is in range, else 0.
__global__ void gShift(float* out, const float* in, int length, int offset) {
  int stride = blockDim.x * gridDim.x;
  for(int base = 0; base < length; base += stride) {
    int i = base + blockDim.x * blockIdx.x + threadIdx.x;
    if(i >= length)
      continue;
    int src = i - offset;
    out[i] = (src < 0 || src >= length) ? 0 : in[src];
  }
}
// Shifts `in` by the per-axis offsets in `shift` (negated when
// `invert`), zero-padding elements shifted in from outside.
void Shift(Tensor out, Tensor in, Shape shift, bool invert) {
  UTIL_THROW_IF2(in->shape().size() != shift.size(), "bad dimensions");

  // Collapse the per-axis shift into a single flat-index offset.
  int offset = 0;
  for(int i = 0; i < shift.size(); ++i)
    offset += in->shape().stride(i) * shift[i];
  if(invert)
    offset = -offset;

  cudaSetDevice(out->getDevice());

  int total = out->shape().elements();
  int blockSize = std::min(MAX_THREADS, total);
  int gridSize
      = std::min(MAX_BLOCKS, total / blockSize + (total % blockSize != 0));

  gShift<<<gridSize, blockSize>>>(out->data(), in->data(), total, offset);
}
// Sparse scatter: out[indices[i]] = values[i] for i in [0, length).
__global__ void gSetSparse(float* out,
                           const size_t* indices,
                           const float* values,
                           int length) {
  int stride = blockDim.x * gridDim.x;
  for(int base = 0; base < length; base += stride) {
    int i = base + blockDim.x * blockIdx.x + threadIdx.x;
    if(i < length)
      out[indices[i]] = values[i];
  }
}
// Writes the sparse (index, value) pairs into the device buffer `out`:
// out[indices[i]] = values[i]. `out` must be a device pointer.
void SetSparse(float* out,
               const std::vector<size_t>& indices,
               const std::vector<float>& values) {
  int length = indices.size();
  int threads = std::min(MAX_THREADS, length);
  int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
  // Stage indices and values in temporary device buffers.
  size_t* d_indices;
  CUDA_CHECK(cudaMalloc(&d_indices, length * sizeof(size_t)));
  CUDA_CHECK(cudaMemcpy(d_indices,
                        indices.data(),
                        length * sizeof(size_t),
                        cudaMemcpyHostToDevice));
  float* d_values;
  CUDA_CHECK(cudaMalloc(&d_values, length * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(
      d_values, values.data(), length * sizeof(float), cudaMemcpyHostToDevice));
  gSetSparse<<<blocks, threads>>>(out, d_indices, d_values, length);
  // CUDA_CHECK added on the frees for consistency with the allocations
  // above and with the rest of this file (they were unchecked before).
  CUDA_CHECK(cudaFree(d_indices));
  CUDA_CHECK(cudaFree(d_values));
}
/******************************************************************************/
// Fused LSTM cell-state update:
//   c_t = f * c_{t-1} + i * tanh(candidate)
// xW and sU each hold four gate pre-activations per row, laid out as
// [f | i | candidate | o] blocks of `cols` each (the o block is
// consumed by gLSTMOutputForward). A zero mask keeps the old cell.
__global__ void gLSTMCellForward(float* out,
                                 const float* cell,
                                 const float* xW,
                                 const float* sU,
                                 const float* b,
                                 const float* mask,
                                 size_t rows,
                                 size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];  // 1 when no mask is supplied
      float* rowOut = out + j * cols;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          // forget gate
          float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          // input gate
          int k = i + cols;
          float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          // candidate cell value
          int l = i + 2 * cols;
          float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
          float cout = gf * rowCell[i] + gi * gc;
          // Masked rows keep the previous cell state.
          rowOut[i] = m * cout + (1 - m) * rowCell[i];
        }
      }
    }
  }
}
// Launches the fused LSTM cell-state kernel.
// inputs = { cell, xW, sU, b [, mask] }; see gLSTMCellForward for layouts.
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
  cudaSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  const float* maskPtr = inputs.size() > 4 ? inputs[4]->data() : 0;

  gLSTMCellForward<<<gridSize, blockSize>>>(out->data(),        // output
                                            inputs[0]->data(),  // cell state
                                            inputs[1]->data(),  // xW
                                            inputs[2]->data(),  // sU
                                            inputs[3]->data(),  // b
                                            maskPtr,            // mask
                                            rows,
                                            cols);
}
// LSTM hidden-state output: h = o * tanh(c), where the output gate o
// comes from the fourth gate block (offset 3 * cols) of xW/sU/b.
__global__ void gLSTMOutputForward(float* out,
                                   const float* cell,
                                   const float* xW,
                                   const float* sU,
                                   const float* b,
                                   size_t rows,
                                   size_t cols) {
  for(int rowBase = 0; rowBase < rows; rowBase += gridDim.x) {
    int j = rowBase + blockIdx.x;
    if(j >= rows)
      continue;
    float* rowOut = out + j * cols;
    const float* rowCell = cell + j * cols;
    const float* xWrow = xW + j * cols * 4;
    const float* sUrow = sU + j * cols * 4;
    for(int colBase = 0; colBase < cols; colBase += blockDim.x) {
      int i = colBase + threadIdx.x;
      if(i >= cols)
        continue;
      int k = i + 3 * cols;  // output-gate block
      float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
      rowOut[i] = go * tanhf(rowCell[i]);
    }
  }
}
// Launches the LSTM output kernel (h = o * tanh(c)).
// inputs = { cell, xW, sU, b }.
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
  cudaSetDevice(out->getDevice());

  int rows = out->shape().elements() / out->shape().back();
  int cols = out->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  gLSTMOutputForward<<<gridSize, blockSize>>>(out->data(),        // output
                                              inputs[0]->data(),  // cell state
                                              inputs[1]->data(),  // xW
                                              inputs[2]->data(),  // sU
                                              inputs[3]->data(),  // b
                                              rows,
                                              cols);
}
// Backward of gLSTMCellForward (c = f*c_prev + i*tanh(candidate)).
// Gate layout in xW/sU/b: [f | i | candidate | o] blocks of `cols`.
// Each non-null output accumulates (+=); outB is shared across rows,
// hence atomicAdd.
__global__ void gLSTMCellBackward(float* outCell,
                                  float* outXW,
                                  float* outSU,
                                  float* outB,
                                  const float* cell,
                                  const float* xW,
                                  const float* sU,
                                  const float* b,
                                  const float* mask,
                                  const float* adj,
                                  size_t rows,
                                  size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float m = !mask || mask[j];  // 1 when no mask is supplied
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          // Recompute the forward gate activations.
          float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
          int k = i + cols;
          float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          int l = i + 2 * cols;
          float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
          // Local `adj` (this element's adjoint) shadows the pointer
          // parameter of the same name from here on.
          float adj = rowAdj[i];
          // dc/dc_{t-1}
          if(outCell)
            rowOutCell[i] += (m * gf - m + 1) * adj;
          // dc/d(b_f) = dc/d(xW_f) ...
          float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
          if(outXW)
            rowOutXW[i] += dcdxf;
          if(outSU)
            rowOutSU[i] += dcdxf;
          if(outB)
            atomicAdd(outB + i, dcdxf);
          // dc/d(b_i) ...
          float dcdb_i = m * gc * gi * (1 - gi) * adj;
          if(outXW)
            rowOutXW[k] += dcdb_i;
          if(outSU)
            rowOutSU[k] += dcdb_i;
          if(outB)
            atomicAdd(outB + k, dcdb_i);
          // dc/d(b_c) ...
          float dcdxc = m * gi * (1 - gc * gc) * adj;
          if(outXW)
            rowOutXW[l] += dcdxc;
          if(outSU)
            rowOutSU[l] += dcdxc;
          if(outB)
            atomicAdd(outB + l, dcdxc);
        }
      }
    }
  }
}
// Launches the LSTM cell backward kernel.
// outputs = { dCell, dxW, dsU, db } (null entries skip that gradient);
// inputs = { cell, xW, sU, b [, mask] }.
void LSTMCellBackward(std::vector<Tensor> outputs,
                      std::vector<Tensor> inputs,
                      Tensor adj) {
  cudaSetDevice(adj->getDevice());

  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  float* dCell = outputs[0] ? outputs[0]->data() : 0;  // state - adj
  float* dxW = outputs[1] ? outputs[1]->data() : 0;    // xW - adj
  float* dsU = outputs[2] ? outputs[2]->data() : 0;    // sU - adj
  float* db = outputs[3] ? outputs[3]->data() : 0;     // b - adj
  const float* maskPtr = inputs.size() > 4 ? inputs[4]->data() : 0;

  gLSTMCellBackward<<<gridSize, blockSize>>>(dCell,
                                             dxW,
                                             dsU,
                                             db,
                                             inputs[0]->data(),  // state
                                             inputs[1]->data(),  // xW
                                             inputs[2]->data(),  // sU
                                             inputs[3]->data(),  // b
                                             maskPtr,            // mask
                                             adj->data(),
                                             rows,
                                             cols);
}
// Backward of gLSTMOutputForward (h = o * tanh(c)). Only the fourth
// gate block (offset 3 * cols) of xW/sU/b participates. Each non-null
// output accumulates (+=); outB is shared across rows, hence atomicAdd.
__global__ void gLSTMOutputBackward(float* outCell,
                                    float* outXW,
                                    float* outSU,
                                    float* outB,
                                    const float* cell,
                                    const float* xW,
                                    const float* sU,
                                    const float* b,
                                    const float* adj,
                                    size_t rows,
                                    size_t cols) {
  for(int bid = 0; bid < rows; bid += gridDim.x) {
    int j = bid + blockIdx.x;
    if(j < rows) {
      float* rowOutCell = outCell + j * cols;
      float* rowOutXW = outXW + j * cols * 4;
      float* rowOutSU = outSU + j * cols * 4;
      const float* rowCell = cell + j * cols;
      const float* xWrow = xW + j * cols * 4;
      const float* sUrow = sU + j * cols * 4;
      const float* rowAdj = adj + j * cols;
      for(int tid = 0; tid < cols; tid += blockDim.x) {
        int i = tid + threadIdx.x;
        if(i < cols) {
          int k = i + 3 * cols;  // output-gate block
          // Recompute the forward activations.
          float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
          float t = tanhf(rowCell[i]);
          // Local `adj` (this element's adjoint) shadows the pointer
          // parameter of the same name from here on.
          float adj = rowAdj[i];
          // dc/dc_{t-1}
          if(outCell)
            rowOutCell[i] += go * (1 - t * t) * adj;
          // dc/d(b_o) = dc/d(xW_f) ...
          float dcdxo = t * go * (1 - go) * adj;
          if(outXW)
            rowOutXW[k] += dcdxo;
          if(outSU)
            rowOutSU[k] += dcdxo;
          if(outB)
            atomicAdd(outB + k, dcdxo);
        }
      }
    }
  }
}
// Launches the LSTM output backward kernel.
// outputs = { dCell, dxW, dsU, db } (null entries skip that gradient);
// inputs = { cell, xW, sU, b }.
void LSTMOutputBackward(std::vector<Tensor> outputs,
                        std::vector<Tensor> inputs,
                        Tensor adj) {
  cudaSetDevice(adj->getDevice());

  int rows = adj->shape().elements() / adj->shape().back();
  int cols = adj->shape().back();

  int gridSize = std::min(MAX_BLOCKS, rows);
  int blockSize = std::min(MAX_THREADS, cols);

  float* dCell = outputs[0] ? outputs[0]->data() : 0;  // state - adj
  float* dxW = outputs[1] ? outputs[1]->data() : 0;    // xW - adj
  float* dsU = outputs[2] ? outputs[2]->data() : 0;    // sU - adj
  float* db = outputs[3] ? outputs[3]->data() : 0;     // b - adj

  gLSTMOutputBackward<<<gridSize, blockSize>>>(dCell,
                                               dxW,
                                               dsU,
                                               db,
                                               inputs[0]->data(),  // state
                                               inputs[1]->data(),  // xW
                                               inputs[2]->data(),  // sU
                                               inputs[3]->data(),  // b
                                               adj->data(),
                                               rows,
                                               cols);
}
// Element-wise highway combination:
//   out = sigmoid(t) * in1 + (1 - sigmoid(t)) * in2
__global__ void gHighwayForward(float* out,
                                const float* in1,
                                const float* in2,
                                const float* t,
                                size_t length) {
  // Grid-stride loop over all elements.
  int stride = blockDim.x * gridDim.x;
  for(int index = blockDim.x * blockIdx.x + threadIdx.x; index < length;
      index += stride) {
    float gate = stableLogit(t[index]);
    out[index] = in1[index] * gate + in2[index] * (1.f - gate);
  }
}
// Launches gHighwayForward: out = sigmoid(t) * in1 + (1 - sigmoid(t)) * in2,
// applied element-wise over the whole tensor.
void HighwayForward(Tensor out,
                    const Tensor in1,
                    const Tensor in2,
                    const Tensor t) {
  cudaSetDevice(out->getDevice());

  int length = out->shape().elements();

  // One thread per element, capped by the hardware launch limits.
  int threads = std::min(MAX_THREADS, length);
  int neededBlocks = length / threads + (length % threads != 0);
  int blocks = std::min(MAX_BLOCKS, neededBlocks);

  gHighwayForward<<<blocks, threads>>>(
      out->data(), in1->data(), in2->data(), t->data(), length);
}
// Backward of the highway combination. With s = sigmoid(t):
//   d/din1 = s * adj,  d/din2 = (1 - s) * adj,
//   d/dt   = s * (1 - s) * (in1 - in2) * adj
__global__ void gHighwayBackward(float* out1,
                                 float* out2,
                                 float* outt,
                                 const float* in1,
                                 const float* in2,
                                 const float* t,
                                 const float* adj,
                                 size_t length) {
  // Grid-stride loop over all elements.
  int stride = blockDim.x * gridDim.x;
  for(int index = blockDim.x * blockIdx.x + threadIdx.x; index < length;
      index += stride) {
    float sigma = stableLogit(t[index]);
    out1[index] = sigma * adj[index];
    out2[index] = (1.f - sigma) * adj[index];
    outt[index]
        = sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
  }
}
// Launches gHighwayBackward: computes gradients w.r.t. in1 (out1), in2 (out2)
// and the transform gate t (outt) from the incoming gradient adj.
void HighwayBackward(Tensor out1,
                     Tensor out2,
                     Tensor outt,
                     const Tensor in1,
                     const Tensor in2,
                     const Tensor t,
                     const Tensor adj) {
  cudaSetDevice(out1->getDevice());

  int length = out1->shape().elements();

  int threads = std::min(MAX_THREADS, length);
  int neededBlocks = length / threads + (length % threads != 0);
  int blocks = std::min(MAX_BLOCKS, neededBlocks);

  gHighwayBackward<<<blocks, threads>>>(out1->data(),
                                        out2->data(),
                                        outt->data(),
                                        in1->data(),
                                        in2->data(),
                                        t->data(),
                                        adj->data(),
                                        length);
}
// Masked max-pooling over windows of `width` elements along the last input
// dimension. One thread per output element.
// NOTE(review): rowId/colId are derived from outRows, and the result is
// stored as out[rowId + colId * outCols] — a transposed addressing scheme
// that mirrors gMaxPoolingBackward; confirm the layout against the callers.
__global__ void gMaxPoolingForward(float* out,
                                   int outRows,
                                   int outCols,
                                   float* in,
                                   int inRows,
                                   int inCols,
                                   float* mask,
                                   int numKernels,
                                   int maskCols,
                                   int width,
                                   int lastWidth) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  if (tid >= outRows * outCols) return;

  int rowId = tid / outRows;
  int colId = tid % outRows;

  // Base of this pooling window; uses the full window stride `width`.
  float* b = in + (rowId * inCols) + (colId * width);

  // The final window may be narrower than the regular stride.
  if (colId == outRows - 1) {
    width = lastWidth;
  }

  // Mask row shared by groups of `numKernels` rows.
  // NOTE(review): computed with the (possibly shrunk) width for the last
  // window — gMaxPoolingBackward does the same, so fwd/bwd agree; confirm
  // this offset is intended for the last column.
  float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;

  // Max over the window of mask-scaled values.
  float currentMax = b[0] * localMask[0];
  for (int i = 1; i < width; ++i) {
    if (b[i] * localMask[i] > currentMax) {
      currentMax = b[i] * localMask[i];
    }
  }

  out[rowId + (colId * outCols)] = currentMax;
}
// Launches gMaxPoolingForward: masked max-pooling with window `width` along
// the last dimension of `in`. `isEven` shortens the usable input length by
// one when set (used when an extra padding column is present).
void PoolingWithMaskingForward(Tensor out,
                               Tensor in,
                               Tensor mask,
                               int width,
                               bool isEven) {
  // BUG FIX: every other launcher in this file pins the device of its output
  // tensor before launching; without this the kernel could run on whatever
  // device happens to be current (wrong results on multi-GPU setups).
  cudaSetDevice(out->getDevice());

  int n = out->shape().elements();
  int threads = std::min(n, MAX_THREADS);
  int blocks = n / threads + (n % threads != 0);

  Shape& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];

  Shape& outShape = out->shape();
  int outRows = outShape[2];
  int outCols = outShape[0] * outShape[1];

  // Width of the final (possibly partial) pooling window.
  int lastWidth = ((inCols - isEven) % width == 0)
                      ? width
                      : (inCols - isEven) % width;

  gMaxPoolingForward<<<blocks, threads>>>(
      out->data(), outRows, outCols,
      in->data(), inRows, inCols,
      mask->data(), outShape[1], mask->shape()[2],
      width, lastWidth);
}
// Backward pass for masked max-pooling: each output gradient is routed to the
// input element that produced the (mask-scaled) maximum in the forward pass.
// One thread per gradient element; addressing mirrors gMaxPoolingForward.
__global__ void gMaxPoolingBackward(float* adj,
                                    int adjRows,
                                    int adjCols,
                                    float* in,
                                    float* adjIn,
                                    int inRows,
                                    int inCols,
                                    float* mask,
                                    int numKernels,
                                    int maskCols,
                                    int width,
                                    int lastWidth)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  if (tid >= adjRows * adjCols) return;

  int rowId = tid / adjRows;
  int colId = tid % adjRows;

  // Base offset of this pooling window in the input, captured with the full
  // window stride BEFORE `width` may be shrunk below, so it matches `b`.
  int inOffset = (rowId * inCols) + (colId * width);
  float* b = in + inOffset;

  // The final window may be narrower than the regular stride.
  if (colId == adjRows - 1) {
    width = lastWidth;
  }

  // Same mask offset as gMaxPoolingForward, keeping fwd/bwd consistent.
  float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;

  // Recompute the argmax of the masked window (same scan as the forward pass).
  size_t currentMaxIdx = 0;
  for (int i = 1; i < width; ++i) {
    if (b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
      currentMaxIdx = i;
    }
  }

  // BUG FIX: the original recomputed the target as (rowId * inCols) +
  // (colId * width) + currentMaxIdx AFTER `width` had been replaced by
  // lastWidth, so for the last (partial) window the gradient was added at the
  // wrong input element. Reuse the offset that `b` was derived from instead.
  adjIn[inOffset + currentMaxIdx] += adj[rowId + (colId * adjCols)];
}
// Launches gMaxPoolingBackward: scatters the incoming gradient `adj` into
// `adjIn` at the argmax positions of the forward masked max-pooling.
void PoolingWithMaskingBackward(Tensor adj,
                                Tensor adjIn,
                                Tensor in,
                                Tensor mask,
                                int width,
                                bool isEven) {
  // BUG FIX: pin the device owning the gradient tensor before launching,
  // matching every other launcher in this file (multi-GPU correctness).
  cudaSetDevice(adj->getDevice());

  int n = adj->shape().elements();
  // NOTE(review): hard-coded 512 here while the forward pass uses
  // MAX_THREADS — confirm whether these are meant to be the same constant.
  int threads = std::min(n, 512);
  int blocks = n / threads + (n % threads != 0);

  Shape& inShape = in->shape();
  int inRows = inShape[0] * inShape[1];
  int inCols = inShape[2];

  Shape& adjShape = adj->shape();
  int adjRows = adjShape[2];
  int adjCols = adjShape[0] * adjShape[1];

  // Width of the final (possibly partial) pooling window.
  int lastWidth = ((inCols - isEven) % width == 0)
                      ? width
                      : (inCols - isEven) % width;

  gMaxPoolingBackward<<<blocks, threads>>>(
      adj->data(), adjRows, adjCols,
      in->data(), adjIn->data(), inRows, inCols,
      mask->data(), adjShape[1], mask->shape()[2],
      width, lastWidth);
}
} // namespace marian
|
f2f8eb9a7a68f2bc7f3cc2266d100b3732183f45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorSort.cuh"
// Fills every slice of `t` along dimension `dim` with its slice-local index
// sequence (implemented by the fillSliceWithIndex kernel), e.g. to seed an
// index tensor before a key/value sort. Dispatches on 32- vs 64-bit indexing
// and on the collapsed dimensionality of the tensor.
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
                                         THCudaLongTensor* t,
                                         int dim) {
  int64_t dims = THCudaLongTensor_nDimension(state, t);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);

  ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
  // Nothing to do for an empty tensor.
  if (inElements > 0) {
    int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
    ptrdiff_t numSlices = inElements / sliceSize;

    // One grid tile per slice; fails if there are too many slices.
    dim3 grid;
    if (!THC_getGridFromTiles(numSlices, grid)) {
      THError("Slice to fill with indices is too large");
    }

    // One thread per slice element, clamped to the device block limit.
    int64_t maxThreads =
      THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
    int64_t numThreads = sliceSize;
    if (numThreads > maxThreads) {
      numThreads = maxThreads;
    }

    dim3 block(numThreads);

#define FILL_INDEX(T, DIM)                                          \
  hipLaunchKernelGGL(( fillSliceWithIndex<T, DIM>)                  \
      , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
          info, numSlices, sliceSize, info.strides[collapseDim])

    if (THCTensor_canUse32BitIndexMath(state, t)) {
      // 32-bit index math. DIM -2 selects the contiguous fast path,
      // 1 and 2 are fixed-dimensionality specializations, -1 is generic.
      TensorInfo<int64_t, uint32_t> info =
        getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
      info.reduceDim(dim);
      int collapseDim = info.collapseDims(dim);

      if (info.isContiguous()) {
        FILL_INDEX(unsigned int, -2);
      } else {
        if (info.dims == 1) {
          FILL_INDEX(unsigned int, 1);
        } else if (info.dims == 2) {
          FILL_INDEX(unsigned int, 2);
        } else {
          FILL_INDEX(unsigned int, -1);
        }
      }
    } else {
      // Tensor too large for 32-bit indexing.
      TensorInfo<int64_t, uint64_t> info =
        getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
      info.reduceDim(dim);
      int collapseDim = info.collapseDims(dim);

      // catch-all implementation
      FILL_INDEX(uint64_t, -1);
    }

#undef FILL_INDEX

    THCudaCheck(hipGetLastError());
  }
}
| f2f8eb9a7a68f2bc7f3cc2266d100b3732183f45.cu | #include "THCTensorSort.cuh"
// Fills every slice of `t` along dimension `dim` with its slice-local index
// sequence (implemented by the fillSliceWithIndex kernel), e.g. to seed an
// index tensor before a key/value sort. Dispatches on 32- vs 64-bit indexing
// and on the collapsed dimensionality of the tensor.
void THCudaLongTensor_fillSliceWithIndex(THCState* state,
                                         THCudaLongTensor* t,
                                         int dim) {
  int64_t dims = THCudaLongTensor_nDimension(state, t);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);

  ptrdiff_t inElements = THCudaLongTensor_nElement(state, t);
  // Nothing to do for an empty tensor.
  if (inElements > 0) {
    int64_t sliceSize = THCudaLongTensor_size(state, t, dim);
    ptrdiff_t numSlices = inElements / sliceSize;

    // One grid tile per slice; fails if there are too many slices.
    dim3 grid;
    if (!THC_getGridFromTiles(numSlices, grid)) {
      THError("Slice to fill with indices is too large");
    }

    // One thread per slice element, clamped to the device block limit.
    int64_t maxThreads =
      THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
    int64_t numThreads = sliceSize;
    if (numThreads > maxThreads) {
      numThreads = maxThreads;
    }

    dim3 block(numThreads);

#define FILL_INDEX(T, DIM)                                       \
  fillSliceWithIndex<T, DIM>                                     \
      <<<grid, block, 0, THCState_getCurrentStream(state)>>>(    \
          info, numSlices, sliceSize, info.strides[collapseDim])

    if (THCTensor_canUse32BitIndexMath(state, t)) {
      // 32-bit index math. DIM -2 selects the contiguous fast path,
      // 1 and 2 are fixed-dimensionality specializations, -1 is generic.
      TensorInfo<int64_t, uint32_t> info =
        getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, t);
      info.reduceDim(dim);
      int collapseDim = info.collapseDims(dim);

      if (info.isContiguous()) {
        FILL_INDEX(unsigned int, -2);
      } else {
        if (info.dims == 1) {
          FILL_INDEX(unsigned int, 1);
        } else if (info.dims == 2) {
          FILL_INDEX(unsigned int, 2);
        } else {
          FILL_INDEX(unsigned int, -1);
        }
      }
    } else {
      // Tensor too large for 32-bit indexing.
      TensorInfo<int64_t, uint64_t> info =
        getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, t);
      info.reduceDim(dim);
      int collapseDim = info.collapseDims(dim);

      // catch-all implementation
      FILL_INDEX(uint64_t, -1);
    }

#undef FILL_INDEX

    THCudaCheck(cudaGetLastError());
  }
}
|
af729bd4cf60a8677734b4759dd717b5e01eac13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* \file GpuSInputRegular.cu
*
* \author Fumitaka Kawasaki
*
* \brief A class that performs stimulus input (implementation Regular) on GPU.
*/
#include "GpuSInputRegular.h"
#include <helper_cuda.h>
// Forward Delaration
void allocDeviceValues( ClusterInfo* pci, BGFLOAT* initValues, int *nShiftValues );
void deleteDeviceValues( ClusterInfo* pci );
/*
* constructor
*
* @param[in] psi Pointer to the simulation information
* @param[in] parms TiXmlElement to examine.
*/
GpuSInputRegular::GpuSInputRegular(SimulationInfo* psi, TiXmlElement* parms) : SInputRegular(psi, parms)
{
    // All parameter parsing is handled by the SInputRegular base class.
}
/*
* destructor
*/
GpuSInputRegular::~GpuSInputRegular()
{
    // Device buffers are released in term(), not here.
}
/*
* Initialize data.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] vtClrInfo Vector of ClusterInfo.
*/
void GpuSInputRegular::init(SimulationInfo* psi, vector<ClusterInfo *> &vtClrInfo)
{
    // Base-class init presumably populates m_values / m_nShiftValues and
    // sets m_fSInput — confirm in SInputRegular.
    SInputRegular::init(psi, vtClrInfo);

    if (m_fSInput == false)
        return;

    // for each cluster
    for (CLUSTER_INDEX_TYPE iCluster = 0; iCluster < vtClrInfo.size(); iCluster++) {
        checkCudaErrors( hipSetDevice( vtClrInfo[iCluster]->deviceId ) );

        // allocate GPU device memory and copy values
        allocDeviceValues(vtClrInfo[iCluster], m_values, m_nShiftValues);
    }

    // Host copies are no longer needed once each cluster has its own slice
    // on the device.
    delete[] m_values;
    delete[] m_nShiftValues;
}
/*
* Terminate process.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] vtClrInfo Vector of ClusterInfo.
*/
void GpuSInputRegular::term(SimulationInfo* psi, vector<ClusterInfo *> &vtClrInfo)
{
    // Only free device buffers if init() actually allocated them.
    if (m_fSInput) {
        // for each cluster
        for (CLUSTER_INDEX_TYPE iCluster = 0; iCluster < vtClrInfo.size(); iCluster++) {
            checkCudaErrors( hipSetDevice( vtClrInfo[iCluster]->deviceId ) );
            deleteDeviceValues(vtClrInfo[iCluster]);
        }
    }
}
/*
* Process input stimulus for each time step on GPU.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] pci ClusterInfo class to read information from.
* @param[in] iStepOffset Offset from the current simulation step.
*/
// NOTE(review): the iStepOffset parameter is never used in this body —
// confirm whether it should be folded into nStepsInCycle.
void GpuSInputRegular::inputStimulus(const SimulationInfo* psi, ClusterInfo *pci, int iStepOffset)
{
    if (m_fSInput == false)
        return;

    // for each cluster
    checkCudaErrors( hipSetDevice( pci->deviceId ) );

    int neuron_count = pci->totalClusterNeurons;

    // CUDA parameters
    const int threadsPerBlock = 256;
    int blocksPerGrid;

    // add input to each summation point
    blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock;
    hipLaunchKernelGGL(( inputStimulusDevice) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, neuron_count, pci->pClusterSummationMap, pci->initValues_d, pci->nShiftValues_d, pci->nStepsInCycle, m_nStepsCycle, m_nStepsDuration );

    // update cycle count
    pci->nStepsInCycle = (pci->nStepsInCycle + 1) % m_nStepsCycle;
}
/*
* Allocate GPU device memory and copy values
*
* @param[in] pci Pointer to the cluster information.
* @param[in] initValues Pointer to the initial values.
* @param[in] nShiftValues Pointer to the shift values.
*/
void allocDeviceValues( ClusterInfo* pci, BGFLOAT* initValues, int *nShiftValues )
{
    int neuron_count = pci->totalClusterNeurons;
    BGSIZE initValues_d_size = neuron_count * sizeof (BGFLOAT);   // size of initial values
    BGSIZE nShiftValues_d_size = neuron_count * sizeof (int);     // size of shift values

    // Allocate GPU device memory
    checkCudaErrors( hipMalloc ( ( void ** ) &pci->initValues_d, initValues_d_size ) );
    checkCudaErrors( hipMalloc ( ( void ** ) &pci->nShiftValues_d, nShiftValues_d_size ) );

    // Copy values into device memory; each cluster uploads only its own
    // slice, starting at clusterNeuronsBegin.
    checkCudaErrors( hipMemcpy ( pci->initValues_d, &initValues[pci->clusterNeuronsBegin], initValues_d_size, hipMemcpyHostToDevice ) );
    checkCudaErrors( hipMemcpy ( pci->nShiftValues_d, &nShiftValues[pci->clusterNeuronsBegin], nShiftValues_d_size, hipMemcpyHostToDevice ) );
}
/*
* Dellocate GPU device memory
*
* @param[in] pci Pointer to the cluster information.
*/
void deleteDeviceValues( ClusterInfo* pci )
{
    // Frees the per-cluster buffers allocated in allocDeviceValues().
    checkCudaErrors( hipFree( pci->initValues_d ) );
    checkCudaErrors( hipFree( pci->nShiftValues_d ) );
}
// CUDA code for -----------------------------------------------------------------------
/*
* Device code for adding input values to the summation map.
*
* @param[in] summationPoint_d Pointer to the summation map.
* @param[in] initValues_d Pointer to the input values.
* @param[in] nShiftValues_d Pointer to the shift values.
* @param[in] nStepsInCycle Current steps in cycle
* @param[in] nStepsCycle Number of steps in one cycle
* @param[in] nStepsDuration Number of steps in duration
*/
__global__ void inputStimulusDevice( int n, BGFLOAT* summationPoint_d, BGFLOAT* initValues_d, int* nShiftValues_d, int nStepsInCycle, int nStepsCycle, int nStepsDuration )
{
    // One thread per neuron.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;

    int rnShiftValues = nShiftValues_d[idx];    // load the value to a register

    // Apply the stimulus only inside this neuron's active window.
    // NOTE(review): when rnShiftValues + nStepsDuration >= nStepsCycle the
    // modulo wraps the upper bound below the lower one, making the window
    // empty — confirm that windows never cross the cycle boundary.
    if ( (nStepsInCycle >= rnShiftValues) && (nStepsInCycle < (rnShiftValues + nStepsDuration ) % nStepsCycle) )
    {
        BGFLOAT rinitValue = initValues_d[idx];
        if (rinitValue != 0.0)
            summationPoint_d[idx] += rinitValue;
    }
}
| af729bd4cf60a8677734b4759dd717b5e01eac13.cu | /*
* \file GpuSInputRegular.cu
*
* \author Fumitaka Kawasaki
*
* \brief A class that performs stimulus input (implementation Regular) on GPU.
*/
#include "GpuSInputRegular.h"
#include <helper_cuda.h>
// Forward Delaration
void allocDeviceValues( ClusterInfo* pci, BGFLOAT* initValues, int *nShiftValues );
void deleteDeviceValues( ClusterInfo* pci );
/*
* constructor
*
* @param[in] psi Pointer to the simulation information
* @param[in] parms TiXmlElement to examine.
*/
GpuSInputRegular::GpuSInputRegular(SimulationInfo* psi, TiXmlElement* parms) : SInputRegular(psi, parms)
{
    // All parameter parsing is handled by the SInputRegular base class.
}
/*
* destructor
*/
GpuSInputRegular::~GpuSInputRegular()
{
    // Device buffers are released in term(), not here.
}
/*
* Initialize data.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] vtClrInfo Vector of ClusterInfo.
*/
void GpuSInputRegular::init(SimulationInfo* psi, vector<ClusterInfo *> &vtClrInfo)
{
    // Base-class init presumably populates m_values / m_nShiftValues and
    // sets m_fSInput — confirm in SInputRegular.
    SInputRegular::init(psi, vtClrInfo);

    if (m_fSInput == false)
        return;

    // for each cluster
    for (CLUSTER_INDEX_TYPE iCluster = 0; iCluster < vtClrInfo.size(); iCluster++) {
        checkCudaErrors( cudaSetDevice( vtClrInfo[iCluster]->deviceId ) );

        // allocate GPU device memory and copy values
        allocDeviceValues(vtClrInfo[iCluster], m_values, m_nShiftValues);
    }

    // Host copies are no longer needed once each cluster has its own slice
    // on the device.
    delete[] m_values;
    delete[] m_nShiftValues;
}
/*
* Terminate process.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] vtClrInfo Vector of ClusterInfo.
*/
void GpuSInputRegular::term(SimulationInfo* psi, vector<ClusterInfo *> &vtClrInfo)
{
    // Only free device buffers if init() actually allocated them.
    if (m_fSInput) {
        // for each cluster
        for (CLUSTER_INDEX_TYPE iCluster = 0; iCluster < vtClrInfo.size(); iCluster++) {
            checkCudaErrors( cudaSetDevice( vtClrInfo[iCluster]->deviceId ) );
            deleteDeviceValues(vtClrInfo[iCluster]);
        }
    }
}
/*
* Process input stimulus for each time step on GPU.
*
* @param[in] psi Pointer to the simulation information.
* @param[in] pci ClusterInfo class to read information from.
* @param[in] iStepOffset Offset from the current simulation step.
*/
// NOTE(review): the iStepOffset parameter is never used in this body —
// confirm whether it should be folded into nStepsInCycle.
void GpuSInputRegular::inputStimulus(const SimulationInfo* psi, ClusterInfo *pci, int iStepOffset)
{
    if (m_fSInput == false)
        return;

    // for each cluster
    checkCudaErrors( cudaSetDevice( pci->deviceId ) );

    int neuron_count = pci->totalClusterNeurons;

    // CUDA parameters
    const int threadsPerBlock = 256;
    int blocksPerGrid;

    // add input to each summation point
    blocksPerGrid = ( neuron_count + threadsPerBlock - 1 ) / threadsPerBlock;
    inputStimulusDevice <<< blocksPerGrid, threadsPerBlock >>> ( neuron_count, pci->pClusterSummationMap, pci->initValues_d, pci->nShiftValues_d, pci->nStepsInCycle, m_nStepsCycle, m_nStepsDuration );

    // update cycle count
    pci->nStepsInCycle = (pci->nStepsInCycle + 1) % m_nStepsCycle;
}
/*
* Allocate GPU device memory and copy values
*
* @param[in] pci Pointer to the cluster information.
* @param[in] initValues Pointer to the initial values.
* @param[in] nShiftValues Pointer to the shift values.
*/
void allocDeviceValues( ClusterInfo* pci, BGFLOAT* initValues, int *nShiftValues )
{
    int neuron_count = pci->totalClusterNeurons;
    BGSIZE initValues_d_size = neuron_count * sizeof (BGFLOAT);   // size of initial values
    BGSIZE nShiftValues_d_size = neuron_count * sizeof (int);     // size of shift values

    // Allocate GPU device memory
    checkCudaErrors( cudaMalloc ( ( void ** ) &pci->initValues_d, initValues_d_size ) );
    checkCudaErrors( cudaMalloc ( ( void ** ) &pci->nShiftValues_d, nShiftValues_d_size ) );

    // Copy values into device memory; each cluster uploads only its own
    // slice, starting at clusterNeuronsBegin.
    checkCudaErrors( cudaMemcpy ( pci->initValues_d, &initValues[pci->clusterNeuronsBegin], initValues_d_size, cudaMemcpyHostToDevice ) );
    checkCudaErrors( cudaMemcpy ( pci->nShiftValues_d, &nShiftValues[pci->clusterNeuronsBegin], nShiftValues_d_size, cudaMemcpyHostToDevice ) );
}
/*
* Dellocate GPU device memory
*
* @param[in] pci Pointer to the cluster information.
*/
void deleteDeviceValues( ClusterInfo* pci )
{
    // Frees the per-cluster buffers allocated in allocDeviceValues().
    checkCudaErrors( cudaFree( pci->initValues_d ) );
    checkCudaErrors( cudaFree( pci->nShiftValues_d ) );
}
// CUDA code for -----------------------------------------------------------------------
/*
* Device code for adding input values to the summation map.
*
* @param[in] summationPoint_d Pointer to the summation map.
* @param[in] initValues_d Pointer to the input values.
* @param[in] nShiftValues_d Pointer to the shift values.
* @param[in] nStepsInCycle Current steps in cycle
* @param[in] nStepsCycle Number of steps in one cycle
* @param[in] nStepsDuration Number of steps in duration
*/
__global__ void inputStimulusDevice( int n, BGFLOAT* summationPoint_d, BGFLOAT* initValues_d, int* nShiftValues_d, int nStepsInCycle, int nStepsCycle, int nStepsDuration )
{
    // One thread per neuron.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= n )
        return;

    int rnShiftValues = nShiftValues_d[idx];    // load the value to a register

    // Apply the stimulus only inside this neuron's active window.
    // NOTE(review): when rnShiftValues + nStepsDuration >= nStepsCycle the
    // modulo wraps the upper bound below the lower one, making the window
    // empty — confirm that windows never cross the cycle boundary.
    if ( (nStepsInCycle >= rnShiftValues) && (nStepsInCycle < (rnShiftValues + nStepsDuration ) % nStepsCycle) )
    {
        BGFLOAT rinitValue = initValues_d[idx];
        if (rinitValue != 0.0)
            summationPoint_d[idx] += rinitValue;
    }
}
|
26d3b7e7f16f4b84b60de8530cdb97478e2997b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under
// the License.
//
//
//
//
//
//
//
//
//
//
// Authors: Aster JIAN (asterjian@qq.com)
// Yzx (yzxyzxyzx777@outlook.com)
// Ao LI (346950981@qq.com)
// Paul LU (lujq96@gmail.com)
#include <NvInfer.h>
#include <cassert>
#include <cstring>
#include <vector>
#include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h"
#include "trt_engine/trt_network_crt/plugins/skip_layer_norm_plugin/skip_layer_norm_plugin.h"
using namespace nvinfer1;
namespace fwd {
namespace bert {
// INT8 skip-connection + layer-norm kernel: dequantizes input and skip,
// optionally adds bias, layer-normalizes each row of `ld` elements, then
// re-quantizes to int8 with qScale. One block per row, VPT elements per
// thread; the launcher sets TPB = ld / VPT, so ld must equal TPB * VPT.
template <int TPB, int VPT, bool hasBias>
__global__ void skiplnDQQ(const int ld, const int8_t* input, const int8_t* skip, int8_t* output,
                          const __half* beta, const __half* gamma, const __half* bias,
                          const float dqScaleIn, const float dqScaleSkip, const float qScale) {
  const int idx = ld * blockIdx.x + threadIdx.x * VPT;
  // 4 * 1024 * 4 * 2 Bytes = 16KB per block
  int8_t in_local[VPT];
  int8_t skip_local[VPT];
  __half in_local_dq[VPT];  // dequantized input + skip + bias
  __half bias_local[VPT];   // bias and beta (register array is reused below)
  __half gamma_local[VPT];
  // Vectorized loads of this thread's VPT-element chunk.
  copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
  copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
  copy<sizeof(__half) * VPT>(&bias[threadIdx.x * VPT], bias_local);
  // loc accumulates (sum(x)/ld, sum(x^2)/ld) in the two half lanes.
  __half2 loc = __floats2half2_rn(0.f, 0.f);  // accumulator
  const __half rld = __half(1) / __half(ld);
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    // DQ input and skip
    const float tmp_in = in_local[it];
    const float tmp_skip = skip_local[it];
    in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
    if (hasBias) in_local_dq[it] += bias_local[it];
    const __half tmp = rld * in_local_dq[it];
    const __half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
    loc = loc + tmp2;
  }
  // load parameters: bias_local now holds beta.
  copy<sizeof(__half) * VPT>(&beta[threadIdx.x * VPT], bias_local);
  copy<sizeof(__half) * VPT>(&gamma[threadIdx.x * VPT], gamma_local);
  using BlockReduce = hipcub::BlockReduce<__half2, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __shared__ __half mu;      // mean
  __shared__ __half rsigma;  // 1 / std.dev.
  // Block-wide reduction of (mean, mean-of-squares); result valid in thread 0.
  const __half2 sum2 = BlockReduce(temp_storage).Reduce(loc, hipcub::Sum());
  if (threadIdx.x == 0) {
    mu = __low2half(sum2);
    rsigma = rsqrt(__high2half(sum2) - mu * mu);  // Var = E[x^2] - E[x]^2
  }
  __syncthreads();
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    // apply layernorm
    const float tmp = gamma_local[it] * (in_local_dq[it] - mu) * rsigma + bias_local[it];
    // Quantize, saturating to the symmetric int8 range [-127, 127].
    int tmpq = __float2int_rn(qScale * tmp);
    tmpq = max(-127, tmpq);
    tmpq = min(127, tmpq);
    in_local[it] = tmpq;
  }
  copy<sizeof(int8_t) * VPT>(in_local, &output[idx]);
}
// Vectorized skip + layer-norm: output = LN(input + skip [+ bias]) for
// T in {float, half}. One block per row of `ld` elements, VPT elements per
// thread; the launcher sets TPB = ld / VPT.
template <typename T, int TPB, int VPT, bool hasBias>
__global__ void skipln_vec(const int ld, const T* input, const T* skip, T* output, const T* beta,
                           const T* gamma, const T* bias) {
  const int idx = ld * blockIdx.x + threadIdx.x * VPT;
  // 4 * 1024 * 4 * 2 Bytes = 16KB per block
  T in_local[VPT];
  T skip_local[VPT];
  T bias_local[VPT];
  // T gamma_local[VPT];
  copy<sizeof(T) * VPT>(&input[idx], in_local);
  copy<sizeof(T) * VPT>(&skip[idx], skip_local);
  copy<sizeof(T) * VPT>(&bias[threadIdx.x * VPT], bias_local);
  // Per-thread partial sums of x/ld and x^2/ld.
  T local = 0.f;
  T local2 = 0.f;
  const T rld = T(1) / T(ld);
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    in_local[it] += skip_local[it];
    if (hasBias) in_local[it] += bias_local[it];
    const T tmp = rld * in_local[it];
    local += tmp;
    local2 += tmp * in_local[it];
  }
  // Register reuse from here on: bias_local now holds beta and
  // skip_local now holds gamma.
  copy<sizeof(T) * VPT>(&beta[threadIdx.x * VPT], bias_local);
  copy<sizeof(T) * VPT>(&gamma[threadIdx.x * VPT], skip_local);
  using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __shared__ T mu;      // mean
  __shared__ T rsigma;  // 1 / std.dev.
  // Block-wide (mean, mean-of-squares) reduction; result valid in thread 0.
  const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), hipcub::Sum());
  if (threadIdx.x == 0) {
    mu = sumKV.key;
    rsigma = rsqrt(sumKV.value - mu * mu);  // Var = E[x^2] - E[x]^2
  }
  __syncthreads();
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    // gamma * (x - mu) / sigma + beta, using the reused register arrays.
    in_local[it] = skip_local[it] * (in_local[it] - mu) * rsigma + bias_local[it];
  }
  copy<sizeof(T) * VPT>(in_local, &output[idx]);
}
// Skip + layer-norm for small rows (one element per thread; used by the
// launcher when ld <= 32 with TPB == 32). Threads with threadIdx.x >= ld
// contribute a zero pair but still enter layerNormSmall, which presumably
// performs the block-wide reduction — confirm in bert_plugin_util.h.
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernelSmall(const int ld, const T* input, const T* skip, const T* beta,
                                         const T* gamma, T* output, const T* bias) {
  const T rld = T(1) / T(ld);
  const int offset = blockIdx.x * ld;
  hipcub::Sum pairSum;
  // reduce x and x^2
  kvp<T> threadData(0, 0);
  const int idx = offset + threadIdx.x;
  T val = 0;
  if (threadIdx.x < ld) {
    val = input[idx] + skip[idx];
    if (hasBias) {
      val += bias[threadIdx.x];
    }
    const T rldval = rld * val;
    threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
  }
  layerNormSmall<T, T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
// General skip + layer-norm: each block strides over its row of `ld`
// elements in chunks of TPB. The un-normalized sums are staged in `output`,
// which layerNorm then presumably normalizes in place — confirm in
// bert_plugin_util.h.
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernel(const int ld, const T* input, const T* skip, const T* beta,
                                    const T* gamma, T* output, const T* bias) {
  const T rld = T(1) / T(ld);
  const int offset = blockIdx.x * ld;
  hipcub::Sum pairSum;
  // reduce x and x^2
  kvp<T> threadData(0, 0);
  for (int i = threadIdx.x; i < ld; i += TPB) {
    const int idx = offset + i;
    T val = T(input[idx]) + T(skip[idx]);
    if (hasBias) {
      val += T(bias[i]);
    }
    const T rldval = rld * val;
    threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
    // Stage the raw (pre-normalization) value.
    output[idx] = val;
  }
  layerNorm<T, T, T, TPB>(threadData, ld, offset, beta, gamma, output);
}
// Host launcher for the INT8 dequantize -> skip+layernorm -> quantize kernel.
// Only hidden sizes 768 and 1024 are implemented.
// NOTE(review): the unsupported-size path calls exit(0) after LOG(ERROR) —
// a nonzero exit code (or an error return) would be more conventional.
template <bool hasBias>
int computeSkipLayerNormDQQ(hipStream_t stream, const int ld, const int n, const int8_t* input,
                            const int8_t* skip, const __half* beta, const __half* gamma,
                            int8_t* output, const __half* bias, const float dqScaleIn,
                            const float dqScaleSkip, const float qScale) {
  // this must be true because n is the total size of the tensor
  assert(n % ld == 0);
  // One block per row of ld elements.
  const int gridSize = n / ld;
  // we're limited by the size of the parameters, i.e. 8-wide instead of 16
  constexpr int VPT = 16 / sizeof(__half);
  if (ld == 768) {
    constexpr int TPB = 768 / VPT;
    hipLaunchKernelGGL(( skiplnDQQ<TPB, VPT, hasBias>), dim3(gridSize), dim3(TPB), 0, stream,
        ld, input, skip, output, beta, gamma, bias, dqScaleIn, dqScaleSkip, qScale);
  } else if (ld == 1024) {
    constexpr int TPB = 1024 / VPT;
    hipLaunchKernelGGL(( skiplnDQQ<TPB, VPT, hasBias>), dim3(gridSize), dim3(TPB), 0, stream,
        ld, input, skip, output, beta, gamma, bias, dqScaleIn, dqScaleSkip, qScale);
  } else {
    // TODO need to implement this
    LOG(ERROR) << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
    exit(0);
  }
  CUDA_CHECK(hipPeekAtLastError());
  return 0;
}
// Host launcher for the float/half skip + layer-norm. Dispatch tiers:
// ld <= 32 -> one-element-per-thread small kernel; ld == 768/1024 ->
// vectorized kernel (note its different argument order: output before
// beta/gamma); otherwise -> generic strided kernel with 256 threads.
template <typename T, bool hasBias>
int computeSkipLayerNorm(hipStream_t stream, const int ld, const int n, const T* input,
                         const T* skip, const T* beta, const T* gamma, T* output, const T* bias) {
  // this must be true because n is the total size of the tensor
  assert(n % ld == 0);
  // One block per row of ld elements.
  const int gridSize = n / ld;
  constexpr int VPT = 16 / sizeof(T);
  if (ld <= 32) {
    constexpr int blockSize = 32;
    hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize, hasBias>)
        , dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output, bias);
  } else if (ld == 768) {
    constexpr int TPB = 768 / VPT;
    hipLaunchKernelGGL(( skipln_vec<T, TPB, VPT, hasBias>)
        , dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, bias);
  } else if (ld == 1024) {
    constexpr int TPB = 1024 / VPT;
    hipLaunchKernelGGL(( skipln_vec<T, TPB, VPT, hasBias>)
        , dim3(gridSize), dim3(TPB), 0, stream, ld, input, skip, output, beta, gamma, bias);
  } else {
    constexpr int blockSize = 256;
    hipLaunchKernelGGL(( skipLayerNormKernel<T, blockSize, hasBias>)
        , dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output, bias);
  }
  CUDA_CHECK(hipPeekAtLastError());
  return 0;
}
// Explicit instantiations exported to the plugin translation units:
// the int8 DQQ path and the float/half generic paths, each with and
// without a fused bias.
template int computeSkipLayerNormDQQ<true>(hipStream_t stream, const int ld, const int n,
                                           const int8_t* input, const int8_t* skip,
                                           const __half* beta, const __half* gamma, int8_t* output,
                                           const __half* bias, const float dqScaleIn,
                                           const float dqScaleSkip, const float qScale);
template int computeSkipLayerNormDQQ<false>(hipStream_t stream, const int ld, const int n,
                                            const int8_t* input, const int8_t* skip,
                                            const __half* beta, const __half* gamma, int8_t* output,
                                            const __half* bias, const float dqScaleIn,
                                            const float dqScaleSkip, const float qScale);
template int computeSkipLayerNorm<float, true>(hipStream_t, const int, const int, const float*,
                                               const float*, const float*, const float*, float*,
                                               const float*);
template int computeSkipLayerNorm<float, false>(hipStream_t, const int, const int, const float*,
                                                const float*, const float*, const float*, float*,
                                                const float*);
template int computeSkipLayerNorm<half, true>(hipStream_t, const int, const int, const half*,
                                              const half*, const half*, const half*, half*,
                                              const half*);
template int computeSkipLayerNorm<half, false>(hipStream_t, const int, const int, const half*,
                                               const half*, const half*, const half*, half*,
                                               const half*);
} // namespace bert
} // namespace fwd
| 26d3b7e7f16f4b84b60de8530cdb97478e2997b1.cu | // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under
// the License.
//
// ╔════════════════════════════════════════════════════════════════════════════════════════╗
// ║──█████████╗───███████╗───████████╗───██╗──────██╗───███████╗───████████╗───████████╗───║
// ║──██╔══════╝──██╔════██╗──██╔════██╗──██║──────██║──██╔════██╗──██╔════██╗──██╔════██╗──║
// ║──████████╗───██║────██║──████████╔╝──██║──█╗──██║──█████████║──████████╔╝──██║────██║──║
// ║──██╔═════╝───██║────██║──██╔════██╗──██║█████╗██║──██╔════██║──██╔════██╗──██║────██║──║
// ║──██║─────────╚███████╔╝──██║────██║──╚████╔████╔╝──██║────██║──██║────██║──████████╔╝──║
// ║──╚═╝──────────╚══════╝───╚═╝────╚═╝───╚═══╝╚═══╝───╚═╝────╚═╝──╚═╝────╚═╝──╚═══════╝───║
// ╚════════════════════════════════════════════════════════════════════════════════════════╝
//
// Authors: Aster JIAN (asterjian@qq.com)
// Yzx (yzxyzxyzx777@outlook.com)
// Ao LI (346950981@qq.com)
// Paul LU (lujq96@gmail.com)
#include <NvInfer.h>
#include <cassert>
#include <cstring>
#include <vector>
#include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h"
#include "trt_engine/trt_network_crt/plugins/skip_layer_norm_plugin/skip_layer_norm_plugin.h"
using namespace nvinfer1;
namespace fwd {
namespace bert {
// INT8 skip-connection + layer-norm kernel: dequantizes input and skip,
// optionally adds bias, layer-normalizes each row of `ld` elements, then
// re-quantizes to int8 with qScale. One block per row, VPT elements per
// thread; the launcher sets TPB = ld / VPT, so ld must equal TPB * VPT.
template <int TPB, int VPT, bool hasBias>
__global__ void skiplnDQQ(const int ld, const int8_t* input, const int8_t* skip, int8_t* output,
                          const __half* beta, const __half* gamma, const __half* bias,
                          const float dqScaleIn, const float dqScaleSkip, const float qScale) {
  const int idx = ld * blockIdx.x + threadIdx.x * VPT;
  // 4 * 1024 * 4 * 2 Bytes = 16KB per block
  int8_t in_local[VPT];
  int8_t skip_local[VPT];
  __half in_local_dq[VPT];  // dequantized input + skip + bias
  __half bias_local[VPT];   // bias and beta (register array is reused below)
  __half gamma_local[VPT];
  // Vectorized loads of this thread's VPT-element chunk.
  copy<sizeof(int8_t) * VPT>(&input[idx], in_local);
  copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local);
  copy<sizeof(__half) * VPT>(&bias[threadIdx.x * VPT], bias_local);
  // loc accumulates (sum(x)/ld, sum(x^2)/ld) in the two half lanes.
  __half2 loc = __floats2half2_rn(0.f, 0.f);  // accumulator
  const __half rld = __half(1) / __half(ld);
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    // DQ input and skip
    const float tmp_in = in_local[it];
    const float tmp_skip = skip_local[it];
    in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip;
    if (hasBias) in_local_dq[it] += bias_local[it];
    const __half tmp = rld * in_local_dq[it];
    const __half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]);
    loc = loc + tmp2;
  }
  // load parameters: bias_local now holds beta.
  copy<sizeof(__half) * VPT>(&beta[threadIdx.x * VPT], bias_local);
  copy<sizeof(__half) * VPT>(&gamma[threadIdx.x * VPT], gamma_local);
  using BlockReduce = cub::BlockReduce<__half2, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __shared__ __half mu;      // mean
  __shared__ __half rsigma;  // 1 / std.dev.
  // Block-wide reduction of (mean, mean-of-squares); result valid in thread 0.
  const __half2 sum2 = BlockReduce(temp_storage).Reduce(loc, cub::Sum());
  if (threadIdx.x == 0) {
    mu = __low2half(sum2);
    rsigma = rsqrt(__high2half(sum2) - mu * mu);  // Var = E[x^2] - E[x]^2
  }
  __syncthreads();
#pragma unroll
  for (int it = 0; it < VPT; it++) {
    // apply layernorm
    const float tmp = gamma_local[it] * (in_local_dq[it] - mu) * rsigma + bias_local[it];
    // Quantize, saturating to the symmetric int8 range [-127, 127].
    int tmpq = __float2int_rn(qScale * tmp);
    tmpq = max(-127, tmpq);
    tmpq = min(127, tmpq);
    in_local[it] = tmpq;
  }
  copy<sizeof(int8_t) * VPT>(in_local, &output[idx]);
}
// Fused "x = input + skip (+ bias)" followed by LayerNorm, fully vectorized.
// Layout contract: one block per row of length ld, with ld == TPB * VPT and
// blockDim.x == TPB; each thread loads/stores VPT contiguous elements.
// Mean/variance are computed with a cub::BlockReduce over (sum x/ld, sum x^2/ld).
template <typename T, int TPB, int VPT, bool hasBias>
__global__ void skipln_vec(const int ld, const T* input, const T* skip, T* output, const T* beta,
const T* gamma, const T* bias) {
// first element owned by this thread within row blockIdx.x
const int idx = ld * blockIdx.x + threadIdx.x * VPT;
// 4 * 1024 * 4 * 2 Bytes = 16KB per block
T in_local[VPT];
T skip_local[VPT]; // holds skip values first; recycled to hold gamma after the sum loop
T bias_local[VPT]; // holds bias first; recycled to hold beta after the sum loop
// (a separate gamma_local was dropped to save registers)
copy<sizeof(T) * VPT>(&input[idx], in_local);
copy<sizeof(T) * VPT>(&skip[idx], skip_local);
copy<sizeof(T) * VPT>(&bias[threadIdx.x * VPT], bias_local);
T local = 0.f;  // per-thread partial of E[x] (values pre-scaled by 1/ld)
T local2 = 0.f; // per-thread partial of E[x^2]
const T rld = T(1) / T(ld);
#pragma unroll
for (int it = 0; it < VPT; it++) {
in_local[it] += skip_local[it];
if (hasBias) in_local[it] += bias_local[it];
const T tmp = rld * in_local[it];
local += tmp;
local2 += tmp * in_local[it];
}
// load normalization parameters, reusing the register arrays read above
copy<sizeof(T) * VPT>(&beta[threadIdx.x * VPT], bias_local);
copy<sizeof(T) * VPT>(&gamma[threadIdx.x * VPT], skip_local);
using BlockReduce = cub::BlockReduce<kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
// block-wide reduction; the result is only valid in thread 0
const auto sumKV = BlockReduce(temp_storage).Reduce(kvp<T>(local, local2), cub::Sum());
if (threadIdx.x == 0) {
mu = sumKV.key;
rsigma = rsqrt(sumKV.value - mu * mu); // Var = E[x^2] - E[x]^2 (note: no epsilon term)
}
__syncthreads(); // publish mu/rsigma to the whole block
#pragma unroll
for (int it = 0; it < VPT; it++) {
// apply layernorm: gamma (now in skip_local) * normalized + beta (now in bias_local)
in_local[it] = skip_local[it] * (in_local[it] - mu) * rsigma + bias_local[it];
}
copy<sizeof(T) * VPT>(in_local, &output[idx]);
}
// Small-row variant of skip + LayerNorm: requires ld <= TPB, one thread per
// element (threads with threadIdx.x >= ld contribute zeros). The actual
// normalization and write-out is finished by the layerNormSmall helper.
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernelSmall(const int ld, const T* input, const T* skip, const T* beta,
const T* gamma, T* output, const T* bias) {
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld; // start of this block's row
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < ld) { // guard: block may be wider than the row
val = input[idx] + skip[idx];
if (hasBias) {
val += bias[threadIdx.x];
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
// layerNormSmall reduces threadData across the block and writes the result
layerNormSmall<T, T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
// Generic skip + LayerNorm: TPB threads stride over a row of arbitrary length
// ld. The un-normalized sums are first staged in `output`; the layerNorm
// helper then (presumably) normalizes them in place — confirm against its
// definition elsewhere in this file.
template <typename T, unsigned TPB, bool hasBias>
__global__ void skipLayerNormKernel(const int ld, const T* input, const T* skip, const T* beta,
const T* gamma, T* output, const T* bias) {
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld; // start of this block's row
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB) { // grid-stride within the row
const int idx = offset + i;
T val = T(input[idx]) + T(skip[idx]);
if (hasBias) {
val += T(bias[i]);
}
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
output[idx] = val; // stage pre-norm value; normalized below by layerNorm
}
layerNorm<T, T, T, TPB>(threadData, ld, offset, beta, gamma, output);
}
// Launch the fused dequantize + skip + LayerNorm + requantize (DQQ) kernel.
//
// ld:    hidden size (row length); only 768 and 1024 have specialized kernels.
// n:     total element count; must be a multiple of ld (one block per row).
// dqScaleIn/dqScaleSkip: dequantization scales for input/skip; qScale: output
// quantization scale. Returns 0 on success; terminates the process on an
// unsupported hidden size.
template <bool hasBias>
int computeSkipLayerNormDQQ(cudaStream_t stream, const int ld, const int n, const int8_t* input,
                            const int8_t* skip, const __half* beta, const __half* gamma,
                            int8_t* output, const __half* bias, const float dqScaleIn,
                            const float dqScaleSkip, const float qScale) {
  // this must be true because n is the total size of the tensor
  assert(n % ld == 0);
  const int gridSize = n / ld;  // one block per row
  // we're limited by the size of the parameters, i.e. 8-wide instead of 16
  constexpr int VPT = 16 / sizeof(__half);
  if (ld == 768) {
    constexpr int TPB = 768 / VPT;
    skiplnDQQ<TPB, VPT, hasBias><<<gridSize, TPB, 0, stream>>>(
        ld, input, skip, output, beta, gamma, bias, dqScaleIn, dqScaleSkip, qScale);
  } else if (ld == 1024) {
    constexpr int TPB = 1024 / VPT;
    skiplnDQQ<TPB, VPT, hasBias><<<gridSize, TPB, 0, stream>>>(
        ld, input, skip, output, beta, gamma, bias, dqScaleIn, dqScaleSkip, qScale);
  } else {
    // TODO need to implement this
    LOG(ERROR) << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl;
    // BUG FIX: was exit(0) — a fatal error must not report success to the OS.
    exit(EXIT_FAILURE);
  }
  CUDA_CHECK(cudaPeekAtLastError());
  return 0;
}
// Dispatch the skip + LayerNorm kernels for element type T.
// ld is the row length and n the total element count (n % ld == 0); the grid
// is one block per row. Returns 0 on success.
template <typename T, bool hasBias>
int computeSkipLayerNorm(cudaStream_t stream, const int ld, const int n, const T* input,
                         const T* skip, const T* beta, const T* gamma, T* output, const T* bias) {
  // n is the total tensor size, so it must consist of whole rows
  assert(n % ld == 0);
  const int nRows = n / ld;
  constexpr int VPT = 16 / sizeof(T); // elements per 16-byte vector load
  if (ld <= 32) {
    // a single warp covers the whole row
    constexpr int blockSize = 32;
    skipLayerNormKernelSmall<T, blockSize, hasBias>
        <<<nRows, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output, bias);
  } else if (ld == 768) {
    // vectorized kernel: each thread owns VPT contiguous elements
    constexpr int TPB = 768 / VPT;
    skipln_vec<T, TPB, VPT, hasBias>
        <<<nRows, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, bias);
  } else if (ld == 1024) {
    constexpr int TPB = 1024 / VPT;
    skipln_vec<T, TPB, VPT, hasBias>
        <<<nRows, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, bias);
  } else {
    // generic fallback: fixed block size, row handled by a strided loop
    constexpr int blockSize = 256;
    skipLayerNormKernel<T, blockSize, hasBias>
        <<<nRows, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output, bias);
  }
  CUDA_CHECK(cudaPeekAtLastError());
  return 0;
}
// Explicit instantiations: the int8 DQQ path (with __half parameters) and the
// generic float/half paths, each with and without a fused bias term.
template int computeSkipLayerNormDQQ<true>(cudaStream_t stream, const int ld, const int n,
const int8_t* input, const int8_t* skip,
const __half* beta, const __half* gamma, int8_t* output,
const __half* bias, const float dqScaleIn,
const float dqScaleSkip, const float qScale);
template int computeSkipLayerNormDQQ<false>(cudaStream_t stream, const int ld, const int n,
const int8_t* input, const int8_t* skip,
const __half* beta, const __half* gamma, int8_t* output,
const __half* bias, const float dqScaleIn,
const float dqScaleSkip, const float qScale);
template int computeSkipLayerNorm<float, true>(cudaStream_t, const int, const int, const float*,
const float*, const float*, const float*, float*,
const float*);
template int computeSkipLayerNorm<float, false>(cudaStream_t, const int, const int, const float*,
const float*, const float*, const float*, float*,
const float*);
template int computeSkipLayerNorm<half, true>(cudaStream_t, const int, const int, const half*,
const half*, const half*, const half*, half*,
const half*);
template int computeSkipLayerNorm<half, false>(cudaStream_t, const int, const int, const half*,
const half*, const half*, const half*, half*,
const half*);
} // namespace bert
} // namespace fwd
|
41291d4746f096e1f38663f47762b8037e2bf02a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Cao Thanh Tung and Ashwin Nanjappa
Filename: pba3DHost.cu
Copyright (c) 2010, School of Computing, National University of Singapore.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <hip/device_functions.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <fstream>
using namespace std;
#include "pba3D.h"
#include "Geometry.h"
#include "CudaWrapper.h"
// Parameters for CUDA kernel executions
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
// NOTE(review): this redefines the standard INFINITY macro from <math.h>; here
// it is the 10-bit "unreachable coordinate" sentinel used by the PBA encoding.
// Consider a distinct name (e.g. PBA_INFINITY) — verify no included header
// relies on the floating-point INFINITY after this point.
#define INFINITY 0x3ff
/****** Global Variables *******/
int **pbaTextures;    // host table holding the two ping-pong device buffers
int pbaMemSize;       // bytes per buffer: pbaTexSize^3 * sizeof(int)
int pbaCurrentBuffer; // index (0 or 1) of the current source buffer
int pbaTexSize;       // edge length of the cubic texture
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
texture<short> pbaTexPointer;
/********* Kernels ********/
#include "pba3DKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 3D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
// Record the grid size and allocate the host-side table for the two ping-pong
// device buffers. The device buffers themselves are supplied later through
// pba3DInitializeInput (the hipMalloc calls below are intentionally disabled).
void pba3DInitialization(int fboSize)
{
pbaTexSize = fboSize;
pbaTextures = (int **) malloc(2 * sizeof(int *)); // NOTE(review): malloc result unchecked
// NOTE(review): texSize^3 * sizeof(int) overflows 32-bit int for large
// fboSize (roughly >= 813) — confirm the expected range of fboSize.
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
// Allocate 2 textures
//hipMalloc((void **) &pbaTextures[0], pbaMemSize);
//hipMalloc((void **) &pbaTextures[1], pbaMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
// Release the host-side buffer table. The device buffers are caller-owned
// (see pba3DInitializeInput) and are not freed here. The table pointer is
// reset so a stale value cannot be dereferenced or double-freed.
void pba3DDeinitialization()
{
    free(pbaTextures);
    pbaTextures = NULL; // guard against dangling use / double free
}
// Copy input to GPU
// Install the caller-owned input/output device buffers as the ping-pong pair
// and mark buffer 0 as the current source.
void pba3DInitializeInput(int *input, int *output)
{
    // buffer 0 is the source at the start of a computation
    pbaCurrentBuffer = 0;
    pbaTextures[0] = input;
    pbaTextures[1] = output;
}
// In-place transpose of a cubic texture.
// Transpositions are performed on each XY plane.
// Point coordinates are also swapped.
// Launch the in-place XY transpose over every XY slice of the cubic texture;
// the kernel also swaps the encoded point coordinates.
void pba3DTransposeXY(int *texture)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY); // tiles x slices
hipLaunchKernelGGL(( kernelTransposeXY), dim3(grid), dim3(block) , 0, 0, texture, pbaTexSize);
CudaCheckError();
}
// Phase 1 of PBA. m1 must divide the texture size.
// Sweeping is done along the Z axis.
// Phase 1: flood site information along Z inside m1 independent bands, then
// (if m1 > 1) exchange information between bands and update each band.
void pba3DColorZAxis(int m1)
{
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y);
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelFloodZ), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
pbaCurrentBuffer = 1 - pbaCurrentBuffer; // ping-pong: the result becomes the source
if (m1 > 1)
{
// Passing information between bands
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
}
}
// Phase 2 of PBA. m2 must divide the texture size.
// This method works along the Y axis.
// Phase 2: build the proximate-point stacks along Y in m2 bands, link them
// with forward pointers, then merge bands pairwise until one band remains.
void pba3DComputeProximatePointsYAxis(int m2)
{
int iStack = 1 - pbaCurrentBuffer;   // buffer holding the point stacks
int iForward = pbaCurrentBuffer;     // buffer reused for the forward pointers
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y);
// Compute proximate points locally in each band
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelMaurerAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
// Construct forward pointers
CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[iStack]) );
hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, (short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
CudaSafeCall( hipBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) );
// Repeatedly merge pairs of bands until a single band remains
for (int noBand = m2; noBand > 1; noBand /= 2)
{
grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y);
hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack],
(short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand);
CudaCheckError();
}
CudaSafeCall( hipUnbindTexture(pbaTexLinks) );
CudaSafeCall( hipUnbindTexture(pbaTexColor) );
CudaSafeCall( hipUnbindTexture(pbaTexPointer) );
}
// Phase 3 of PBA. m3 must divide the texture size.
// This method colors along the Y axis.
// Phase 3: color along Y using the proximate-point structure; m3 controls the
// block height (threads cooperating per column).
void pba3DColorYAxis(int m3)
{
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize);
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) );
hipLaunchKernelGGL(( kernelColorAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize);
CudaCheckError();
CudaSafeCall( hipUnbindTexture(pbaTexColor) );
return;
}
// Full PBA pipeline: sweep Z (phase 1), then handle Y (phases 2+3), transpose
// XY so the X axis can be processed by the same Y-axis routines, and transpose
// back to restore the (X, Y, Z) layout.
void pba3DCompute(int m1, int m2, int m3)
{
/************* Compute Z axis *************/
// --> (X, Y, Z)
pba3DColorZAxis(m1);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (Y, X, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
/************** Compute X axis *************/
// Compute X axis
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (X, Y, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
}
// Compute 3D Voronoi diagram
// Input: a 3D texture. Each pixel is an integer encoding 3 coordinates.
// For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain
// the encoded coordinate (x, y, z). Pixels that are not sites should contain
// the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode).
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
// Note: input texture will be released after this.
// Compute the 3D discrete Voronoi diagram of the encoded sites in dInput.
// On return *dOutput points at whichever ping-pong buffer holds the result
// (it may be dInput itself); the other buffer's contents are clobbered.
void pba3DVoronoiDiagram(int *dInput, int **dOutput,
int phase1Band, int phase2Band, int phase3Band)
{
// Initialization
pba3DInitializeInput(dInput, *dOutput);
// Compute the 3D Voronoi Diagram
pba3DCompute(phase1Band, phase2Band, phase3Band);
// Pass back the result
*dOutput = pbaTextures[pbaCurrentBuffer];
return;
}
// A function to draw points onto GPU texture
// Clear the grid to MARKER and stamp each input point into it.
// NOTE(review): hipMemset writes only the low byte of MARKER to every byte of
// the buffer — this is correct only if MARKER is byte-replicable (e.g. -1 /
// 0xFFFFFFFF); confirm against MARKER's definition in pba3D.h.
void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
CudaSafeCall( hipMemset( dInputVoronoi, MARKER, pbaMemSize ) );
hipLaunchKernelGGL(( kerSetPointsInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dInputVoronoi,
pbaTexSize
);
CudaCheckError();
return;
}
// A function to draw point's IDs onto GPU texture
// Stamp each point's index into the ID-map grid (no clearing is done here;
// the caller is responsible for initializing dMapToID).
void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
hipLaunchKernelGGL(( kerSetPointIndicesInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dMapToID,
pbaTexSize
);
CudaCheckError();
return;
}
// Rewrite dGrid using the index lookup in dPointIndexGrid, then release
// dPointIndexGrid — this function takes ownership of that buffer.
void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
hipLaunchKernelGGL(( kerSetIndexInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, gridWidth, dPointIndexGrid, dGrid );
CudaCheckError();
// Free grid
CudaSafeCall( hipFree( dPointIndexGrid ) );
return;
}
| 41291d4746f096e1f38663f47762b8037e2bf02a.cu | /*
Author: Cao Thanh Tung and Ashwin Nanjappa
Filename: pba3DHost.cu
Copyright (c) 2010, School of Computing, National University of Singapore.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <device_functions.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <fstream>
using namespace std;
#include "pba3D.h"
#include "Geometry.h"
#include "CudaWrapper.h"
// Parameters for CUDA kernel executions
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
// NOTE(review): redefines the standard INFINITY macro from <math.h>; here it
// is the 10-bit "unreachable coordinate" sentinel of the PBA encoding.
// Consider renaming (e.g. PBA_INFINITY) — verify nothing needs float INFINITY.
#define INFINITY 0x3ff
/****** Global Variables *******/
int **pbaTextures;    // host table holding the two ping-pong device buffers
int pbaMemSize;       // bytes per buffer: pbaTexSize^3 * sizeof(int)
int pbaCurrentBuffer; // index (0 or 1) of the current source buffer
int pbaTexSize;       // edge length of the cubic texture
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
texture<short> pbaTexPointer;
/********* Kernels ********/
#include "pba3DKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 3D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
// Record the grid size and allocate the host-side table for the two ping-pong
// device buffers; the buffers themselves come from pba3DInitializeInput
// (the cudaMalloc calls below are intentionally disabled).
void pba3DInitialization(int fboSize)
{
pbaTexSize = fboSize;
pbaTextures = (int **) malloc(2 * sizeof(int *)); // NOTE(review): malloc result unchecked
// NOTE(review): texSize^3 * sizeof(int) overflows 32-bit int for large
// fboSize (roughly >= 813) — confirm the expected range of fboSize.
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
// Allocate 2 textures
//cudaMalloc((void **) &pbaTextures[0], pbaMemSize);
//cudaMalloc((void **) &pbaTextures[1], pbaMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
// Release the host-side buffer table. The device buffers are caller-owned
// (see pba3DInitializeInput) and are not freed here. The table pointer is
// reset so a stale value cannot be dereferenced or double-freed.
void pba3DDeinitialization()
{
    free(pbaTextures);
    pbaTextures = NULL; // guard against dangling use / double free
}
// Copy input to GPU
// Install the caller-owned input/output device buffers as the ping-pong pair
// and mark buffer 0 as the current source.
void pba3DInitializeInput(int *input, int *output)
{
    // buffer 0 is the source at the start of a computation
    pbaCurrentBuffer = 0;
    pbaTextures[0] = input;
    pbaTextures[1] = output;
}
// In-place transpose of a cubic texture.
// Transpositions are performed on each XY plane.
// Point coordinates are also swapped.
// Launch the in-place XY transpose over every XY slice of the cubic texture;
// the kernel also swaps the encoded point coordinates.
void pba3DTransposeXY(int *texture)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY); // tiles x slices
kernelTransposeXY<<< grid, block >>>(texture, pbaTexSize);
CudaCheckError();
}
// Phase 1 of PBA. m1 must divide the texture size.
// Sweeping is done along the Z axis.
// Phase 1: flood site information along Z inside m1 independent bands, then
// (if m1 > 1) exchange information between bands and update each band.
void pba3DColorZAxis(int m1)
{
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y);
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelFloodZ<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
pbaCurrentBuffer = 1 - pbaCurrentBuffer; // ping-pong: the result becomes the source
if (m1 > 1)
{
// Passing information between bands
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelPropagateInterband<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) );
kernelUpdateVertical<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
}
}
// Phase 2 of PBA. m2 must divide the texture size.
// This method works along the Y axis.
// Phase 2: build the proximate-point stacks along Y in m2 bands, link them
// with forward pointers, then merge bands pairwise until one band remains.
void pba3DComputeProximatePointsYAxis(int m2)
{
int iStack = 1 - pbaCurrentBuffer;   // buffer holding the point stacks
int iForward = pbaCurrentBuffer;     // buffer reused for the forward pointers
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y);
// Compute proximate points locally in each band
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelMaurerAxis<<< grid, block >>>(pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
// Construct forward pointers
CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[iStack]) );
kernelCreateForwardPointers<<< grid, block >>>((short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
CudaSafeCall( cudaBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) );
// Repeatedly merge pairs of bands until a single band remains
for (int noBand = m2; noBand > 1; noBand /= 2)
{
grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y);
kernelMergeBands<<< grid, block >>>(pbaTextures[iStack],
(short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand);
CudaCheckError();
}
CudaSafeCall( cudaUnbindTexture(pbaTexLinks) );
CudaSafeCall( cudaUnbindTexture(pbaTexColor) );
CudaSafeCall( cudaUnbindTexture(pbaTexPointer) );
}
// Phase 3 of PBA. m3 must divide the texture size.
// This method colors along the Y axis.
// Phase 3: color along Y using the proximate-point structure; m3 controls the
// block height (threads cooperating per column).
void pba3DColorYAxis(int m3)
{
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize);
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) );
kernelColorAxis<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize);
CudaCheckError();
CudaSafeCall( cudaUnbindTexture(pbaTexColor) );
return;
}
// Full PBA pipeline: sweep Z (phase 1), then handle Y (phases 2+3), transpose
// XY so the X axis can be processed by the same Y-axis routines, and transpose
// back to restore the (X, Y, Z) layout.
void pba3DCompute(int m1, int m2, int m3)
{
/************* Compute Z axis *************/
// --> (X, Y, Z)
pba3DColorZAxis(m1);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (Y, X, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
/************** Compute X axis *************/
// Compute X axis
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (X, Y, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
}
// Compute 3D Voronoi diagram
// Input: a 3D texture. Each pixel is an integer encoding 3 coordinates.
// For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain
// the encoded coordinate (x, y, z). Pixels that are not sites should contain
// the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode).
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
// Note: input texture will be released after this.
// Compute the 3D discrete Voronoi diagram of the encoded sites in dInput.
// On return *dOutput points at whichever ping-pong buffer holds the result
// (it may be dInput itself); the other buffer's contents are clobbered.
void pba3DVoronoiDiagram(int *dInput, int **dOutput,
int phase1Band, int phase2Band, int phase3Band)
{
// Initialization
pba3DInitializeInput(dInput, *dOutput);
// Compute the 3D Voronoi Diagram
pba3DCompute(phase1Band, phase2Band, phase3Band);
// Pass back the result
*dOutput = pbaTextures[pbaCurrentBuffer];
return;
}
// A function to draw points onto GPU texture
// Clear the grid to MARKER and stamp each input point into it.
// NOTE(review): cudaMemset writes only the low byte of MARKER to every byte of
// the buffer — correct only if MARKER is byte-replicable (e.g. -1 /
// 0xFFFFFFFF); confirm against MARKER's definition in pba3D.h.
void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
CudaSafeCall( cudaMemset( dInputVoronoi, MARKER, pbaMemSize ) );
kerSetPointsInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dInputVoronoi,
pbaTexSize
);
CudaCheckError();
return;
}
// A function to draw point's IDs onto GPU texture
// Stamp each point's index into the ID-map grid (no clearing is done here;
// the caller is responsible for initializing dMapToID).
void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
kerSetPointIndicesInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dMapToID,
pbaTexSize
);
CudaCheckError();
return;
}
// Rewrite dGrid using the index lookup in dPointIndexGrid, then release
// dPointIndexGrid — this function takes ownership of that buffer.
void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
kerSetIndexInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>( gridWidth, dPointIndexGrid, dGrid );
CudaCheckError();
// Free grid
CudaSafeCall( cudaFree( dPointIndexGrid ) );
return;
}
|
5b0b1f59b740a6dfdf1c97d91c05b2fee0508527.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
// Explicit instantiation of the separable column filter for float4 input,
// ushort4 output (kernel defined in column_filter.h).
template void linearColumn<float4, ushort4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 5b0b1f59b740a6dfdf1c97d91c05b2fee0508527.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "column_filter.h"
namespace filter
{
// Explicit instantiation of the separable column filter for float4 input,
// ushort4 output (kernel defined in column_filter.h).
template void linearColumn<float4, ushort4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
48a6b938e7224dc8d95e0dab92a858cbd9d06eba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#define index(i, j, N) ((i)*(N)) + (j)
#define BLOCK_SIZE 1024
// get nearby value(left, right, up, down) of k-th u at point (i,j)
// Fetch the 4-neighborhood of u at grid point (i,j) on an N x N row-major
// grid. Neighbors falling outside the grid take the homogeneous Dirichlet
// boundary value 0.
//
// BUG FIX: the original wrote `if (i==0) {...} if (i==N-1) {...} else {...}`,
// so for the first row (i==0) the "middle row" else-branch also executed and
// read u[index(-1,j,N)] — an out-of-bounds access. The cases are now mutually
// exclusive.
__device__ void getNearbyValue(int i,int j, int N, double& left, double& right, double& up, double& down, double* u){
    // left/right neighbors: identical handling for every row
    left  = (j == 0)     ? 0.0 : u[index(i, j - 1, N)];
    right = (j == N - 1) ? 0.0 : u[index(i, j + 1, N)];
    // down is row i-1 (0 on the first row); up is row i+1 (0 on the last row)
    down  = (i == 0)     ? 0.0 : u[index(i - 1, j, N)];
    up    = (i == N - 1) ? 0.0 : u[index(i + 1, j, N)];
}
// Decompose a flat row-major index into its (row i, column j) pair on an
// N-column grid.
__device__ void indextoij(int index,int N,int& i, int& j){
    const int row = index / N;
    i = row;
    j = index - row * N; // equivalent to index % N by the C++ division identity
}
// One Jacobi sweep: u_next = (h^2 * f + up + down + left + right) / 4 at every
// interior grid point, with zero Dirichlet boundary. One thread per grid
// point, launched 1-D over N*N elements.
__global__ void jacobi2D_kernel(double* u_next,double* f, double* u, int N, double h){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid<N*N){ // guard: the grid may overshoot N*N
int i,j=0; // both are assigned by indextoij below
indextoij(tid,N,i,j);
// printf("%d %d\n",i,j);
double left,right,up,down;
getNearbyValue(i,j,N,left,right,up,down,u); // neighbors, 0 outside the grid
u_next[tid]=(pow(h,2)*f[index(i,j,N)]+up+down+left+right)/4.0;
}
}
// Run max_iteration Jacobi sweeps for the 2D Poisson problem on an N x N grid
// (h = 1/(N+1), zero Dirichlet boundary). On entry u holds the initial guess
// and f the right-hand side (host arrays of N*N doubles); on exit u holds the
// final iterate.
void jacobi2D(double* f, double* u, int N, int max_iteration){
    double h = 1.0/(N+1.0);
    const size_t bytes = (size_t)N * N * sizeof(double);

    // device copies of f and the two ping-pong solution buffers
    double *f_d;
    hipMalloc(&f_d, bytes);
    hipMemcpy(f_d, f, bytes, hipMemcpyHostToDevice);
    double *u_d;
    hipMalloc(&u_d, bytes);
    hipMemcpy(u_d, u, bytes, hipMemcpyHostToDevice);
    double *u_next_d;
    hipMalloc(&u_next_d, bytes);
    hipMemcpy(u_next_d, u, bytes, hipMemcpyHostToDevice);

    long Nb = (N*N+BLOCK_SIZE-1)/(BLOCK_SIZE); // one thread per grid point
    for(int k=0;k<max_iteration;k++){
        hipLaunchKernelGGL(( jacobi2D_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, u_next_d,f_d,u_d,N,h);
        // Swap the buffer roles instead of the original per-iteration
        // device-to-device hipMemcpy: identical results without an extra
        // N*N copy every sweep.
        double* tmp = u_d;
        u_d = u_next_d;
        u_next_d = tmp;
    }
    // after the final swap, u_d holds the latest iterate
    hipMemcpy(u, u_d, bytes, hipMemcpyDeviceToHost);
    hipFree(f_d);
    hipFree(u_d);
    hipFree(u_next_d);
}
// Driver: jacobi2D <N> runs 5000 Jacobi sweeps on an N x N grid with u0 = 0
// and f = 1 everywhere, timing the solve with OpenMP's wall clock.
int main(int argc, char * argv[]){
int maxite=5000; // fixed iteration count
int N;
if(argc!=2){
fprintf(stderr, "usage: jacobi2D-omp <N>\n");
exit(1);
}
N = atoi(argv[1]);
double* u = (double*) malloc(N * N * sizeof(double)); // solution
double* f = (double*) malloc(N * N * sizeof(double)); // RHS
for(int i=0;i<N*N;i++){
u[i] = 0.0; // initial guess
f[i] = 1.0; // right hand side equals 1
}
// printf("Max thread number: %d\n", omp_get_max_threads()); // => 64
double tt = omp_get_wtime();
jacobi2D(f,u,N,maxite);
double time = omp_get_wtime()-tt;
printf("\n");
printf("Total time taken: %10f seconds\n", time);
// for(int i=0;i<N*N;i++){
// printf("%lf\n", u[i]);
// }
free(u);
free(f);
return 0;
}
| 48a6b938e7224dc8d95e0dab92a858cbd9d06eba.cu | #include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#define index(i, j, N) ((i)*(N)) + (j)
#define BLOCK_SIZE 1024
// get nearby value(left, right, up, down) of k-th u at point (i,j)
__device__ void getNearbyValue(int i,int j, int N, double& left, double& right, double& up, double& down, double* u){
// first row
if(i==0){
if(j==0){
left = 0;
right = u[index(i,j+1,N)];
}
else if (j==N-1) {
left = u[index(i,j-1,N)];
right = 0;
}
else{
left = u[index(i,j-1,N)];
right = u[index(i,j+1,N)];
}
down = 0;
up = u[index(i+1,j,N)];
}
// last row
if (i==N-1){
if(j==0){
left = 0;
right = u[index(i,j+1,N)];
}
else if (j==N-1) {
left = u[index(i,j-1,N)];
right = 0;
}
else{
left = u[index(i,j-1,N)];
right = u[index(i,j+1,N)];
}
up = 0;
down = u[index(i-1,j,N)];
}
// middle row
else{
if(j==0){
left = 0;
right = u[index(i,j+1,N)];
}
else if (j==N-1) {
left = u[index(i,j-1,N)];
right = 0;
}
else{
left = u[index(i,j-1,N)];
right = u[index(i,j+1,N)];
}
up = u[index(i+1,j,N)];
down = u[index(i-1,j,N)];
}
}
// Map a flat row-major index on an N-by-N grid to its (row, column) pair.
__device__ void indextoij(int index, int N, int& i, int& j) {
    const int row = index / N;
    i = row;
    j = index - row * N;  // equivalent to index % N for non-negative index
}
// One Jacobi sweep for the 2D Poisson problem on an N-by-N interior grid
// with zero boundary values (supplied by getNearbyValue) and mesh width h:
//   u_next(i,j) = (h^2 * f(i,j) + up + down + left + right) / 4
// Launch with at least N*N threads; extra threads exit via the bounds check.
__global__ void jacobi2D_kernel(double* u_next, double* f, double* u, int N, double h){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N*N) {
        int i = 0, j = 0;   // both initialized (original left i indeterminate)
        indextoij(tid, N, i, j);
        double left, right, up, down;
        getNearbyValue(i, j, N, left, right, up, down, u);
        // h*h instead of pow(h,2): avoids a double-precision pow() call and
        // is the exactly-rounded square.
        u_next[tid] = (h*h*f[index(i,j,N)] + up + down + left + right)/4.0;
    }
}
// Run `max_iteration` Jacobi sweeps on the device.
// f: host right-hand side (N*N), u: host initial guess, overwritten with the
// final iterate. Grid spacing h = 1/(N+1).
void jacobi2D(double* f, double* u, int N, int max_iteration){
    double h = 1.0/(N+1.0);
    size_t bytes = (size_t)N * N * sizeof(double);
    double *f_d, *u_d, *u_next_d;
    cudaMalloc(&f_d, bytes);
    cudaMemcpy(f_d, f, bytes, cudaMemcpyHostToDevice);
    cudaMalloc(&u_d, bytes);
    cudaMemcpy(u_d, u, bytes, cudaMemcpyHostToDevice);
    cudaMalloc(&u_next_d, bytes);
    // Seed u_next with the initial guess too, so the buffers agree even when
    // max_iteration == 0 (replaces the host-side scratch copy).
    cudaMemcpy(u_next_d, u, bytes, cudaMemcpyHostToDevice);
    long Nb = (N*N + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (int k = 0; k < max_iteration; k++) {
        jacobi2D_kernel<<<Nb,BLOCK_SIZE>>>(u_next_d,f_d,u_d,N,h);
        // Swap the two device buffers instead of copying N*N doubles every
        // iteration: after the swap u_d holds the freshly computed iterate.
        double* tmp = u_d; u_d = u_next_d; u_next_d = tmp;
    }
    // cudaMemcpy synchronizes with all preceding kernel launches.
    cudaMemcpy(u, u_d, bytes, cudaMemcpyDeviceToHost);
    cudaFree(f_d);
    cudaFree(u_d);
    cudaFree(u_next_d);
}
// Driver: parse the grid size N from argv, run a fixed number of Jacobi
// iterations on f == 1 with initial guess u == 0, and report wall time.
int main(int argc, char * argv[]){
    int maxite = 5000;   // fixed iteration count
    int N;
    if (argc != 2) {
        fprintf(stderr, "usage: jacobi2D-omp <N>\n");
        exit(1);
    }
    N = atoi(argv[1]);
    if (N <= 0) {   // atoi returns 0 on non-numeric input; reject non-positive N
        fprintf(stderr, "usage: jacobi2D-omp <N>\n");
        exit(1);
    }
    double* u = (double*) malloc((size_t)N * N * sizeof(double)); // solution
    double* f = (double*) malloc((size_t)N * N * sizeof(double)); // RHS
    if (u == NULL || f == NULL) {
        fprintf(stderr, "allocation failed\n");
        exit(1);
    }
    for (int i = 0; i < N*N; i++) {
        u[i] = 0.0; // initial guess
        f[i] = 1.0; // right-hand side equals 1
    }
    double tt = omp_get_wtime();
    jacobi2D(f, u, N, maxite);
    double time = omp_get_wtime() - tt;
    printf("\n");
    printf("Total time taken: %10f seconds\n", time);
    free(u);
    free(f);
    return 0;
}
|
26f30e8df9ed2b99664423b1bdebb77e5069531d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <hip/hip_runtime.h>
#include <rocblas.h>
// #include "integral-strided-cuda.hpp"
#define BLOCK_SIZE 32
#define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE))
using std::max;
using std::min;
using std::floor;
using std::ceil;
hipblasHandle_t cublasHandle;
float *CUDA_ZERO_FLOAT, *CUDA_ONE_FLOAT; // for cublas in device pointer mode
extern "C"
// Create the global cuBLAS handle in device-pointer mode and allocate the
// device-resident constants {0.0f, 1.0f} (CUDA_ZERO_FLOAT / CUDA_ONE_FLOAT)
// that cublas calls in device-pointer mode require.
// Must be called once before anything that uses `cublasHandle`.
// The original left hipMalloc/hipMemcpy unchecked; failures are now reported.
void _initCublasHandleVarScale() {
    if (hipblasCreate(&cublasHandle) != HIPBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
    }
    hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
    // TODO: at shutdown, `hipblasDestroy(handle);`
    // TODO: deallocate this!
    float zeroOne[] = {0, 1};
    if (hipMalloc((void**)&CUDA_ZERO_FLOAT, sizeof(zeroOne)) != hipSuccess) {
        printf("_initCublasHandleVarScale: hipMalloc failed\n");
        return;
    }
    // CUDA_ONE_FLOAT aliases the second element of the same 2-float buffer.
    CUDA_ONE_FLOAT = CUDA_ZERO_FLOAT + 1;
    if (hipMemcpy(CUDA_ZERO_FLOAT, zeroOne, sizeof(zeroOne), hipMemcpyHostToDevice) != hipSuccess) {
        printf("_initCublasHandleVarScale: hipMemcpy failed\n");
    }
}
// TODO remove this code
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with file/line context when a HIP runtime call returned an error.
// A no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err == hipSuccess ) return;
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
             file, line, hipGetErrorString( err ) );
    exit( -1 );
#endif
}
// Check for a pending (sticky) HIP error, then synchronize the device and
// check the asynchronous result as well; abort with file/line context on
// either failure. A no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    hipError_t err = hipGetLastError();
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
    // Thorough but slow: the device sync surfaces asynchronous kernel
    // failures at the cost of stalling the pipeline.
    err = hipDeviceSynchronize();
    if ( err != hipSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
#define upscaleMax(x) ((x + 0.5) * scale - 0.5)
#define upscaleMin(x) ((x - 0.5) * scale + 0.5)
/************************ updateOutput ************************/
// Forward pass of the integral-image box filter with integer window bounds.
// For each output pixel (x,y) of every (plane, window) pair z, sums the
// source image over the window [x+ceil(xMin[z]), x+floor(xMax[z])] x
// [y+ceil(yMin[z]), y+floor(yMax[z])] using four lookups into the
// (h+1)x(w+1) integral image `intData`, with corners clamped to the border.
// Launch layout: 3D grid, block = (BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
// x indexes rows, y columns, z = plane*nWindows + window.
__global__ void forwardNoNormReplicateVarScaleKernel(
float *intData, int intDataStrideChannel, float *outData,
int h, int w, int nInputPlane, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
const int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z;
// Advance to this thread's input plane within the integral image.
const int inPlaneIdx = z / nWindows;
intData += intDataStrideChannel * inPlaneIdx;
if (x < h and y < w and z < nInputPlane*nWindows) {
// Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
// `integral()` behavior. Namely, I(x,0) and I(0,y) are
// always 0 (so it's a C-style array sum).
// However, when computing sums, we subtract values at points
// like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
// and yMin, and thus finally they are not affected.
const int t = max(0, min(x+(int) ceil(xMin[z]) , h-1) );
const int b = max(1, min(x+(int)floor(xMax[z])+1, h ) );
const int l = max(0, min(y+(int) ceil(yMin[z]) , w-1) );
const int r = max(1, min(y+(int)floor(yMax[z])+1, w ) );
// Standard 4-tap integral-image window sum: I(b,r)-I(t,r)-I(b,l)+I(t,l).
double outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
outData[z*w*h + x*w + y] = outValue;
}
}
// Forward pass with fractional (sub-pixel) window bounds and a per-pixel
// scale map. The window limits are rescaled through the upscaleMin/upscaleMax
// macros, which implicitly reference the local `scale` read from scaleData —
// that is why `scale` must be defined before they are used.
// The output is the 4-tap integral-image sum over the integer-interior
// window, plus fractionally-weighted one-pixel border strips and the four
// fractionally-weighted corner pixels read directly from `inData`.
// Launch layout: 1D grid of BLOCK_SIZE*BLOCK_SIZE-thread blocks; the flat
// thread id is decomposed as (z, x, y) with y fastest.
__global__ void forwardNoNormReplicateFracVarScaleKernel(
float *intData, int intDataStrideChannel, float *outData,
int h, int w, int nInputPlane, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow, int inDataStrideChannel,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w; id /= w;
const int x = id % h; id /= h;
const int & z = id;
// Advance both the integral image and the raw image to this plane.
const int inPlaneIdx = z / nWindows;
intData += intDataStrideChannel * inPlaneIdx;
inData += inDataStrideChannel * inPlaneIdx;
if (x < h and y < w and z < nInputPlane*nWindows) {
// Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
// `integral()` behavior. Namely, I(x,0) and I(0,y) are
// always 0 (so it's a C-style array sum).
// However, when computing sums, we subtract values at points
// like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
// and yMin, and thus finally they are not affected.
const float scale = scaleData[x*w + y];
// Integer parts of the rescaled bounds plus their fractional remainders.
const int xMinCurr = (int)ceil(upscaleMin(xMin[z]));
const float xMinCurrFrac = (float)xMinCurr - upscaleMin(xMin[z]);
const int yMinCurr = (int)ceil(upscaleMin(yMin[z]));
const float yMinCurrFrac = (float)yMinCurr - upscaleMin(yMin[z]);
const float xMaxCurrFrac = upscaleMax(xMax[z]) - floor(upscaleMax(xMax[z]));
const int xMaxCurr = (int)floor(upscaleMax(xMax[z])) + 1;
const float yMaxCurrFrac = upscaleMax(yMax[z]) - floor(upscaleMax(yMax[z]));
const int yMaxCurr = (int)floor(upscaleMax(yMax[z])) + 1;
const int t = max(0, min(x+xMinCurr, h-1) );
const int b = max(1, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w-1) );
const int r = max(1, min(y+yMaxCurr, w) );
// Interior: standard 4-tap integral-image window sum.
double outValue = 0;
outValue += intData[b*(w+1) + r];
outValue -= intData[t*(w+1) + r];
outValue -= intData[b*(w+1) + l];
outValue += intData[t*(w+1) + l];
// -- xMax border
outValue +=
( intData[max(1,min(x+xMaxCurr+1,h))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
- intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
- intData[max(1,min(x+xMaxCurr+1,h))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
+ intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(1,min(y+yMaxCurr+1,w))]
- intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
- intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(1,min(y+yMaxCurr+1,w))]
+ intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
- intData[max(0,min(x+xMinCurr-1,h-1))*(w+1)
+ max(1,min(y+yMaxCurr,w))]
- intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
+ intData[max(0,min(x+xMinCurr-1,h-1))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
- intData[max(1,min(x+xMaxCurr,h))*(w+1)
+ max(0,min(y+yMinCurr-1,w-1))]
- intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(0,min(y+yMinCurr,w-1))]
+ intData[max(0,min(x+xMinCurr,h-1))*(w+1)
+ max(0,min(y+yMinCurr-1,w-1))]
) * yMinCurrFrac;
// -- corner pixels: read straight from the raw image, zero when the
// corner falls outside [0,h) x [0,w).
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr <= 0 or
y+yMaxCurr <= 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMaxCurr)]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 >= h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr <= 0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMaxCurr)]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 >= w-1 or
x+xMaxCurr <= 0 or
y+yMinCurr-1 < 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMinCurr-1)]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 >= h-1 or
y+yMinCurr-1 >= w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMinCurr-1)]);
outData[z*w*h + x*w + y] = outValue;
}
}
extern "C" {
// Host wrapper: launch forwardNoNormReplicateVarScaleKernel over a 3D grid.
// NOTE(review): the strided (strideH/strideW != 1) path is unimplemented and
// currently returns WITHOUT computing anything — callers get stale outData.
// NOTE(review): no hipGetLastError()/CudaCheckError() after the launch, so
// launch-configuration failures are silent.
void forwardNoNormReplicateVarScaleCuda(
float *intData, int intDataStrideChannel, float *outData,
int h, int w, int nInputPlane, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW) {
if (strideH != 1 or strideW != 1) {
// strided::forwardNoNormReplicateCuda(
// intData, intDataStrideChannel, outData,
// h, w, nInputPlane, nWindows,
// xMin, xMax, yMin, yMax,
// strideH, strideW);
return;
}
// TODO: 1D grid
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y,
(nInputPlane*nWindows + dimBlock.z - 1) / dimBlock.z);
hipLaunchKernelGGL(( forwardNoNormReplicateVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, intDataStrideChannel, outData,
h, w, nInputPlane, nWindows,
xMin, xMax, yMin, yMax);
}
// Host wrapper: launch the fractional-bounds forward kernel over a flat 1D
// grid covering nInputPlane*nWindows*h*w elements.
// NOTE(review): the strided path is unimplemented; `int a = *(int*)0;` is a
// deliberate null-pointer dereference used as a crash-on-unsupported trap.
void forwardNoNormReplicateFracVarScaleCuda(
float *intData, int intDataStrideChannel, float *outData,
int h, int w, int nInputPlane, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow, int inDataStrideChannel,
const int strideH, const int strideW, const float *const scaleData) {
if (strideH != 1 or strideW != 1) {
// TODO
// Intentional crash: strided variant not ported yet.
int a = *(int*)0;
// strided::forwardNoNormReplicateFracCuda(
// intData, intDataStrideChannel, outData,
// h, w, nInputPlane, nWindows,
// xMin, xMax, yMin, yMax,
// inData, inDataStrideRow, inDataStrideChannel,
// strideH, strideW);
return;
}
dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
dim3 dimGrid((nInputPlane*nWindows*h*w + dimBlock.x - 1) / dimBlock.x);
hipLaunchKernelGGL(( forwardNoNormReplicateFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, intDataStrideChannel, outData,
h, w, nInputPlane, nWindows,
xMin, xMax, yMin, yMax,
inData, inDataStrideRow, inDataStrideChannel, scaleData);
}
/************************ updateGradInput ************************/
// Backward pass w.r.t. the input (integer bounds): for each input pixel
// (x,y), accumulate over all nWindows the 4-tap sum of the gradOutput
// integral image over the window reflected through the origin (bounds
// negated). gradOutputIntData holds nWindows consecutive (h+1)x(w+1)
// integral images.
// Launch layout: 2D grid; x indexes rows, y columns.
__global__ void updateGradInputVarScaleKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
// Reflected window: the roles of min/max swap under negation.
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
// 4-tap integral-image sum for this window's gradOutput plane.
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
}
// Backward pass w.r.t. the input with fractional bounds and a per-pixel
// scale map. Mirrors the fractional forward kernel: interior 4-tap integral
// sum, four fractionally-weighted border strips (via the *Adv advanced
// indices), and four fractionally-weighted corner reads from gradOutputData.
// The upscaleMin/upscaleMax macros implicitly use the local `scale`.
// Launch layout: 2D grid; x indexes rows, y columns.
__global__ void updateGradInputFracVarScaleKernel(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
const float *const scaleData) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
const float scale = scaleData[x*w + y];
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
// Reflected window bounds (negated, min/max swapped) plus fractions.
xMinCurr = (int)ceil(-upscaleMax(xMax[windowIdx]));
yMinCurr = (int)ceil(-upscaleMax(yMax[windowIdx]));
const float xMinCurrFrac = (float)xMinCurr + upscaleMax(xMax[windowIdx]);
const float yMinCurrFrac = (float)yMinCurr + upscaleMax(yMax[windowIdx]);
xMaxCurr = (int)floor(-upscaleMin(xMin[windowIdx])) + 1;
yMaxCurr = (int)floor(-upscaleMin(yMin[windowIdx])) + 1;
const float xMaxCurrFrac = -upscaleMin(xMin[windowIdx]) + 1 - xMaxCurr;
const float yMaxCurrFrac = -upscaleMin(yMin[windowIdx]) + 1 - yMaxCurr;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
// "Advanced" indices one pixel beyond each clamped edge, used for the
// fractional border strips; they equal the plain index when the true
// (unclamped) edge is already outside the image.
const int tAdv = x+xMinCurr-1 <  h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr   >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 <  w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr   >= 0 ? max(0, min(r+1, w)) : r;
// TODO: 1D grid
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// -- xMax border
outValue +=
( gradOutputIntData[bAdv*(w+1) + r]
- gradOutputIntData[b *(w+1) + r]
- gradOutputIntData[bAdv*(w+1) + l]
+ gradOutputIntData[b *(w+1) + l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntData[b*(w+1) + rAdv]
- gradOutputIntData[b*(w+1) + r ]
- gradOutputIntData[t*(w+1) + rAdv]
+ gradOutputIntData[t*(w+1) + r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntData[t *(w+1) + r]
- gradOutputIntData[tAdv*(w+1) + r]
- gradOutputIntData[t *(w+1) + l]
+ gradOutputIntData[tAdv*(w+1) + l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntData[b*(w+1) + l ]
- gradOutputIntData[b*(w+1) + lAdv]
- gradOutputIntData[t*(w+1) + l ]
+ gradOutputIntData[t*(w+1) + lAdv]
) * yMinCurrFrac;
// -- corner pixels: direct reads from gradOutputData, zero when the
// corner is out of range or the strip collapsed (index == Adv index).
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMaxCurr > w-1 or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + r]);
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMaxCurr > w-1 or
x+xMinCurr-1 < 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + r]);
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr > h-1 or
y+yMinCurr-1 > w-1 or
x+xMaxCurr < 0 or
y+yMinCurr-1 < 0 or
b == bAdv or
l == lAdv) ? 0 :
gradOutputData[b*gradOutputStrideRow + lAdv]);
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr-1 > h-1 or
y+yMinCurr-1 > w-1 or
x+xMinCurr-1 < 0 or
y+yMinCurr-1 < 0 or
t == tAdv or
l == lAdv) ? 0 :
gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
gradOutputData += gradOutputStrideChannel;
}
gradInputData[x*w + y] = outValue;
}
}
// Host wrapper: launch updateGradInputVarScaleKernel over a 2D grid.
// NOTE(review): `scaleData` is accepted but never used — the integer-bounds
// kernel takes no scale argument; confirm whether this is intentional.
// NOTE(review): on the strided path the null-pointer dereference crashes
// deliberately, but the `return` is commented out, so if execution somehow
// continued it would fall through to the unstrided launch.
void updateGradInputVarScaleCuda(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
const int strideH, const int strideW, const float *const scaleData) {
if (strideH != 1 or strideW != 1) {
// TODO
// Intentional crash: strided variant not ported yet.
int a = *(int*)0;
// strided::updateGradInputCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax, strideH, strideW);
// return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( updateGradInputVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax);
}
// Host wrapper: launch the fractional-bounds gradInput kernel over a 2D grid.
// NOTE(review): on the strided path the null-pointer dereference crashes
// deliberately; the `return` after it is commented out.
void updateGradInputFracVarScaleCuda(
float *gradOutputIntData, float *gradInputData,
int h, int w, int nWindows,
float *xMin, float *xMax, float *yMin, float *yMax,
float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
const int strideH, const int strideW, const float *const scaleData) {
if (strideH != 1 or strideW != 1) {
// TODO
// Intentional crash: strided variant not ported yet.
int a = *(int*)0;
// strided::updateGradInputFracCuda(
// gradOutputIntData, gradInputData, h, w, nWindows,
// xMin, xMax, yMin, yMax,
// gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
// strideH, strideW);
// return;
}
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
dim3 dimGrid(
(h + dimBlock.x - 1) / dimBlock.x,
(w + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( updateGradInputFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
gradOutputIntData, gradInputData,
h, w, nWindows,
xMin, xMax, yMin, yMax,
gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
scaleData);
}
/************************ accGradParameters ************************/
// Parameter gradient w.r.t. xMax (fractional bounds, per-pixel scale):
// computes the derivative of the window sum as the row of pixels crossed by
// the moving xMax edge — an integral-image row difference plus fractionally
// weighted corner pixels — scales it by `scale`, and multiplies it into
// tmpArray in place (tmpArray is expected to be pre-filled by the caller).
// x and y are 1-indexed here, as noted inline.
__global__ void xMaxDeltaIntegralFracVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const float scale = scaleData[(x-1)*w + (y-1)];
// const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
// const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
// const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// Bottom-left / bottom-right corner pixels of the moving bottom edge.
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
// Row sum along the xMax edge via integral-image differences.
delta +=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h) * scale;
// note multiplication of `delta` by `scale`
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
// Parameter gradient w.r.t. xMin (fractional bounds, per-pixel scale):
// same structure as the xMax kernel but along the top edge, with the result
// negated (moving xMin up shrinks the window). Multiplies tmpArray in place.
// x and y are 1-indexed here, as noted inline.
__global__ void xMinDeltaIntegralFracVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *inData, const int inDataStrideRow,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const float scale = scaleData[(x-1)*w + (y-1)];
const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
// const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
// const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
// const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
// Top-left / top-right corner pixels of the moving top edge.
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
// Row sum along the xMin edge via integral-image differences.
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h) * scale;
// note multiplication of `delta` by `scale
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
// Parameter gradient w.r.t. yMax (fractional bounds, per-pixel scale):
// column analogue of the xMax kernel — column sum along the right edge plus
// fractionally weighted corner pixels, scaled by `scale`, multiplied into
// tmpArray in place. x and y are 1-indexed here, as noted inline.
__global__ void yMaxDeltaIntegralFracVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *inData, const int inDataStrideRow,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const float scale = scaleData[(x-1)*w + (y-1)];
const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
// const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
// const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
// const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
// const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMinInt-1))];
// Top-right / bottom-right corner pixels of the moving right edge.
const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
// Column sum along the yMax edge via integral-image differences.
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w) * scale;
// note multiplication of `delta` by `scale
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
// Parameter gradient w.r.t. yMin (fractional bounds, per-pixel scale):
// column analogue of the xMin kernel — column sum along the left edge plus
// fractionally weighted corner pixels, scaled by `scale`, negated, and
// multiplied into tmpArray in place. x and y are 1-indexed, as noted inline.
__global__ void yMinDeltaIntegralFracVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *inData, const int inDataStrideRow,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const float scale = scaleData[(x-1)*w + (y-1)];
const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
// const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
// const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
// const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
// Top-left / bottom-left corner pixels of the moving left edge.
const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
inData[
max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
inData[
max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
max(0,min(w-1,y+yMinInt-1))];
// const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
// inData[
// max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
// const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
// inData[
// max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
// max(0,min(w-1,y+yMaxInt ))];
float delta = 0;
delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
// Column sum along the yMin edge via integral-image differences.
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w))];
delta *= (y+yMinInt >= 1 and y+yMinInt < w) * scale;
// note multiplication of `delta` by `scale
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
// Host wrapper for the fractional parameter-gradient pass: launches the four
// edge-delta kernels (xMax, xMin, yMax, yMin), each writing into its own
// nWindows*h*w slice of tmpArray. tmpArray must be pre-filled by the caller
// since every kernel multiplies its slice in place.
// NOTE(review): the strided path crashes deliberately via the null-pointer
// dereference; the `return` after it is commented out.
void backwardFracVarScaleCuda(
float *intData, float *tmpArray,
int nWindows, int h, int w,
float *xMin, float *xMax, float *yMin, float *yMax,
float *inData, int inDataStrideRow,
const int strideH, const int strideW,
const float *const scaleData) {
if (strideH != 1 or strideW != 1) {
// TODO
// Intentional crash: strided variant not ported yet.
int a = *(int*)0;
// strided::backwardFracCuda(
// intData, tmpArray, nWindows, h, w,
// xMin, xMax, yMin, yMax, inData, inDataStrideRow,
// strideH, strideW);
// return;
}
dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
hipLaunchKernelGGL(( xMaxDeltaIntegralFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, tmpArray + 0*nWindows*h*w, nWindows, h, w,
xMax, yMin, yMax, inData, inDataStrideRow, scaleData);
hipLaunchKernelGGL(( xMinDeltaIntegralFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, tmpArray + 1*nWindows*h*w, nWindows, h, w,
xMin, yMin, yMax, inData, inDataStrideRow, scaleData);
hipLaunchKernelGGL(( yMaxDeltaIntegralFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, tmpArray + 2*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMax, inData, inDataStrideRow, scaleData);
hipLaunchKernelGGL(( yMinDeltaIntegralFracVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
intData, tmpArray + 3*nWindows*h*w, nWindows, h, w,
xMin, xMax, yMin, inData, inDataStrideRow, scaleData);
}
// Parameter gradient w.r.t. xMax, integer-bounds variant: row sum along the
// xMax edge via integral-image differences, multiplied into tmpArray in
// place. x and y are 1-indexed, as noted inline.
// NOTE(review): `scaleData` is a parameter but is never read here, and the
// bounds are used without the upscale macros — unlike the Frac variant,
// which multiplies delta by scale. Confirm whether this is intentional.
__global__ void xMaxDeltaIntegralVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *const scaleData) {
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
float delta = 0;
delta +=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
// Zero out the delta when the moving edge lies outside the image.
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
// Integer-border variant of the xMin-border gradient kernel: multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by MINUS the finite difference
// of `intData` along the box's xMin edge (sign flipped at the final store).
// One thread per (window, x, y); x and y are 1-indexed inside this kernel.
// NOTE(review): `scaleData` is accepted but never used here, unlike the
// *Frac* variants -- confirm this is intended.
__global__ void xMinDeltaIntegralVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *yMin, const float *yMax,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        const int xMinInt = (int)ceil(xMin[windowIdx]-1);
        const int yMinInt = (int)ceil(yMin[windowIdx]-1);
        // const int xMaxInt = (int)floor(xMax[windowIdx]);
        const int yMaxInt = (int)floor(yMax[windowIdx]);
        // Row difference at rows x+xMinInt and x+xMinInt-1, clamped.
        float delta = 0;
        delta +=
            intData[max(0,min(x+xMinInt , h-1))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMinInt-1, h ))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMinInt , h-1))*(w+1)
            + max(0,min(y+yMinInt, w))];
        delta +=
            intData[max(0,min(x+xMinInt-1, h ))*(w+1)
            + max(0,min(y+yMinInt, w))];
        // Zero the contribution when the border falls outside the image.
        delta *= (x+xMinInt >= 1 and x+xMinInt < h);
        tmpArray[(x-1)*w + (y-1)] *= -delta;
    }
}
// Integer-border variant of the yMax-border gradient kernel: multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by the finite difference of
// `intData` along the box's yMax edge (column difference).
// One thread per (window, x, y); x and y are 1-indexed inside this kernel.
// NOTE(review): `scaleData` is accepted but never used here, unlike the
// *Frac* variants -- confirm this is intended.
__global__ void yMaxDeltaIntegralVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *xMax, const float *yMax,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        const int xMinInt = (int)ceil(xMin[windowIdx]-1);
        // const int yMinInt = (int)ceil(yMin[windowIdx]-1);
        const int xMaxInt = (int)floor(xMax[windowIdx]);
        const int yMaxInt = (int)floor(yMax[windowIdx]);
        // Column difference at columns y+yMaxInt and y+yMaxInt+1, clamped.
        float delta = 0;
        delta +=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(1,min(y+yMaxInt+1, w))];
        delta -=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMaxInt , w))];
        delta -=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(1,min(y+yMaxInt+1, w))];
        delta +=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMaxInt , w))];
        // Zero the contribution when the border falls outside the image.
        delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
        tmpArray[(x-1)*w + (y-1)] *= delta;
    }
}
// Integer-border variant of the yMin-border gradient kernel: multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by MINUS the finite difference
// of `intData` along the box's yMin edge (sign flipped at the final store).
// One thread per (window, x, y); x and y are 1-indexed inside this kernel.
// NOTE(review): `scaleData` is accepted but never used here, unlike the
// *Frac* variants -- confirm this is intended.
__global__ void yMinDeltaIntegralVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *xMax, const float *yMin,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        const int xMinInt = (int)ceil(xMin[windowIdx]-1);
        const int yMinInt = (int)ceil(yMin[windowIdx]-1);
        const int xMaxInt = (int)floor(xMax[windowIdx]);
        // const int yMaxInt = (int)floor(yMax[windowIdx]);
        // Column difference at columns y+yMinInt and y+yMinInt-1, clamped.
        float delta = 0;
        delta +=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMinInt , w ))];
        delta -=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMinInt-1, w-1))];
        delta -=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMinInt , w ))];
        delta +=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMinInt-1, w-1))];
        // Zero the contribution when the border falls outside the image.
        delta *= (y+yMinInt >= 1 and y+yMinInt < w);
        tmpArray[(x-1)*w + (y-1)] *= -delta;
    }
}
// Backward (accGradParameters) pass, integer-border part, HIP path.
// Launches one kernel per box border; each multiplies its own nWindows*h*w
// quarter of `tmpArray` in place by the delta-integral of that border.
// Only stride 1x1 is implemented.
void backwardVarScaleCuda(
    float *intData, float *tmpArray,
    int nWindows, int h, int w,
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int strideH, const int strideW,
    const float *const scaleData) {
    if (strideH != 1 or strideW != 1) {
        // TODO
        // Deliberate null-pointer dereference: the strided path below is
        // unimplemented, so crash loudly rather than compute garbage.
        int a = *(int*)0;
        // strided::backwardCuda(
        //     intData, tmpArray, nWindows, h, w,
        //     xMin, xMax, yMin, yMax, strideH, strideW);
        // return;
    }
    // 1D launch: one thread per (window, x, y) output element.
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);
    hipLaunchKernelGGL(( xMaxDeltaIntegralVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
        intData, tmpArray + 0*nWindows*h*w,
        nWindows, h, w, xMax, yMin, yMax, scaleData);
    hipLaunchKernelGGL(( xMinDeltaIntegralVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
        intData, tmpArray + 1*nWindows*h*w,
        nWindows, h, w, xMin, yMin, yMax, scaleData);
    hipLaunchKernelGGL(( yMaxDeltaIntegralVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
        intData, tmpArray + 2*nWindows*h*w,
        nWindows, h, w, xMin, xMax, yMax, scaleData);
    hipLaunchKernelGGL(( yMinDeltaIntegralVarScaleKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0,
        intData, tmpArray + 3*nWindows*h*w,
        nWindows, h, w, xMin, xMax, yMin, scaleData);
}
/************************ Other stuff ************************/
// Clamps window parameters into valid ranges and enforces a minimum window
// extent, in place. Threads with idx < size fix the pair (xMin[idx], xMax[idx])
// against the height bound h; threads with idx in [size, 2*size) fix
// (yMin[idx-size], yMax[idx-size]) against the width bound w.
__global__ void dirtyFixWindowsVarScaleKernel(
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int size, const float h, const float w, const float minWidth) {

    int idx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;

    if (idx < 2*size) {
        // First half of the threads handles x-params, second half y-params;
        // both halves run identical logic on different arrays/bounds.
        const bool handlesX = idx < size;
        float *loArr = handlesX ? xMin : yMin;
        float *hiArr = handlesX ? xMax : yMax;
        const float bound = handlesX ? h : w;
        if (not handlesX) idx -= size;

        // Clamp both endpoints into (-bound+1, bound-1).
        float paramMin = max(-bound+1, min(bound-1, loArr[idx]));
        float paramMax = max(-bound+1, min(bound-1, hiArr[idx]));

        // If the window is (nearly) thinner than minWidth, re-center it
        // around its midpoint at (almost) the minimum width.
        if (paramMin + minWidth - 0.99 > paramMax) {
            const float mean = 0.5 * (paramMin + paramMax);
            paramMin = mean - 0.5 * (minWidth - 0.9);
            paramMax = mean + 0.5 * (minWidth - 0.9);
        }

        loArr[idx] = paramMin;
        hiArr[idx] = paramMax;
    }
}
// Host wrapper for dirtyFixWindowsVarScaleKernel: clamps all `size` windows'
// x- and y-parameters on the GPU (2*size logical work items in total).
void dirtyFixWindowsVarScale(
    float *xMin, float *xMax, float *yMin, float *yMax,
    int size, int h, int w, float minWidth) {

    const int threadsPerBlock = BLOCK_SIZE * BLOCK_SIZE;
    const int workItems = 2 * size;  // x-pairs then y-pairs
    dim3 blockDim(threadsPerBlock);
    dim3 gridDim((workItems + threadsPerBlock - 1) / threadsPerBlock);

    hipLaunchKernelGGL(( dirtyFixWindowsVarScaleKernel) , dim3(gridDim), dim3(blockDim), 0, 0,
        xMin, xMax, yMin, yMax, size, (float)h, (float)w, minWidth);
}
} // extern "C"
| 26f30e8df9ed2b99664423b1bdebb77e5069531d.cu | #include <iostream>
#include <assert.h>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
// #include "integral-strided-cuda.hpp"
#define BLOCK_SIZE 32
#define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE))
using std::max;
using std::min;
using std::floor;
using std::ceil;
cublasHandle_t cublasHandle;
float *CUDA_ZERO_FLOAT, *CUDA_ONE_FLOAT; // for cublas in device pointer mode
extern "C"
// Creates the global cuBLAS handle in device-pointer mode and uploads the
// {0, 1} float constants used as GEMM alpha/beta (CUDA_ZERO_FLOAT points at
// 0.0f, CUDA_ONE_FLOAT at the adjacent 1.0f). Must be called once before any
// routine that uses `cublasHandle`. Errors are reported but not fatal,
// matching the original behavior.
void _initCublasHandleVarScale() {
    if (cublasCreate(&cublasHandle) != CUBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
    }
    // Device pointer mode: alpha/beta are read from device memory.
    if (cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE) != CUBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS pointer mode setup failed\n");
    }
    // TODO: at shutdown, `cublasDestroy(handle);`
    // TODO: deallocate this!
    float zeroOne[] = {0, 1};
    // Both constants live in a single 2-float device allocation.
    if (cudaMalloc((void**)&CUDA_ZERO_FLOAT, sizeof(zeroOne)) != cudaSuccess) {
        printf ("cudaMalloc for cuBLAS constants failed\n");
    }
    CUDA_ONE_FLOAT = CUDA_ZERO_FLOAT + 1;
    if (cudaMemcpy(CUDA_ZERO_FLOAT, zeroOne, sizeof(zeroOne), cudaMemcpyHostToDevice) != cudaSuccess) {
        printf ("cudaMemcpy for cuBLAS constants failed\n");
    }
}
// TODO remove this code
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Aborts the process with a file:line diagnostic when `err` signals a CUDA
// failure. Invoked through the CudaSafeCall() macro, which supplies the
// call site; compiled out entirely unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
// Checks for a pending CUDA error and then synchronizes the device to also
// surface asynchronous kernel failures; aborts with a file:line diagnostic on
// either. Invoked through the CudaCheckError() macro; compiled out unless
// CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }

    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    err = cudaDeviceSynchronize();
    if ( err != cudaSuccess ) {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}
// Map a window coordinate into the upscaled coordinate system. NOTE: both
// macros reference a variable named `scale` that must be in scope at every
// expansion site. Arguments are parenthesized so compound expressions like
// `upscaleMax(a + b)` expand with the intended precedence (the previous
// definition expanded the raw argument unparenthesized).
#define upscaleMax(x) (((x) + 0.5) * scale - 0.5)
#define upscaleMin(x) (((x) - 0.5) * scale + 0.5)
/************************ updateOutput ************************/
// TODO
// Forward pass (integer borders, no normalization, replicate padding):
// computes one box-filter output per thread via a 4-tap lookup in the
// integral image `intData` ((h+1)x(w+1) per channel). Launched on a 3D grid:
// (x, y) spatial, z = inPlaneIdx*nWindows + windowIdx.
__global__ void forwardNoNormReplicateVarScaleKernel(
    float *intData, int intDataStrideChannel, float *outData,
    int h, int w, int nInputPlane, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax) {
    const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
    const int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z;
    // Each group of nWindows output channels reads the same input plane.
    const int inPlaneIdx = z / nWindows;
    intData += intDataStrideChannel * inPlaneIdx;
    if (x < h and y < w and z < nInputPlane*nWindows) {
        // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
        // `integral()` behavior. Namely, I(x,0) and I(0,y) are
        // always 0 (so it's a C-style array sum).
        // However, when computing sums, we subtract values at points
        // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
        // and yMin, and thus finally they are not affected.
        const int t = max(0, min(x+(int) ceil(xMin[z]) , h-1) );
        const int b = max(1, min(x+(int)floor(xMax[z])+1, h ) );
        const int l = max(0, min(y+(int) ceil(yMin[z]) , w-1) );
        const int r = max(1, min(y+(int)floor(yMax[z])+1, w ) );
        // Standard integral-image box sum over [t,b) x [l,r).
        double outValue = 0;
        outValue += intData[b*(w+1) + r];
        outValue -= intData[t*(w+1) + r];
        outValue -= intData[b*(w+1) + l];
        outValue += intData[t*(w+1) + l];
        outData[z*w*h + x*w + y] = outValue;
    }
}
// Forward pass with fractional box borders and a per-pixel window scale:
// the integer part of the box sum comes from the integral image `intData`,
// the fractional borders and corner pixels are interpolated from the raw
// input `inData`. One thread per output element (1D launch); z enumerates
// (input plane, window) pairs. The per-pixel `scale` read below is consumed
// implicitly by the upscaleMin/upscaleMax macros.
__global__ void forwardNoNormReplicateFracVarScaleKernel(
    float *intData, int intDataStrideChannel, float *outData,
    int h, int w, int nInputPlane, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    float *inData, int inDataStrideRow, int inDataStrideChannel,
    const float *const scaleData) {
    // Decompose the flat thread id into (z, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w; id /= w;
    const int x = id % h; id /= h;
    const int & z = id;
    // Each group of nWindows output channels reads the same input plane.
    const int inPlaneIdx = z / nWindows;
    intData += intDataStrideChannel * inPlaneIdx;
    inData += inDataStrideChannel * inPlaneIdx;
    if (x < h and y < w and z < nInputPlane*nWindows) {
        // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's
        // `integral()` behavior. Namely, I(x,0) and I(0,y) are
        // always 0 (so it's a C-style array sum).
        // However, when computing sums, we subtract values at points
        // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin
        // and yMin, and thus finally they are not affected.
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[x*w + y];
        // Split each scaled border into integer part + fractional remainder.
        const int   xMinCurr = (int)ceil(upscaleMin(xMin[z]));
        const float xMinCurrFrac = (float)xMinCurr - upscaleMin(xMin[z]);
        const int   yMinCurr = (int)ceil(upscaleMin(yMin[z]));
        const float yMinCurrFrac = (float)yMinCurr - upscaleMin(yMin[z]);
        const float xMaxCurrFrac = upscaleMax(xMax[z]) - floor(upscaleMax(xMax[z]));
        const int   xMaxCurr = (int)floor(upscaleMax(xMax[z])) + 1;
        const float yMaxCurrFrac = upscaleMax(yMax[z]) - floor(upscaleMax(yMax[z]));
        const int   yMaxCurr = (int)floor(upscaleMax(yMax[z])) + 1;
        const int t = max(0, min(x+xMinCurr, h-1) );
        const int b = max(1, min(x+xMaxCurr, h) );
        const int l = max(0, min(y+yMinCurr, w-1) );
        const int r = max(1, min(y+yMaxCurr, w) );
        // Integer-part box sum from the integral image.
        double outValue = 0;
        outValue += intData[b*(w+1) + r];
        outValue -= intData[t*(w+1) + r];
        outValue -= intData[b*(w+1) + l];
        outValue += intData[t*(w+1) + l];
        // -- xMax border
        outValue +=
            ( intData[max(1,min(x+xMaxCurr+1,h))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            - intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            - intData[max(1,min(x+xMaxCurr+1,h))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            + intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            ) * xMaxCurrFrac;
        // -- yMax border
        outValue +=
            ( intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(1,min(y+yMaxCurr+1,w))]
            - intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            - intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(1,min(y+yMaxCurr+1,w))]
            + intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            ) * yMaxCurrFrac;
        // -- xMin border
        outValue +=
            ( intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            - intData[max(0,min(x+xMinCurr-1,h-1))*(w+1)
                + max(1,min(y+yMaxCurr,w))]
            - intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            + intData[max(0,min(x+xMinCurr-1,h-1))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            ) * xMinCurrFrac;
        // -- yMin border
        outValue +=
            ( intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            - intData[max(1,min(x+xMaxCurr,h))*(w+1)
                + max(0,min(y+yMinCurr-1,w-1))]
            - intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(0,min(y+yMinCurr,w-1))]
            + intData[max(0,min(x+xMinCurr,h-1))*(w+1)
                + max(0,min(y+yMinCurr-1,w-1))]
            ) * yMinCurrFrac;
        // -- corner pixels (read straight from inData, zero outside image)
        outValue +=
            xMaxCurrFrac*yMaxCurrFrac * (
               (x+xMaxCurr >  h-1 or
                y+yMaxCurr >  w-1 or
                x+xMaxCurr <= 0 or
                y+yMaxCurr <= 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMaxCurr)]);
        outValue +=
            xMinCurrFrac*yMaxCurrFrac * (
               (x+xMinCurr-1 >= h-1 or
                y+yMaxCurr   >  w-1 or
                x+xMinCurr-1 <  0 or
                y+yMaxCurr   <= 0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMaxCurr)]);
        outValue +=
            xMaxCurrFrac*yMinCurrFrac * (
               (x+xMaxCurr   >  h-1 or
                y+yMinCurr-1 >= w-1 or
                x+xMaxCurr   <= 0 or
                y+yMinCurr-1 <  0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMinCurr-1)]);
        outValue +=
            xMinCurrFrac*yMinCurrFrac * (
               (x+xMinCurr-1 >= h-1 or
                y+yMinCurr-1 >= w-1 or
                x+xMinCurr-1 <  0 or
                y+yMinCurr-1 <  0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMinCurr-1)]);
        outData[z*w*h + x*w + y] = outValue;
    }
}
extern "C" {
// TODO: restore the strided implementation (strided::forwardNoNormReplicateCuda).
// Forward pass (integer borders) of the variable-scale integral layer: one
// thread per output element across nInputPlane*nWindows channels.
// Only stride 1x1 is implemented; other strides previously RETURNED SILENTLY
// without touching outData -- now they fail loudly instead.
void forwardNoNormReplicateVarScaleCuda(
    float *intData, int intDataStrideChannel, float *outData,
    int h, int w, int nInputPlane, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int strideH, const int strideW) {

    if (strideH != 1 or strideW != 1) {
        fprintf(stderr,
            "forwardNoNormReplicateVarScaleCuda: strideH/strideW != 1 is not implemented\n");
        exit(EXIT_FAILURE);
        // strided::forwardNoNormReplicateCuda(
        //     intData, intDataStrideChannel, outData,
        //     h, w, nInputPlane, nWindows,
        //     xMin, xMax, yMin, yMax,
        //     strideH, strideW);
    }

    // TODO: 1D grid
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS);
    dim3 dimGrid(
        (h + dimBlock.x - 1) / dimBlock.x,
        (w + dimBlock.y - 1) / dimBlock.y,
        (nInputPlane*nWindows + dimBlock.z - 1) / dimBlock.z);

    forwardNoNormReplicateVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, intDataStrideChannel, outData,
        h, w, nInputPlane, nWindows,
        xMin, xMax, yMin, yMax);
}
// Forward pass (fractional borders, per-pixel window scale): one thread per
// output element across nInputPlane*nWindows channels.
// Only stride 1x1 is implemented; other strides fail loudly (previously this
// crashed via a deliberate null-pointer dereference).
// TODO: port strided::forwardNoNormReplicateFracCuda.
void forwardNoNormReplicateFracVarScaleCuda(
    float *intData, int intDataStrideChannel, float *outData,
    int h, int w, int nInputPlane, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    float *inData, int inDataStrideRow, int inDataStrideChannel,
    const int strideH, const int strideW, const float *const scaleData) {

    if (strideH != 1 or strideW != 1) {
        fprintf(stderr,
            "forwardNoNormReplicateFracVarScaleCuda: strideH/strideW != 1 is not implemented\n");
        exit(EXIT_FAILURE);
        // strided::forwardNoNormReplicateFracCuda(
        //     intData, intDataStrideChannel, outData,
        //     h, w, nInputPlane, nWindows,
        //     xMin, xMax, yMin, yMax,
        //     inData, inDataStrideRow, inDataStrideChannel,
        //     strideH, strideW);
    }

    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((nInputPlane*nWindows*h*w + dimBlock.x - 1) / dimBlock.x);

    forwardNoNormReplicateFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, intDataStrideChannel, outData,
        h, w, nInputPlane, nWindows,
        xMin, xMax, yMin, yMax,
        inData, inDataStrideRow, inDataStrideChannel, scaleData);
}
/************************ updateGradInput ************************/
// Gradient w.r.t. the layer input (integer-border variant): for each input
// pixel (x, y), accumulates over all windows the box sum of the per-window
// gradOutput integral images (each (h+1)x(w+1), laid out consecutively).
// Indexed by a 2D launch over (x, y); threadIdx.z is not used.
__global__ void updateGradInputVarScaleKernel(
    float *gradOutputIntData, float *gradInputData,
    int h, int w, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax) {
    const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
    if (x < h and y < w) {
        int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
        double outValue = 0;
        for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
            // The backward box is the forward box mirrored through the origin.
            xMinCurr = (int)ceil(-xMax[windowIdx]);
            yMinCurr = (int)ceil(-yMax[windowIdx]);
            xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
            yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
            // The following code block implements these lines
            // as if they were executed simultaneously (see `void updateGradInputFrac()`):
            // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
            // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
            // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
            // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
            bool needToChangeMin, needToChangeMax;
            needToChangeMin = x == 0 and xMaxCurr >= 0;
            needToChangeMax = x == h-1 and xMinCurr <= 0;
            if (needToChangeMin) xMinCurr = 0;
            if (needToChangeMax) xMaxCurr = h+66;
            needToChangeMin = y == 0 and yMaxCurr >= 0;
            needToChangeMax = y == w-1 and yMinCurr <= 0;
            if (needToChangeMin) yMinCurr = 0;
            if (needToChangeMax) yMaxCurr = w+66;
            const int t = max(0, min(x+xMinCurr, h) );
            const int b = max(0, min(x+xMaxCurr, h) );
            const int l = max(0, min(y+yMinCurr, w) );
            const int r = max(0, min(y+yMaxCurr, w) );
            // Box sum over [t,b) x [l,r) of this window's gradOutput.
            outValue += gradOutputIntData[b*(w+1) + r];
            outValue -= gradOutputIntData[t*(w+1) + r];
            outValue -= gradOutputIntData[b*(w+1) + l];
            outValue += gradOutputIntData[t*(w+1) + l];
            // go to the next channel
            gradOutputIntData += (h+1)*(w+1);
        }
        gradInputData[x*w + y] = outValue;
    }
}
// Gradient w.r.t. the layer input (fractional borders, per-pixel scale):
// like updateGradInputVarScaleKernel, but fractional border strips and the
// four corner pixels are interpolated from the raw per-window gradOutput
// (`gradOutputData`). Indexed by a 2D launch over (x, y); threadIdx.z is not
// used. The per-pixel `scale` read below is consumed implicitly by the
// upscaleMin/upscaleMax macros.
__global__ void updateGradInputFracVarScaleKernel(
    float *gradOutputIntData, float *gradInputData,
    int h, int w, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
    const float *const scaleData) {
    const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
    if (x < h and y < w) {
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[x*w + y];
        int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
        double outValue = 0;
        for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
            // Mirrored (backward) box borders, split into int + fraction.
            xMinCurr = (int)ceil(-upscaleMax(xMax[windowIdx]));
            yMinCurr = (int)ceil(-upscaleMax(yMax[windowIdx]));
            const float xMinCurrFrac = (float)xMinCurr + upscaleMax(xMax[windowIdx]);
            const float yMinCurrFrac = (float)yMinCurr + upscaleMax(yMax[windowIdx]);
            xMaxCurr = (int)floor(-upscaleMin(xMin[windowIdx])) + 1;
            yMaxCurr = (int)floor(-upscaleMin(yMin[windowIdx])) + 1;
            const float xMaxCurrFrac = -upscaleMin(xMin[windowIdx]) + 1 - xMaxCurr;
            const float yMaxCurrFrac = -upscaleMin(yMin[windowIdx]) + 1 - yMaxCurr;
            // The following code block implements these lines
            // as if they were executed simultaneously (see `void updateGradInputFrac()`):
            // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
            // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
            // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
            // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
            bool needToChangeMin, needToChangeMax;
            needToChangeMin = x == 0 and xMaxCurr >= 0;
            needToChangeMax = x == h-1 and xMinCurr <= 0;
            if (needToChangeMin) xMinCurr = 0;
            if (needToChangeMax) xMaxCurr = h+66;
            needToChangeMin = y == 0 and yMaxCurr >= 0;
            needToChangeMax = y == w-1 and yMinCurr <= 0;
            if (needToChangeMin) yMinCurr = 0;
            if (needToChangeMax) yMaxCurr = w+66;
            const int t = max(0, min(x+xMinCurr, h) );
            const int b = max(0, min(x+xMaxCurr, h) );
            const int l = max(0, min(y+yMinCurr, w) );
            const int r = max(0, min(y+yMaxCurr, w) );
            // "Advanced" (one-past) clamped coordinates for the border strips;
            // they equal t/b/l/r when the strip would fall outside the image.
            const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
            const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
            const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
            const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
            // TODO: 1D grid
            // Integer-part box sum.
            outValue += gradOutputIntData[b*(w+1) + r];
            outValue -= gradOutputIntData[t*(w+1) + r];
            outValue -= gradOutputIntData[b*(w+1) + l];
            outValue += gradOutputIntData[t*(w+1) + l];
            // -- xMax border
            outValue +=
                ( gradOutputIntData[bAdv*(w+1) + r]
                - gradOutputIntData[b   *(w+1) + r]
                - gradOutputIntData[bAdv*(w+1) + l]
                + gradOutputIntData[b   *(w+1) + l]
                ) * xMaxCurrFrac;
            // -- yMax border
            outValue +=
                ( gradOutputIntData[b*(w+1) + rAdv]
                - gradOutputIntData[b*(w+1) + r   ]
                - gradOutputIntData[t*(w+1) + rAdv]
                + gradOutputIntData[t*(w+1) + r   ]
                ) * yMaxCurrFrac;
            // -- xMin border
            outValue +=
                ( gradOutputIntData[t   *(w+1) + r]
                - gradOutputIntData[tAdv*(w+1) + r]
                - gradOutputIntData[t   *(w+1) + l]
                + gradOutputIntData[tAdv*(w+1) + l]
                ) * xMinCurrFrac;
            // -- yMin border
            outValue +=
                ( gradOutputIntData[b*(w+1) + l   ]
                - gradOutputIntData[b*(w+1) + lAdv]
                - gradOutputIntData[t*(w+1) + l   ]
                + gradOutputIntData[t*(w+1) + lAdv]
                ) * yMinCurrFrac;
            // -- corner pixels (zero when outside the image or degenerate)
            outValue +=
                xMaxCurrFrac*yMaxCurrFrac * (
                   (x+xMaxCurr > h-1 or
                    y+yMaxCurr > w-1 or
                    x+xMaxCurr < 0 or
                    y+yMaxCurr < 0 or
                    b == bAdv or
                    r == rAdv) ? 0 :
                   gradOutputData[b*gradOutputStrideRow + r]);
            outValue +=
                xMinCurrFrac*yMaxCurrFrac * (
                   (x+xMinCurr-1 > h-1 or
                    y+yMaxCurr > w-1 or
                    x+xMinCurr-1 < 0 or
                    y+yMaxCurr < 0 or
                    t == tAdv or
                    r == rAdv) ? 0 :
                   gradOutputData[tAdv*gradOutputStrideRow + r]);
            outValue +=
                xMaxCurrFrac*yMinCurrFrac * (
                   (x+xMaxCurr > h-1 or
                    y+yMinCurr-1 > w-1 or
                    x+xMaxCurr < 0 or
                    y+yMinCurr-1 < 0 or
                    b == bAdv or
                    l == lAdv) ? 0 :
                   gradOutputData[b*gradOutputStrideRow + lAdv]);
            outValue +=
                xMinCurrFrac*yMinCurrFrac * (
                   (x+xMinCurr-1 > h-1 or
                    y+yMinCurr-1 > w-1 or
                    x+xMinCurr-1 < 0 or
                    y+yMinCurr-1 < 0 or
                    t == tAdv or
                    l == lAdv) ? 0 :
                   gradOutputData[tAdv*gradOutputStrideRow + lAdv]);
            // go to the next channel
            gradOutputIntData += (h+1)*(w+1);
            gradOutputData += gradOutputStrideChannel;
        }
        gradInputData[x*w + y] = outValue;
    }
}
// TODO: port strided::updateGradInputCuda.
// Gradient w.r.t. the layer input (integer borders): accumulates over all
// windows into gradInputData (h x w). Only stride 1x1 is implemented; other
// strides fail loudly (previously a deliberate null-pointer dereference).
void updateGradInputVarScaleCuda(
    float *gradOutputIntData, float *gradInputData,
    int h, int w, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int strideH, const int strideW, const float *const scaleData) {

    if (strideH != 1 or strideW != 1) {
        fprintf(stderr,
            "updateGradInputVarScaleCuda: strideH/strideW != 1 is not implemented\n");
        exit(EXIT_FAILURE);
        // strided::updateGradInputCuda(
        //     gradOutputIntData, gradInputData, h, w, nWindows,
        //     xMin, xMax, yMin, yMax, strideH, strideW);
    }

    // The kernel is purely 2D (it never reads threadIdx.z), so the previous
    // 3D block of BLOCK_CHANNELS z-threads made every z-slice redundantly
    // compute and store the same values; a 2D block produces identical
    // results with a fraction of the threads.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(
        (h + dimBlock.x - 1) / dimBlock.x,
        (w + dimBlock.y - 1) / dimBlock.y);

    updateGradInputVarScaleKernel <<<dimGrid, dimBlock>>> (
        gradOutputIntData, gradInputData,
        h, w, nWindows,
        xMin, xMax, yMin, yMax);
}
// Gradient w.r.t. the layer input (fractional borders, per-pixel scale).
// Only stride 1x1 is implemented; other strides fail loudly (previously a
// deliberate null-pointer dereference).
// TODO: port strided::updateGradInputFracCuda.
void updateGradInputFracVarScaleCuda(
    float *gradOutputIntData, float *gradInputData,
    int h, int w, int nWindows,
    float *xMin, float *xMax, float *yMin, float *yMax,
    float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel,
    const int strideH, const int strideW, const float *const scaleData) {

    if (strideH != 1 or strideW != 1) {
        fprintf(stderr,
            "updateGradInputFracVarScaleCuda: strideH/strideW != 1 is not implemented\n");
        exit(EXIT_FAILURE);
        // strided::updateGradInputFracCuda(
        //     gradOutputIntData, gradInputData, h, w, nWindows,
        //     xMin, xMax, yMin, yMax,
        //     gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
        //     strideH, strideW);
    }

    // The kernel is purely 2D (it never reads threadIdx.z), so the previous
    // 3D block of BLOCK_CHANNELS z-threads made every z-slice redundantly
    // compute and store the same values; a 2D block produces identical
    // results with a fraction of the threads.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(
        (h + dimBlock.x - 1) / dimBlock.x,
        (w + dimBlock.y - 1) / dimBlock.y);

    updateGradInputFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        gradOutputIntData, gradInputData,
        h, w, nWindows,
        xMin, xMax, yMin, yMax,
        gradOutputData, gradOutputStrideRow, gradOutputStrideChannel,
        scaleData);
}
/************************ accGradParameters ************************/
// accGradParameters helper (fractional borders): multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by the xMax-border delta, built
// from the integral-image row difference plus fractional corner terms read
// from `inData`. One thread per (window, x, y); x, y are 1-indexed. The
// per-pixel `scale` read below is consumed by the upscale macros and also
// multiplied into `delta` at the end.
__global__ void xMaxDeltaIntegralFracVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMax, const float *yMin, const float *yMax,
    const float *inData, const int inDataStrideRow,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[(x-1)*w + (y-1)];
        // const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
        // const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
        const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
        const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
        const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
        // const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
        const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
        const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
        // Bottom corners of the box, clamped; zero when outside the image.
        // const float tlCorner = y+yMinInt <  1 or x+xMinInt <  1 ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMinInt-1))];
        const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
                               inData[
                                max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
                                max(0,min(w-1,y+yMinInt-1))];
        // const float trCorner = y+yMaxInt >= w or x+xMinInt <  1 ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMaxInt ))];
        const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
                               inData[
                                max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
                                max(0,min(w-1,y+yMaxInt ))];
        float delta = 0;
        delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
        delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
        // Row difference of the integral image at the xMax edge.
        delta +=
            intData[max(0,min(x+xMaxInt+1, h))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMaxInt , h))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMaxInt+1, h))*(w+1)
            + max(0,min(y+yMinInt, w))];
        delta +=
            intData[max(0,min(x+xMaxInt , h))*(w+1)
            + max(0,min(y+yMinInt, w))];
        delta *= (x+xMaxInt >= 1 and x+xMaxInt < h) * scale;
        // note multiplication of `delta` by `scale`
        tmpArray[(x-1)*w + (y-1)] *= delta;
    }
}
// accGradParameters helper (fractional borders): multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by MINUS the xMin-border delta
// (sign flipped at the final store), built from the integral-image row
// difference plus fractional corner terms read from `inData`. One thread per
// (window, x, y); x, y are 1-indexed.
__global__ void xMinDeltaIntegralFracVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *yMin, const float *yMax,
    const float *inData, const int inDataStrideRow,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[(x-1)*w + (y-1)];
        const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
        // const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
        const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
        const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
        // const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
        // const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
        const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
        const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
        // Top corners of the box, clamped; zero when outside the image.
        const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
                               inData[
                                max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
                                max(0,min(w-1,y+yMinInt-1))];
        // const float blCorner = y+yMinInt <  1 or x+xMaxInt >= h ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMinInt-1))];
        const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
                               inData[
                                max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
                                max(0,min(w-1,y+yMaxInt ))];
        // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMaxInt ))];
        float delta = 0;
        delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac);
        delta += tlCorner * (y+yMinInt >= w ? 1.0f : yMinFrac);
        // Row difference of the integral image at the xMin edge.
        delta +=
            intData[max(0,min(x+xMinInt , h))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMinInt-1, h))*(w+1)
            + max(0,min(y+yMaxInt, w))];
        delta -=
            intData[max(0,min(x+xMinInt , h))*(w+1)
            + max(0,min(y+yMinInt, w))];
        delta +=
            intData[max(0,min(x+xMinInt-1, h))*(w+1)
            + max(0,min(y+yMinInt, w))];
        delta *= (x+xMinInt >= 1 and x+xMinInt < h) * scale;
        // note multiplication of `delta` by `scale`
        tmpArray[(x-1)*w + (y-1)] *= -delta;
    }
}
// accGradParameters helper (fractional borders): multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by the yMax-border delta, built
// from the integral-image column difference plus fractional corner terms
// read from `inData`. One thread per (window, x, y); x, y are 1-indexed.
__global__ void yMaxDeltaIntegralFracVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *xMax, const float *yMax,
    const float *inData, const int inDataStrideRow,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[(x-1)*w + (y-1)];
        const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
        const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
        // const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
        // const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
        const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
        const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
        const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
        // const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
        // Right-side corners of the box, clamped; zero outside the image.
        // const float tlCorner = y+yMinInt <  1 or x+xMinInt <  1 ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMinInt-1))];
        // const float blCorner = y+yMinInt <  1 or x+xMaxInt >= h ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMinInt-1))];
        const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 :
                               inData[
                                max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
                                max(0,min(w-1,y+yMaxInt ))];
        const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
                               inData[
                                max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
                                max(0,min(w-1,y+yMaxInt ))];
        float delta = 0;
        delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
        delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
        // Column difference of the integral image at the yMax edge.
        delta +=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMaxInt+1, w))];
        delta -=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMaxInt , w))];
        delta -=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMaxInt+1, w))];
        delta +=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMaxInt , w))];
        delta *= (y+yMaxInt >= 1 and y+yMaxInt < w) * scale;
        // note multiplication of `delta` by `scale`
        tmpArray[(x-1)*w + (y-1)] *= delta;
    }
}
// accGradParameters helper (fractional borders): multiplies
// tmpArray[window][(x-1)*w + (y-1)] in place by MINUS the yMin-border delta
// (sign flipped at the final store), built from the integral-image column
// difference plus fractional corner terms read from `inData`. One thread per
// (window, x, y); x, y are 1-indexed.
__global__ void yMinDeltaIntegralFracVarScaleKernel(
    const float *intData, float *tmpArray,
    const int nWindows, const int h, const int w,
    const float *xMin, const float *xMax, const float *yMin,
    const float *inData, const int inDataStrideRow,
    const float *const scaleData) {
    // Decompose the flat thread id into (windowIdx, x, y).
    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    const int y = id % w + 1; id /= w; // 1-indexed
    const int x = id % h + 1; id /= h; // 1-indexed
    const int & windowIdx = id;
    if (windowIdx < nWindows and x <= h and y <= w) {
        tmpArray += windowIdx * h * w;
        // `scale` is referenced by the upscaleMin/upscaleMax macros.
        const float scale = scaleData[(x-1)*w + (y-1)];
        const int xMinInt = (int)ceil(upscaleMin(xMin[windowIdx])-1);
        const float xMinFrac = xMinInt-upscaleMin(xMin[windowIdx])+1;
        const int yMinInt = (int)ceil(upscaleMin(yMin[windowIdx])-1);
        // const float yMinFrac = yMinInt-upscaleMin(yMin[windowIdx])+1;
        const int xMaxInt = (int)floor(upscaleMax(xMax[windowIdx]));
        const float xMaxFrac = upscaleMax(xMax[windowIdx])-xMaxInt;
        // const int yMaxInt = (int)floor(upscaleMax(yMax[windowIdx]));
        // const float yMaxFrac = upscaleMax(yMax[windowIdx])-yMaxInt;
        // Left-side corners of the box, clamped; zero outside the image.
        const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 :
                               inData[
                                max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
                                max(0,min(w-1,y+yMinInt-1))];
        const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 :
                               inData[
                                max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
                                max(0,min(w-1,y+yMinInt-1))];
        // const float trCorner = y+yMaxInt >= w or x+xMinInt <  1 ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMaxInt ))];
        // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 :
        //                        inData[
        //                         max(0,min(h-1,x+xMaxInt )) * inDataStrideRow +
        //                         max(0,min(w-1,y+yMaxInt ))];
        float delta = 0;
        delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac);
        delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac);
        // Column difference of the integral image at the yMin edge.
        delta +=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMinInt , w))];
        delta -=
            intData[max(0,min(x+xMaxInt, h))*(w+1)
            + max(0,min(y+yMinInt-1, w))];
        delta -=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMinInt , w))];
        delta +=
            intData[max(0,min(x+xMinInt, h))*(w+1)
            + max(0,min(y+yMinInt-1, w))];
        delta *= (y+yMinInt >= 1 and y+yMinInt < w) * scale;
        // note multiplication of `delta` by `scale`
        tmpArray[(x-1)*w + (y-1)] *= -delta;
    }
}
// Launches the four fractional-coordinate backward kernels (one per window
// border: xMax, xMin, yMax, yMin). Each kernel multiplies its quarter of
// `tmpArray` — laid out as four consecutive [nWindows x h x w] slabs — in
// place by the corresponding border derivative.
// Precondition: strideH == strideW == 1 (strided variant not implemented).
void backwardFracVarScaleCuda(
    float *intData, float *tmpArray,
    int nWindows, int h, int w,
    float *xMin, float *xMax, float *yMin, float *yMax,
    float *inData, int inDataStrideRow,
    const int strideH, const int strideW,
    const float *const scaleData) {

    if (strideH != 1 || strideW != 1) {
        // TODO: strided variant. The original code read through a null
        // pointer (`int a = *(int*)0;`) to force a crash — that is
        // undefined behavior and may be optimized away, letting execution
        // fall through to the kernels below with wrong strides.
        // Trap deterministically instead.
        __builtin_trap();
        // strided::backwardFracCuda(
        // intData, tmpArray, nWindows, h, w,
        // xMin, xMax, yMin, yMax, inData, inDataStrideRow,
        // strideH, strideW);
    }

    // One thread per (window, x, y) cell, flat 1-D grid.
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);

    xMaxDeltaIntegralFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 0*nWindows*h*w, nWindows, h, w,
        xMax, yMin, yMax, inData, inDataStrideRow, scaleData);
    xMinDeltaIntegralFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 1*nWindows*h*w, nWindows, h, w,
        xMin, yMin, yMax, inData, inDataStrideRow, scaleData);
    yMaxDeltaIntegralFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 2*nWindows*h*w, nWindows, h, w,
        xMin, xMax, yMax, inData, inDataStrideRow, scaleData);
    yMinDeltaIntegralFracVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 3*nWindows*h*w, nWindows, h, w,
        xMin, xMax, yMin, inData, inDataStrideRow, scaleData);
}
// Backward pass (integer-coordinate variant) w.r.t. the xMax border.
// One thread per (window, x, y) cell; multiplies tmpArray in place by the
// derivative of the box sum when the bottom border moves by one row.
// NOTE(review): `scaleData` is received but never read here, unlike the
// Frac* variants which multiply by `scale` — confirm whether the scale is
// applied elsewhere for this code path or was omitted by mistake.
__global__ void xMaxDeltaIntegralVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMax, const float *yMin, const float *yMax,
const float *const scaleData) {
// Decompose the flat thread id into (windowIdx, x, y), 1-indexed.
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
// const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
// Row strip [xMaxInt, xMaxInt+1) x [yMinInt, yMaxInt] via the
// integral image, with indices clamped to the (h+1) x (w+1) table.
float delta = 0;
delta +=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(1,min(x+xMaxInt+1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMaxInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
// Zero when the shifted border falls outside the image.
delta *= (x+xMaxInt >= 1 and x+xMaxInt < h);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
// Backward pass (integer-coordinate variant) w.r.t. the xMin border.
// Mirror of xMaxDeltaIntegralVarScaleKernel; the result is negated because
// increasing xMin shrinks the window.
// NOTE(review): `scaleData` is unused here (see xMax variant) — confirm.
// NOTE(review): the clamps are asymmetric (`h-1` on one row, `h` on the
// other) unlike the other kernels — verify this is intentional.
__global__ void xMinDeltaIntegralVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *yMin, const float *yMax,
const float *const scaleData) {
// Decompose the flat thread id into (windowIdx, x, y), 1-indexed.
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
// Row strip just above the xMin border, via the integral image.
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
// Zero when the shifted border falls outside the image.
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
// Backward pass (integer-coordinate variant) w.r.t. the yMax border.
// Column-strip analogue of xMaxDeltaIntegralVarScaleKernel.
// NOTE(review): `scaleData` is unused here (see xMax variant) — confirm.
__global__ void yMaxDeltaIntegralVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMax,
const float *const scaleData) {
// Decompose the flat thread id into (windowIdx, x, y), 1-indexed.
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
// const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
// Column strip [yMaxInt, yMaxInt+1) x [xMinInt, xMaxInt] via the
// integral image, indices clamped to the (h+1) x (w+1) table.
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(1,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
// Zero when the shifted border falls outside the image.
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
tmpArray[(x-1)*w + (y-1)] *= delta;
}
}
// Backward pass (integer-coordinate variant) w.r.t. the yMin border.
// Column-strip mirror of yMaxDeltaIntegralVarScaleKernel; negated because
// increasing yMin shrinks the window.
// NOTE(review): `scaleData` is unused here (see xMax variant) — confirm.
// NOTE(review): the inner clamps mix `w` and `w-1` limits — verify.
__global__ void yMinDeltaIntegralVarScaleKernel(
const float *intData, float *tmpArray,
const int nWindows, const int h, const int w,
const float *xMin, const float *xMax, const float *yMin,
const float *const scaleData) {
// Decompose the flat thread id into (windowIdx, x, y), 1-indexed.
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and x <= h and y <= w) {
tmpArray += windowIdx * h * w;
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
const int xMaxInt = (int)floor(xMax[windowIdx]);
// const int yMaxInt = (int)floor(yMax[windowIdx]);
// Column strip just left of the yMin border, via the integral image.
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt , w ))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMinInt-1, w-1))];
// Zero when the shifted border falls outside the image.
delta *= (y+yMinInt >= 1 and y+yMinInt < w);
tmpArray[(x-1)*w + (y-1)] *= -delta;
}
}
// Launches the four integer-coordinate backward kernels (one per window
// border). Each kernel multiplies its quarter of `tmpArray` — four
// consecutive [nWindows x h x w] slabs — in place by the border derivative.
// Precondition: strideH == strideW == 1 (strided variant not implemented).
void backwardVarScaleCuda(
    float *intData, float *tmpArray,
    int nWindows, int h, int w,
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int strideH, const int strideW,
    const float *const scaleData) {

    if (strideH != 1 || strideW != 1) {
        // TODO: strided variant. The original `int a = *(int*)0;` was a
        // null-pointer read — undefined behavior that the compiler may
        // remove entirely, silently running the kernels below with wrong
        // strides. Trap deterministically instead.
        __builtin_trap();
        // strided::backwardCuda(
        // intData, tmpArray, nWindows, h, w,
        // xMin, xMax, yMin, yMax, strideH, strideW);
    }

    // One thread per (window, x, y) cell, flat 1-D grid.
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x);

    xMaxDeltaIntegralVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 0*nWindows*h*w,
        nWindows, h, w, xMax, yMin, yMax, scaleData);
    xMinDeltaIntegralVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 1*nWindows*h*w,
        nWindows, h, w, xMin, yMin, yMax, scaleData);
    yMaxDeltaIntegralVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 2*nWindows*h*w,
        nWindows, h, w, xMin, xMax, yMax, scaleData);
    yMinDeltaIntegralVarScaleKernel <<<dimGrid, dimBlock>>> (
        intData, tmpArray + 3*nWindows*h*w,
        nWindows, h, w, xMin, xMax, yMin, scaleData);
}
/************************ Other stuff ************************/
// Clamps window borders into the image and enforces a minimal window width,
// in place. Threads [0, size) fix the x-extents against limit `h`; threads
// [size, 2*size) fix the y-extents against limit `w`.
__global__ void dirtyFixWindowsVarScaleKernel(
    float *xMin, float *xMax, float *yMin, float *yMax,
    const int size, const float h, const float w, const float minWidth) {

    int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
    if (id >= 2*size) return;

    // Select the parameter pair and image limit this thread works on.
    const bool isX = id < size;
    float *loArr = isX ? xMin : yMin;
    float *hiArr = isX ? xMax : yMax;
    const float limit = isX ? h : w;
    if (!isX) id -= size;

    // Clamp both borders into (-limit, limit).
    float lower = max(-limit+1, min(limit-1, loArr[id]));
    float upper = max(-limit+1, min(limit-1, hiArr[id]));

    // If the window collapsed below the minimal width, re-center it.
    if (lower + minWidth - 0.99 > upper) {
        const float mean = 0.5 * (lower + upper);
        lower = mean - 0.5 * (minWidth - 0.9);
        upper = mean + 0.5 * (minWidth - 0.9);
    }

    loArr[id] = lower;
    hiArr[id] = upper;
}
// Host wrapper: sanitizes all window borders on the device in a single
// launch (2*size threads: x-extents first, then y-extents).
void dirtyFixWindowsVarScale(
    float *xMin, float *xMax, float *yMin, float *yMax,
    int size, int h, int w, float minWidth) {

    const int totalThreads = 2 * size;
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((totalThreads + dimBlock.x - 1) / dimBlock.x);

    dirtyFixWindowsVarScaleKernel <<<dimGrid, dimBlock>>> (
        xMin, xMax, yMin, yMax, size, (float)h, (float)w, minWidth);
}
} // extern "C"
|
597b874dcaa4a41e0f9da4eb82c99120716679c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include "error_checks_1.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Element-wise vector sum: C[i] = A[i] + B[i], one thread per element.
__global__ void vector_add(double *C, const double *A, const double *B, int N)
{
    // Flat global index of this thread.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the partial block at the tail of the grid.
    if (i >= N) return;
    C[i] = A[i] + B[i];
}
// Allocates device buffers, copies the inputs, runs vector_add, and prints
// the result hC[i] = i + i*i for i in [0, N).
int main(void)
{
    const int N = 20;
    // Threads per block must not exceed the device limit (1024 on current
    // GPUs). The previous value, 1025, made every kernel launch fail with
    // "invalid configuration argument", so hC was printed uninitialized.
    const int ThreadsInBlock = 512;
    double *dA, *dB, *dC;
    double hA[N], hB[N], hC[N];

    // Host inputs: hA[i] = i, hB[i] = i^2.
    for(int i = 0; i < N; ++i) {
        hA[i] = (double) i;
        hB[i] = (double) i * i;
    }

    // Device allocations and host-to-device copies, all checked.
    CUDA_CHECK( hipMalloc((void**)&dA, sizeof(double)*N) );
    CUDA_CHECK( hipMalloc((void**)&dB, sizeof(double)*N) );
    CUDA_CHECK( hipMalloc((void**)&dC, sizeof(double)*N) );
    CUDA_CHECK( hipMemcpy((void*)dA, (void*)hA, sizeof(double)*N, hipMemcpyHostToDevice) );
    CUDA_CHECK( hipMemcpy((void*)dB, (void*)hB, sizeof(double)*N, hipMemcpyHostToDevice) );

    // Round the grid up so every element gets a thread.
    int blockSize = ThreadsInBlock;
    int numBlocks = (N + blockSize - 1) / blockSize;
    dim3 grid(numBlocks), threads(blockSize);

    hipLaunchKernelGGL(( vector_add), dim3(grid),dim3(threads), 0, 0, dC,dA,dB,N);
    CHECK_ERROR_MSG("vector_add kernel");

    // Copy back the results and free the device memory.
    CUDA_CHECK( hipMemcpy((void*)hC,(void*)dC,sizeof(double)*N,hipMemcpyDeviceToHost ) );
    CUDA_CHECK( hipFree(dA) );
    CUDA_CHECK( hipFree(dB) );
    CUDA_CHECK( hipFree(dC) );

    for(int i = 0; i < N; i++)
        printf("%5.1f\n", hC[i]);

    return 0;
} | 597b874dcaa4a41e0f9da4eb82c99120716679c6.cu | #include <cstdio>
#include <cmath>
#include "error_checks_1.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
// Element-wise vector sum: C[i] = A[i] + B[i], one thread per element.
__global__ void vector_add(double *C, const double *A, const double *B, int N)
{
    // Flat global index of this thread.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the partial block at the tail of the grid.
    if (i >= N) return;
    C[i] = A[i] + B[i];
}
// Allocates device buffers, copies the inputs, runs vector_add, and prints
// the result hC[i] = i + i*i for i in [0, N).
int main(void)
{
    const int N = 20;
    // Threads per block must not exceed the device limit (1024 on current
    // GPUs). The previous value, 1025, made every kernel launch fail with
    // "invalid configuration argument", so hC was printed uninitialized.
    const int ThreadsInBlock = 512;
    double *dA, *dB, *dC;
    double hA[N], hB[N], hC[N];

    // Host inputs: hA[i] = i, hB[i] = i^2.
    for(int i = 0; i < N; ++i) {
        hA[i] = (double) i;
        hB[i] = (double) i * i;
    }

    // Device allocations and host-to-device copies, all checked.
    CUDA_CHECK( cudaMalloc((void**)&dA, sizeof(double)*N) );
    CUDA_CHECK( cudaMalloc((void**)&dB, sizeof(double)*N) );
    CUDA_CHECK( cudaMalloc((void**)&dC, sizeof(double)*N) );
    CUDA_CHECK( cudaMemcpy((void*)dA, (void*)hA, sizeof(double)*N, cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy((void*)dB, (void*)hB, sizeof(double)*N, cudaMemcpyHostToDevice) );

    // Round the grid up so every element gets a thread.
    int blockSize = ThreadsInBlock;
    int numBlocks = (N + blockSize - 1) / blockSize;
    dim3 grid(numBlocks), threads(blockSize);

    vector_add<<<grid,threads>>>(dC,dA,dB,N);
    CHECK_ERROR_MSG("vector_add kernel");

    // Copy back the results and free the device memory.
    CUDA_CHECK( cudaMemcpy((void*)hC,(void*)dC,sizeof(double)*N,cudaMemcpyDeviceToHost ) );
    CUDA_CHECK( cudaFree(dA) );
    CUDA_CHECK( cudaFree(dB) );
    CUDA_CHECK( cudaFree(dC) );

    for(int i = 0; i < N; i++)
        printf("%5.1f\n", hC[i]);

    return 0;
} |
2bd7cd2ff99d65a91d629c644c21ed75e6d8167b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
// Aborts the process with a diagnostic if the most recent HIP API call or
// kernel launch failed. `file` may be NULL to omit the source location.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(status));
    exit(EXIT_FAILURE);
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to 0/1 flags for stream compaction: 1 marks
         * elements to keep (non-zero input), 0 marks elements to drop.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
            if (tid >= n) {
                return;
            }
            bools[tid] = (idata[tid] == 0) ? 0 : 1;
        }

        /**
         * Scatters kept elements to their compacted slots: for each i with
         * bools[i] == 1, copies idata[i] to odata[indices[i]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
            if (tid >= n) {
                return;
            }
            if (bools[tid] == 1) {
                odata[indices[tid]] = idata[tid];
            }
        }
    }
}
| 2bd7cd2ff99d65a91d629c644c21ed75e6d8167b.cu | #include "common.h"
// Aborts the process with a diagnostic if the most recent CUDA API call or
// kernel launch failed. `file` may be NULL to omit the source location.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to 0/1 flags for stream compaction: 1 marks
         * elements to keep (non-zero input), 0 marks elements to drop.
         */
        __global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
            const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
            if (tid >= n) {
                return;
            }
            bools[tid] = (idata[tid] == 0) ? 0 : 1;
        }

        /**
         * Scatters kept elements to their compacted slots: for each i with
         * bools[i] == 1, copies idata[i] to odata[indices[i]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            const int tid = threadIdx.x + (blockIdx.x * blockDim.x);
            if (tid >= n) {
                return;
            }
            if (bools[tid] == 1) {
                odata[indices[tid]] = idata[tid];
            }
        }
    }
}
|
77151d376e4d7ebf3e77b7c0952164056a843726.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Row-major flat index into an H x W image.
// NOTE(review): `H` is unused — presumably kept for signature symmetry.
__device__ size_t GIDX(size_t row, size_t col, int H, int W) {
return row * W + col;
}
// Element-wise difference d_dt = d_f2ptr - d_f1ptr over an H x W image.
// Expects a 2-D block/grid covering the image; out-of-range threads exit.
// `idx` is computed before the bounds check but only used after it, so the
// early computation is harmless.
// NOTE(review): row/col are size_t while H/W are int — assumes H, W >= 0;
// confirm with callers.
__global__ void kernel_sub(float* d_f1ptr, float* d_f2ptr, float* d_dt, int H, int W) {
size_t row = threadIdx.y + blockDim.y * blockIdx.y;
size_t col = threadIdx.x + blockDim.x * blockIdx.x;
size_t idx = GIDX(row, col, H, W);
if (row >= H || col >= W) {
return;
}
d_dt[idx] = d_f2ptr[idx] - d_f1ptr[idx];
} | 77151d376e4d7ebf3e77b7c0952164056a843726.cu | #include "includes.h"
// Row-major flat index into an H x W image.
// NOTE(review): `H` is unused — presumably kept for signature symmetry.
__device__ size_t GIDX(size_t row, size_t col, int H, int W) {
return row * W + col;
}
// Element-wise difference d_dt = d_f2ptr - d_f1ptr over an H x W image.
// Expects a 2-D block/grid covering the image; out-of-range threads exit.
// `idx` is computed before the bounds check but only used after it, so the
// early computation is harmless.
// NOTE(review): row/col are size_t while H/W are int — assumes H, W >= 0;
// confirm with callers.
__global__ void kernel_sub(float* d_f1ptr, float* d_f2ptr, float* d_dt, int H, int W) {
size_t row = threadIdx.y + blockDim.y * blockIdx.y;
size_t col = threadIdx.x + blockDim.x * blockIdx.x;
size_t idx = GIDX(row, col, H, W);
if (row >= H || col >= W) {
return;
}
d_dt[idx] = d_f2ptr[idx] - d_f1ptr[idx];
} |
545b160b9ed3c28d3ec59a28a6204c7418ed14c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "time.h"
#include "Pass.h"
#include "constants.h"
#include "lattice_PAR.h"
using namespace std;
// Computes RMS beam parameters from the particle distribution:
// PEx/PEy — geometric emittances sqrt(<u^2><u'^2> - <u u'>^2) in x and y,
// PEdelta — RMS momentum spread. Transverse angles are reconstructed as
// p/(1+delta). Two passes over the _Npart particles: means, then second
// central moments.
void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta)
{
    REAL mx=0, mxp=0, my=0, myp=0, mdelta=0;
    REAL sxx=0, sxpxp=0, sxxp=0, syy=0, sypyp=0, syyp=0, sdd=0;

    // First pass: mean values.
    for(int i=0;i<_Npart;i++)
    {
        mx     += y[i].x[x_]/_Npart;
        mxp    += y[i].x[px_]/(1+y[i].x[delta_])/_Npart;
        my     += y[i].x[y_]/_Npart;
        myp    += y[i].x[py_]/(1+y[i].x[delta_])/_Npart;
        mdelta += y[i].x[delta_]/_Npart;
    }

    // Second pass: second central moments.
    for(int i=0;i<_Npart;i++)
    {
        const REAL dx  = y[i].x[x_] - mx;
        const REAL dxp = y[i].x[px_]/(1+y[i].x[delta_]) - mxp;
        const REAL dy  = y[i].x[y_] - my;
        const REAL dyp = y[i].x[py_]/(1+y[i].x[delta_]) - myp;
        const REAL dd  = y[i].x[delta_] - mdelta;
        sxx   += dx*dx/_Npart;
        sxpxp += dxp*dxp/_Npart;
        sxxp  += dx*dxp/_Npart;
        syy   += dy*dy/_Npart;
        sypyp += dyp*dyp/_Npart;
        syyp  += dy*dyp/_Npart;
        sdd   += dd*dd/_Npart;
    }

    *PEx = sqrt(sxx*sxpxp - sxxp*sxxp);
    *PEy = sqrt(syy*sypyp - syyp*syyp);
    *PEdelta = sqrt(sdd);
}
// Particle-tracking driver: seeds a bunch, tracks it through the lattice
// for _Nturn1 turns on the GPU, and logs per-turn emittances to "abc.txt".
int main(int argc, char** argv)
{
clock_t start, finish;
start = clock();
//get a bunch of random numbers
// Pre-generated random numbers are read from the file "queue" and staged
// on the device for use inside the Track kernel.
REAL *queue, *dqueue;
queue=(REAL*)malloc(_pool*sizeof(REAL));
hipMalloc(&dqueue,_pool*sizeof(REAL));
ifstream infile1("queue");
for(int i1=0;i1<_pool;i1++) infile1>>queue[i1];
infile1.close();
hipMemcpy(dqueue,queue,_pool*sizeof(REAL),hipMemcpyHostToDevice);
//initialization
// GSL RNG for sampling the initial particle distribution.
const gsl_rng_type * T;
gsl_rng * r;
gsl_rng_env_setup();
T = gsl_rng_default;
r = gsl_rng_alloc (T);
COORD *part, *dpart;
int size = _Npart * sizeof(COORD);
part=(COORD*)malloc(size);
hipMalloc(&dpart,size);
REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta;
int i,n;
// Sample actions from a truncated (<= 6 E) exponential and phases
// uniformly; convert to phase-space coordinates via the beta functions.
for(i=0;i<_Npart;i++)
{
do {Jx=gsl_ran_exponential(r, 2*E_x);}
while(Jx>E_x*6);
do {Jy=gsl_ran_exponential(r, 2*E_y);}
while(Jy>E_y*6);
phi_x=gsl_ran_flat(r,0,2*M_PI);
phi_y=gsl_ran_flat(r,0,2*M_PI);
part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x);
part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x);
part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y);
part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y);
part[i].x[z_]=0;
part[i].x[delta_]=0.00;
}
// part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.01;
hipMemcpy(dpart,part,size,hipMemcpyHostToDevice);
//Initiate lattice
Initiate_lattice();
// Truncate the log file before appending per-turn records below.
ofstream outfile("abc.txt");
outfile.close();
// Main tracking loop: one kernel launch per turn, then copy the bunch
// back and record the emittances for this turn.
for(n=0;n<_Nturn1;n++)
{
hipLaunchKernelGGL(( Track), dim3(_BlockNum),dim3(_ThreadNum), 0, 0, dpart,dqueue,n);
hipMemcpy(part,dpart,size,hipMemcpyDeviceToHost);
emittance(part,&Ex,&Ey,&Sdelta);
ofstream outfile("abc.txt",ios::app);
outfile<<n<<" "<<Ex<<" "<<Ey<<" "<<Sdelta<<endl;
// outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl;
// for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} }
outfile.close();
}
cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl;
// Release host and device resources.
free(part);
hipFree(dpart);
free(queue);
hipFree(dqueue);
gsl_rng_free (r);
finish = clock();
cout<<(finish-start)/CLOCKS_PER_SEC<<" sec"<<endl;
}
| 545b160b9ed3c28d3ec59a28a6204c7418ed14c6.cu | #include <iostream>
#include <fstream>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <curand.h>
#include <curand_kernel.h>
#include "time.h"
#include "Pass.h"
#include "constants.h"
#include "lattice_PAR.h"
using namespace std;
// Computes RMS beam parameters from the particle distribution:
// PEx/PEy — geometric emittances sqrt(<u^2><u'^2> - <u u'>^2) in x and y,
// PEdelta — RMS momentum spread. Transverse angles are reconstructed as
// p/(1+delta). Two passes over the _Npart particles: means, then second
// central moments.
void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta)
{
    REAL mx=0, mxp=0, my=0, myp=0, mdelta=0;
    REAL sxx=0, sxpxp=0, sxxp=0, syy=0, sypyp=0, syyp=0, sdd=0;

    // First pass: mean values.
    for(int i=0;i<_Npart;i++)
    {
        mx     += y[i].x[x_]/_Npart;
        mxp    += y[i].x[px_]/(1+y[i].x[delta_])/_Npart;
        my     += y[i].x[y_]/_Npart;
        myp    += y[i].x[py_]/(1+y[i].x[delta_])/_Npart;
        mdelta += y[i].x[delta_]/_Npart;
    }

    // Second pass: second central moments.
    for(int i=0;i<_Npart;i++)
    {
        const REAL dx  = y[i].x[x_] - mx;
        const REAL dxp = y[i].x[px_]/(1+y[i].x[delta_]) - mxp;
        const REAL dy  = y[i].x[y_] - my;
        const REAL dyp = y[i].x[py_]/(1+y[i].x[delta_]) - myp;
        const REAL dd  = y[i].x[delta_] - mdelta;
        sxx   += dx*dx/_Npart;
        sxpxp += dxp*dxp/_Npart;
        sxxp  += dx*dxp/_Npart;
        syy   += dy*dy/_Npart;
        sypyp += dyp*dyp/_Npart;
        syyp  += dy*dyp/_Npart;
        sdd   += dd*dd/_Npart;
    }

    *PEx = sqrt(sxx*sxpxp - sxxp*sxxp);
    *PEy = sqrt(syy*sypyp - syyp*syyp);
    *PEdelta = sqrt(sdd);
}
// Particle-tracking driver: seeds a bunch, tracks it through the lattice
// for _Nturn1 turns on the GPU, and logs per-turn emittances to "abc.txt".
int main(int argc, char** argv)
{
clock_t start, finish;
start = clock();
//get a bunch of random numbers
// Pre-generated random numbers are read from the file "queue" and staged
// on the device for use inside the Track kernel.
REAL *queue, *dqueue;
queue=(REAL*)malloc(_pool*sizeof(REAL));
cudaMalloc(&dqueue,_pool*sizeof(REAL));
ifstream infile1("queue");
for(int i1=0;i1<_pool;i1++) infile1>>queue[i1];
infile1.close();
cudaMemcpy(dqueue,queue,_pool*sizeof(REAL),cudaMemcpyHostToDevice);
//initialization
// GSL RNG for sampling the initial particle distribution.
const gsl_rng_type * T;
gsl_rng * r;
gsl_rng_env_setup();
T = gsl_rng_default;
r = gsl_rng_alloc (T);
COORD *part, *dpart;
int size = _Npart * sizeof(COORD);
part=(COORD*)malloc(size);
cudaMalloc(&dpart,size);
REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta;
int i,n;
// Sample actions from a truncated (<= 6 E) exponential and phases
// uniformly; convert to phase-space coordinates via the beta functions.
for(i=0;i<_Npart;i++)
{
do {Jx=gsl_ran_exponential(r, 2*E_x);}
while(Jx>E_x*6);
do {Jy=gsl_ran_exponential(r, 2*E_y);}
while(Jy>E_y*6);
phi_x=gsl_ran_flat(r,0,2*M_PI);
phi_y=gsl_ran_flat(r,0,2*M_PI);
part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x);
part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x);
part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y);
part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y);
part[i].x[z_]=0;
part[i].x[delta_]=0.00;
}
// part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.01;
cudaMemcpy(dpart,part,size,cudaMemcpyHostToDevice);
//Initiate lattice
Initiate_lattice();
// Truncate the log file before appending per-turn records below.
ofstream outfile("abc.txt");
outfile.close();
// Main tracking loop: one kernel launch per turn, then copy the bunch
// back and record the emittances for this turn.
for(n=0;n<_Nturn1;n++)
{
Track<<<_BlockNum,_ThreadNum>>>(dpart,dqueue,n);
cudaMemcpy(part,dpart,size,cudaMemcpyDeviceToHost);
emittance(part,&Ex,&Ey,&Sdelta);
ofstream outfile("abc.txt",ios::app);
outfile<<n<<" "<<Ex<<" "<<Ey<<" "<<Sdelta<<endl;
// outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl;
// for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} }
outfile.close();
}
cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl;
// Release host and device resources.
free(part);
cudaFree(dpart);
free(queue);
cudaFree(dqueue);
gsl_rng_free (r);
finish = clock();
cout<<(finish-start)/CLOCKS_PER_SEC<<" sec"<<endl;
}
|
8dff04194cc365ccd3e47e30f48a74e0129803b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/convolutions.h>
#include <ops/declarable/helpers/im2col.h>
#include <ops/declarable/helpers/col2im.h>
#include<ops/declarable/helpers/addBias.h>
#include <exceptions/cuda_exception.h>
#include <NDArrayFactory.h>
#include <MmulHelper.h>
#include <PointersManager.h>
#include <templatemath.h>
namespace nd4j {
namespace ops {
//////////////////////////////////////////////////////////////////////////
// vol [bS, iC, iD, iH, iW] is convoluted to col [bS, iC, kD, kH, kW, oD, oH, oW]
// One thread per element of `columns` ([bS, iC, kD, kH, kW, oD, oH, oW]):
// copies the matching voxel from `volume` ([bS, iC, iD, iH, iW]), or writes
// zero when the (padded/dilated/strided) source coordinate is outside the
// volume. Per-thread 8-long coordinate scratch lives in dynamic shared
// memory; launch must provide rank * sizeof(Nd4jLong) * blockDim.x bytes.
template <typename T>
static __global__ void vol2colCuda(const void* volume, const Nd4jLong* volShapeInfo, void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* vol = reinterpret_cast<const T*>(volume);
T* col = reinterpret_cast<T*>(columns);
// NOTE(review): `volRank` is written but never read — dead shared slot.
__shared__ int colRank, volRank;
__shared__ Nd4jLong colLen, iD, iH, iW, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
volRank = 5;
colRank = 8;
colLen = shape::length(colShapeInfo);
// Spatial extents of the input volume from its shape info.
iD = volShapeInfo[3];
iH = volShapeInfo[4];
iW = volShapeInfo[5];
}
__syncthreads();
const auto colInd = threadIdx.x + blockIdx.x * blockDim.x;
if(colInd >= colLen)
return;
// This thread's coordinate scratch inside the shared-memory pool.
auto coords = sharedMem + threadIdx.x * colRank;
shape::index2coords(colInd, colShapeInfo, coords);
// const auto colW = coords[7];
// const auto colH = coords[6];
// const auto colD = coords[5];
// const auto kCol = coords[4];
// const auto kRow = coords[3];
// const auto kDep = coords[2];
// const auto c = coords[1];
// const auto b = coords[0];
const auto colOffset = shape::getOffset(colShapeInfo, coords);
// Reuse coords[2..4] as the source voxel coordinate (may be negative
// or past the extent when the kernel window hangs over the padding).
coords[2] = -pD + coords[2] * dD + coords[5] * sD; // const auto volDep = (-pD + kDep * dD) + colD * sD;
coords[3] = -pH + coords[3] * dH + coords[6] * sH; // const auto volRow = (-pH + kRow * dH) + colH * sH;
coords[4] = -pW + coords[4] * dW + coords[7] * sW; // const auto volCol = (-pW + kCol * dW) + colW * sW;
// Unsigned compare folds the `< 0` and `>= extent` checks into one.
if (static_cast<unsigned>(coords[2]) >= static_cast<unsigned>(iD) || static_cast<unsigned>(coords[3]) >= static_cast<unsigned>(iH) || static_cast<unsigned>(coords[4]) >= static_cast<unsigned>(iW))
col[colOffset] = static_cast<T>(0.);
else
col[colOffset] = vol[shape::getOffset(volShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
// Typed launch helper for vol2colCuda; instantiated per dtype by
// BUILD_SINGLE_SELECTOR from ConvolutionUtils::vol2col.
template <typename T>
static void vol2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* volume, const Nd4jLong* volShapeInfo,
void* columns, const Nd4jLong* colShapeInfo,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
hipLaunchKernelGGL(( vol2colCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, volume, volShapeInfo, columns, colShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
// Host entry point: unfolds `vol` [bS, iC, iD, iH, iW] into `col`
// [bS, iC, kD, kH, kW, oD, oH, oW]. Sizes the grid so there is one thread
// per col element and reserves per-thread coordinate scratch in dynamic
// shared memory; synchronizes before returning.
void ConvolutionUtils::vol2col(nd4j::graph::Context& block, const NDArray& vol, NDArray& col, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
PointersManager manager(block.launchContext(), "vol2col");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (col.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// Per-thread 8-coordinate scratch plus slack for the shared header vars.
const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&col}, {&vol});
BUILD_SINGLE_SELECTOR(vol.dataType(), vol2colCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), vol.getSpecialBuffer(), vol.getSpecialShapeInfo(), col.specialBuffer(), col.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&col}, {&vol});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW]
// Inverse of vol2colCuda: one grid-stride loop iteration per voxel of
// `volume`; sums every column entry whose kernel window covers that voxel
// (accumulation, since windows overlap). Reads strides directly out of
// colShapeInfo (indices 9..16). Per-thread coordinate scratch lives in
// dynamic shared memory (8 uints per thread).
template <typename T>
static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* vol = reinterpret_cast<T*>(volume);
__shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem;
__shared__ Nd4jLong volLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<uint*>(shmem);
// Output spatial extents and effective (dilated) kernel extents.
oD = colShapeInfo[6];
oH = colShapeInfo[7];
oW = colShapeInfo[8];
kD = dD * (colShapeInfo[3] - 1) + 1;
kH = dH * (colShapeInfo[4] - 1) + 1;
kW = dW * (colShapeInfo[5] - 1) + 1;
volLen = shape::length(volShapeInfo);
}
__syncthreads();
auto coords = sharedMem + threadIdx.x * 8;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop over all voxels of the volume.
for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, volShapeInfo, coords);
const auto volOffset = shape::getOffset(volShapeInfo, coords);
// Batch/channel part of the col offset (strides at indices 9, 10).
const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10];
// Padded voxel position.
const uint imD = coords[2] + pD;
const uint imH = coords[3] + pH;
const uint imW = coords[4] + pW;
// Range of output positions whose window can reach this voxel.
const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1;
const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1;
const uint colWstart = (imW < kW) ? 0 : (imW - kW) / sW + 1;
const uint colDend = nd4j::math::nd4j_min<uint>(imD / sD + 1, oD);
const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH);
const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW);
T val = 0;
// Accumulate over all (output position, kernel tap) pairs that map to
// this voxel; offsets not divisible by the dilation are not taps.
for(uint colD = colDstart; colD < colDend; ++colD) {
coords[2] = imD - colD * sD;
if(coords[2] % dD != 0) continue;
for(uint colH = colHstart; colH < colHend; ++colH) {
coords[3] = imH - colH * sH;
if(coords[3] % dH != 0) continue;
for(uint colW = colWstart; colW < colWend; ++colW) {
coords[4] = imW - colW * sW;
if(coords[4] % dW != 0) continue;
val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]];
}
}
}
vol[volOffset] = val;
}
}
//////////////////////////////////////////////////////////////////////////
// Typed launch helper for col2volCuda; instantiated per dtype by
// BUILD_SINGLE_SELECTOR from ConvolutionUtils::col2vol.
template <typename T>
static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* columns, const Nd4jLong* colShapeInfo,
void* volume, const Nd4jLong* volShapeInfo,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
hipLaunchKernelGGL(( col2volCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
// Host entry point: folds `col` [bS, iC, kD, kH, kW, oD, oH, oW] back into
// `vol` [bS, iC, iD, iH, iW], accumulating overlapping windows. Grid is
// sized per volume element; per-thread scratch goes in dynamic shared
// memory; synchronizes before returning.
void ConvolutionUtils::col2vol(nd4j::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
PointersManager manager(block.launchContext(), "col2vol");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (vol.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// Per-thread 8-coordinate scratch plus slack for the shared header vars.
const int sharedMem = col.rankOf() * sizeof(uint) * threadsPerBlock + 256;
NDArray::prepareSpecialUse({&vol}, {&col});
BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&vol}, {&col});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// forward 2d convolution implemented as im2col followed by a single matrix multiply
// (tensorDot) plus an optional bias add; template types are selected by the dispatching
// wrapper ConvolutionUtils::conv2d via BUILD_SINGLE_SELECTOR_TWICE
template <typename X, typename Y>
static void conv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weights [kH, kW, iC, oC] always
// bias [oC]
// output [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode);
std::vector<int> permutForOutput;
if(isNCHW)
permutForOutput = {0, 3, 1, 2}; // [bS, oH, oW, oC] -> [bS, oC, oH, oW]
else
// NHWC path: work on a heap-allocated NCHW copy of the input, deleted at the end
// NOTE(review): an exception thrown before the final delete would leak this copy
input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] if NHWC
// im2col scratch [bS, oH, oW, kH, kW, iC]; the mmul result lives in column-major ('f') order
NDArray col('c', {bS, oH, oW, kH, kW, iC}, input->dataType(), input->getContext());
NDArray colP = col.permute({0, 5, 3, 4, 1, 2}); // {bS, iC, kH, kW, oH, oW}
NDArray mmulResult('f', {bS*oH*oW, oC}, output->dataType(), output->getContext());
//----- calculation of output -----//
auto ctx = block.launchContext();
helpers::im2col(*ctx, *input, colP, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
MmulHelper::tensorDot(&col, weights, &mmulResult, {3,4,5}, {0,1,2}, {}); // [bS, oH, oW, kH, kW, iC] x [kH, kW, iC, oC] = [bS, oH, oW, oC]
//----- assign outTemp to output -----//
if(isNCHW) {
// reshape the flat [bS*oH*oW, oC] result and permute it to [bS, oC, oH, oW] before assigning
mmulResult.reshapei({bS, oH, oW, oC});
mmulResult.permutei(permutForOutput);
}
output->assign(mmulResult);
//----- add biases if required -----//
if(bias)
// output->applyBroadcast(broadcast::Add, {indIOioC}, bias);
helpers::addBias(block, *output, *bias, *output, isNCHW);
if(!isNCHW)
delete input; // free the temporary NCHW copy created above
}
//////////////////////////////////////////////////////////////////////////
// public entry point for 2d convolution: dispatches conv2d_ on the input's
// floating-point type (the same type is used for both template parameters)
void ConvolutionUtils::conv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// depthwise 2d convolution: every input channel is convolved with its own mC kernels
// (channels multiplier), implemented as im2col + per-channel tensorDot; the
// modifColumns/modifOutput specs describe permute+reshape views instead of
// materializing transposed copies
template <typename X, typename Y>
static void depthwiseConv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weights [kH, kW, iC, mC] always
// bias [oC] = iC*mC
// output [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width
int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
mC = weights->sizeAt(indWmC); // channels multiplier
std::vector<std::vector<Nd4jLong>> modifColumns = {{1,0,4,5,2,3}, {iC,bS*oH*oW,kH*kW}}; // [bS,iC,kH,kW,oH,oW] -> [iC,bS,oH,oW,kH,kW] -> [iC,bS*oH*oW,kH*kW]
std::vector<std::vector<Nd4jLong>> modifOutput;
std::vector<Nd4jLong> outReShape;
if(!isNCHW) {
// NHWC: reshape spec for the output plus a heap-allocated NCHW copy of the input
// NOTE(review): an exception thrown before the final delete would leak this copy
outReShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC]
modifOutput = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
input = new NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
}
else {
outReShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW]
modifOutput = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
}
// padding is (re)computed here only for SAME mode
if(paddingMode == 1) // SAME
ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW);
// im2col scratch [bS, iC, kH, kW, oH, oW]
NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());
NDArray outputReshaped = output->reshape(output->ordering(), outReShape);
helpers::im2col(*output->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
MmulHelper::tensorDot(&columns, weights, &outputReshaped, modifColumns, {{2,0,1,3},{iC,kH*kW,mC}}, modifOutput); // [iC, bS*oH*oW, kW*kH] x [iC, kH*kW, mC] = [iC, bS*oH*oW, mC]
if(bias)
// output->applyBroadcast(broadcast::Add, {indIOioC}, bias);
helpers::addBias(block, *output, *bias, *output, isNCHW);
if(!isNCHW)
delete input; // free the temporary NCHW copy created above
}
//////////////////////////////////////////////////////////////////////////
// public entry point for depthwise 2d convolution: dispatches depthwiseConv2d_ on the
// input's floating-point type (the same type is used for both template parameters)
void ConvolutionUtils::depthwiseConv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// separable 2d convolution: a depthwise pass, optionally followed by a 1x1 pointwise
// convolution that mixes channels; when weightsPoint is present the bias is applied
// only in the pointwise stage (nullptr is passed to the depthwise stage)
template <typename X, typename Y>
static void sconv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weightsDepth [kH, kW, iC, mC] always
// weightsPoint [1, 1, iC*mC, oC] always
// bias [oC], oC = iC*mC if weightsPoint=nullptr
// output is [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier, output channels, output height/width
int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
mC = weightsDepth->sizeAt(indWmC); // channels multiplier
// temporary buffer holding the depthwise result; only heap-allocated (and later deleted)
// when a pointwise stage follows, otherwise the final output is written directly
NDArray* outputDepth = output;
if(weightsPoint) // if pointwise convolution is expected
outputDepth = new NDArray(output->ordering(), !isNCHW ? std::vector<Nd4jLong>({bS, oH, oW, iC*mC}) : std::vector<Nd4jLong>({bS, iC*mC, oH, oW}), input->dataType(), input->getContext());
// ----- perform depthwise convolution (if weightsPoint is absent then oC = iC*mC) ----- //
ConvolutionUtils::depthwiseConv2d(block, input, weightsDepth, weightsPoint ? nullptr : bias, outputDepth, kH,kW, sH,sW, pH,pW, dH,dW, paddingMode, isNCHW);
// ----- perform pointwise convolution (oH = iH, oW = iW) ----- //
if (weightsPoint) {
ConvolutionUtils::conv2d(block, outputDepth, weightsPoint, bias, output, 1,1, 1,1, 0,0, 1,1, paddingMode, isNCHW); // in this case oH=iH, oW=iW
delete outputDepth;
}
}
//////////////////////////////////////////////////////////////////////////
// public entry point for separable 2d convolution: dispatches sconv2d_ on the input's
// floating-point type (the same type is used for both template parameters)
void ConvolutionUtils::sconv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), sconv2d_, (block, input, weightsDepth, weightsPoint, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// average pooling 2d forward kernel: one grid-stride iteration per output element;
// extraParam0 == 0 -> divide by the border-clipped window tap count (exclude padding),
// extraParam0 == 1 -> divide by kH*kW (include padding)
template <typename X, typename Z>
static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
// geometry and strides are identical for every thread -> computed once by thread 0
// NOTE(review): bS is written here but never read below
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
//Replace kernel H/W with *effective* kernel H/W accounting for dilatyon
kHEff = kH + (kH-1)*(dH-1);
kWEff = kW + (kW-1)*(dW-1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
// decompose the flat output index into [n, c, ph, pw]
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
// pooling window in input coordinates, before border clipping
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
// clip the window to the input, advancing/retreating in multiples of the dilation
if(hstart < 0){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH);
hstart += f * dH;
}
if(wstart < 0){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW);
wstart += f * dW;
}
if(hend > iH){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH);
hend -= f * dH;
}
if(wend > iW){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW);
wend -= f * dW;
}
//Accounts for dilation
int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW);
Z sum = 0.0f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW)
sum += static_cast<Z>(inSlice[h * strideY + w * strideX]);
int divide_factor = pool_size; //Case 0: exclude padding
if (extraParam0 == 1) //Case 1: include padding
divide_factor = kH * kW;
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor);
}
}
//////////////////////////////////////////////////////////////////////////
// Launches the average-pooling forward kernel on the context's stream with a fixed
// 512-block x 512-thread configuration; the kernel grid-strides over all output elements.
// avgPooling2dCuda uses only statically declared __shared__ storage, so no dynamic
// shared memory is requested (the former 4192-byte request was never read and only
// reduced occupancy).
template <typename X, typename Z>
static void avgPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    hipLaunchKernelGGL(( avgPooling2dCuda<X, Z>), dim3(512), dim3(512), 0, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// p-norm pooling 2d forward kernel: one grid-stride iteration per output element,
// computing z = (sum |x|^p)^(1/p) over each dilated, border-clipped window,
// where p = extraParam0.
// x (input) is [bS, iC, iH, iW], z (output) is [bS, iC, oH, oW].
template <typename X, typename Z>
static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {

    const auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);

    // geometry and strides are identical for every thread -> computed once by thread 0
    __shared__ int iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;

    if (threadIdx.x == 0) {
        iC = shape::sizeAt(xShapeInfo, 1);
        oH = shape::sizeAt(zShapeInfo, 2);
        oW = shape::sizeAt(zShapeInfo, 3);
        iH = shape::sizeAt(xShapeInfo, 2);
        iW = shape::sizeAt(xShapeInfo, 3);

        strideB = shape::stride(xShapeInfo)[0];
        strideC = shape::stride(xShapeInfo)[1];
        strideY = shape::stride(xShapeInfo)[2];
        strideX = shape::stride(xShapeInfo)[3];

        strideOB = shape::stride(zShapeInfo)[0];
        strideOC = shape::stride(zShapeInfo)[1];
        strideOY = shape::stride(zShapeInfo)[2];
        strideOX = shape::stride(zShapeInfo)[3];

        length = shape::length(zShapeInfo);

        // effective kernel extents accounting for dilation
        kHEff = kH + (kH-1)*(dH-1);
        kWEff = kW + (kW-1)*(dW-1);
    }
    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
        // decompose the flat output index into [n, c, ph, pw]
        const int pw = index % oW;
        const int ph = (index / oW) % oH;
        const int c = (index / oW / oH) % iC;
        const int n = index / oW / oH / iC;

        // pooling window in input coordinates, before border clipping
        int hstart = sH * ph - pH;
        int wstart = sW * pw - pW;
        int hend = hstart + kHEff;
        int wend = wstart + kWEff;

        // clip the window to the input, advancing/retreating in multiples of the dilation
        if (hstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH);
            hstart += f * dH;
        }
        if (wstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW);
            wstart += f * dW;
        }
        if (hend > iH) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH);
            hend -= f * dH;
        }
        if (wend > iW) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW);
            wend -= f * dW;
        }

        Z sum = 0.f;

        const X *inSlice = x + (n * strideB + c * strideC);

        for (int h = hstart; h < hend; h += dH)
            for (int w = wstart; w < wend; w += dW)
                sum += nd4j::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(nd4j::math::nd4j_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0);

        z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = nd4j::math::nd4j_pow<Z, Z, Z>(sum, (Z) 1.0f / extraParam0);
    }
}
//////////////////////////////////////////////////////////////////////////
// Launches the p-norm-pooling forward kernel on the context's stream with a fixed
// 512-block x 512-thread configuration; the kernel grid-strides over all output elements.
// pnormPooling2dCuda uses only statically declared __shared__ storage, so no dynamic
// shared memory is requested (the former 4192-byte request was never read and only
// reduced occupancy).
template <typename X, typename Z>
static void pnormPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    hipLaunchKernelGGL(( pnormPooling2dCuda<X, Z>), dim3(512), dim3(512), 0, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// max pooling 2d forward kernel: one grid-stride iteration per output element,
// taking the maximum over each dilated, border-clipped window.
// x (input) is [bS, iC, iH, iW], z (output) is [bS, iC, oH, oW];
// extraParam0 is unused for max pooling (kept for a uniform launcher signature).
template <typename X, typename Z>
static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {

    const auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);

    // geometry and strides are identical for every thread -> computed once by thread 0
    __shared__ int iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;

    if (threadIdx.x == 0) {
        iC = shape::sizeAt(xShapeInfo, 1);
        oH = shape::sizeAt(zShapeInfo, 2);
        oW = shape::sizeAt(zShapeInfo, 3);
        iH = shape::sizeAt(xShapeInfo, 2);
        iW = shape::sizeAt(xShapeInfo, 3);

        strideB = shape::stride(xShapeInfo)[0];
        strideC = shape::stride(xShapeInfo)[1];
        strideY = shape::stride(xShapeInfo)[2];
        strideX = shape::stride(xShapeInfo)[3];

        strideOB = shape::stride(zShapeInfo)[0];
        strideOC = shape::stride(zShapeInfo)[1];
        strideOY = shape::stride(zShapeInfo)[2];
        strideOX = shape::stride(zShapeInfo)[3];

        length = shape::length(zShapeInfo);

        // effective kernel extents accounting for dilation
        kHEff = kH + (kH-1)*(dH-1);
        kWEff = kW + (kW-1)*(dW-1);
    }
    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
        // decompose the flat output index into [n, c, ph, pw]
        const int pw = index % oW;
        const int ph = (index / oW) % oH;
        const int c = (index / oW / oH) % iC;
        const int n = index / oW / oH / iC;

        // pooling window in input coordinates, before border clipping
        int hstart = sH * ph - pH;
        int wstart = sW * pw - pW;
        int hend = hstart + kHEff;
        int wend = wstart + kWEff;

        // clip the window to the input, advancing/retreating in multiples of the dilation
        if (hstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH);
            hstart += f * dH;
        }
        if (wstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW);
            wstart += f * dW;
        }
        if (hend > iH) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH);
            hend -= f * dH;
        }
        if (wend > iW) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW);
            wend -= f * dW;
        }

        Z max = -nd4j::DataTypeUtils::max<Z>();

        const X *inSlice = x + (n * strideB + c * strideC);

        for (int h = hstart; h < hend; h += dH) {
            for (int w = wstart; w < wend; w += dW) {
                Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]);
                if (v > max)
                    max = v;
            }
        }

        z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max;
    }
}
//////////////////////////////////////////////////////////////////////////
// Launches the max-pooling forward kernel on the context's stream with a fixed
// 512-block x 512-thread configuration; the kernel grid-strides over all output elements.
// maxPooling2dCuda uses only statically declared __shared__ storage, so no dynamic
// shared memory is requested (the former 4192-byte request was never read and only
// reduced occupancy).
template <typename X, typename Z>
static void maxPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    hipLaunchKernelGGL(( maxPooling2dCuda<X,Z>), dim3(512), dim3(512), 0, *block.getCudaStream(), vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// forward 2d pooling dispatcher: picks the max/avg/pnorm kernel by poolingMode and the
// element type via BUILD_SINGLE_SELECTOR_TWICE, then blocks until the kernel completes
void ConvolutionUtils::pooling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
switch (poolingMode) {
case MAX_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
case AVG_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
case PNORM_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
default:
throw std::runtime_error("Pooling2D: Unknown PoolingType used");
}
// manual device-buffer bookkeeping (this function does not use prepare/registerSpecialUse)
output.tickWriteDevice();
input.tickReadDevice();
// synchronize so asynchronous launch/execution errors surface here, not at a later call
auto result = hipStreamSynchronize(*block.launchContext()->getCudaStream());
if (result != 0)
throw cuda_exception::build("Pooling2D failed", result);
}
//////////////////////////////////////////////////////////////////////////
// 3d pooling forward kernel: one output element per thread (no grid-stride)
// poolingMode: 0 - max, 1 - avg, 2 - pnorm
// extraParam0: divisor mode for avg (0 - exclude padding, 1 - include padding),
//              norm order p for pnorm
template <typename T>
__global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ Nd4jLong *sharedMem, zLen;
if (threadIdx.x == 0) {
// dynamic shared memory supplies a rank-sized Nd4jLong coordinate scratch per thread
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
// effective kernel extents accounting for dilation
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
// spatial input sizes read straight from the shapeInfo dim entries
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
// surplus threads past the output length simply exit
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// pooling window in input coordinates, before border clipping
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
// clip the window to the input, moving in multiples of the dilation
if(dstart < 0)
dstart += dD * ((-dstart + dD - 1) / dD);
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(dend > iD)
dend -= dD * ((dend - iD + dD - 1) / dD);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
// coords[2..4] are reused below as window iterators; zOffset was already computed above
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max)
max = val;
}
}
}
z[zOffset] = max;
}
break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { //Exclude padding
// a, b, c = number of taps actually visited per dimension (ceil of extent / dilation)
uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(a * b * c); // /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
}
else if (extraParam0 == 1) //Include padding
sum /= kProd;
z[zOffset] = sum;
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = nd4j::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0);
z[zOffset] = sum;
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
// Thin host-side shim: forwards every pooling parameter unchanged to the
// pooling3dCuda<T> kernel, launched on the supplied HIP stream with the given
// grid/block/shared-memory configuration.
template <typename T>
static void pooling3dCudaLauncher(const int numBlocks, const int numThreads, const int shmemBytes, const hipStream_t *cudaStream,
                                  const void* inBuff, const Nd4jLong* inShapeInfo,
                                  void* outBuff, const Nd4jLong* outShapeInfo,
                                  const int kD, const int kH, const int kW,
                                  const int sD, const int sH, const int sW,
                                  const int pD, const int pH, const int pW,
                                  const int dD, const int dH, const int dW,
                                  const int poolingMode, const int extraParam0) {

    hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(numBlocks), dim3(numThreads), shmemBytes, *cudaStream,
                       inBuff, inShapeInfo, outBuff, outShapeInfo,
                       kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// host wrapper for the 3d pooling forward kernel: one thread per output element;
// dynamic shared memory holds a rank-sized Nd4jLong coordinate scratch per thread
void ConvolutionUtils::pooling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
PointersManager manager(block.launchContext(), "pooling3d");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
// one thread per output element
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// per-thread coordinate scratch (output rank Nd4jLongs each) plus slack
const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// backward pass of 2d pooling: one thread per gradO element; each thread scatters its
// gradient contribution into gradI (z) with atomicAdd, so the caller must zero gradI
// first (ConvolutionUtils::pooling2dBP does this via gradI.nullify())
// poolingMode: 0 - max, 1 - avg, 2 - pnorm (extraParam0 = divisor mode / norm order p)
template <typename T>
__global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x: input [bS, iC, iH, iW]
// y: gradO [bS, iC, oH, oW]
// z: gradI [bS, iC, iH, iW] -> gradI is output in this function
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
Nd4jLong coord2, coord3;
__shared__ int rank, kHeff, kWeff, iH, iW, kProd;
__shared__ Nd4jLong *sharedMem, yLen;
if (threadIdx.x == 0) {
// dynamic shared memory supplies a rank-sized Nd4jLong coordinate scratch per thread
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
yLen = shape::length(yShapeInfo);
rank = 4;
// effective kernel extents accounting for dilation
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iH = xShapeInfo[3];
iW = xShapeInfo[4];
kProd = kH * kW;
}
__syncthreads();
// surplus threads past the gradO length simply exit
const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;
if(yInd >= yLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(yInd, yShapeInfo, coords);
const auto yOffset = shape::getOffset(yShapeInfo, coords);
// pooling window in input coordinates, then clipped to the input in dilation steps
int hstart = coords[2] * sH - pH;
int wstart = coords[3] * sW - pW;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
// coord2/coord3 track the argmax position (initialized to the window start)
coord2 = hstart;
coord3 = wstart;
T max = -DataTypeUtils::max<T>();
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) {
max = val;
coord2 = coords[2];
coord3 = coords[3];
}
}
}
// route the whole gradient to the argmax position
coords[2] = coord2;
coords[3] = coord3;
auto zOffset = shape::getOffset(zShapeInfo, coords);
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]);
//z[zOffset] += y[yOffset];
}
break;
/*** avg ***/
case 1: {
// spread the gradient uniformly over the visited taps (or the full kernel size
// when padding is included)
T val = y[yOffset];
if (extraParam0 == 0) //Exclude padding
val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
else if (extraParam0 == 1) //Include padding
val /= kProd;
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
}
break;
/*** pnorm ***/
case 2: {
// chain rule for the p-norm: dL/dx = dL/dz * |x|^(p-1) * sgn(x) * (sum |x|^p)^((1-p)/p)
T sum = static_cast<T>(0.);
T val = y[yOffset];
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) {
const auto xOffset = shape::getOffset(xShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset]));
}
}
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
// Thin host-side shim: forwards every parameter unchanged to the pooling2dBPCuda<T>
// backward-pooling kernel, launched on the supplied HIP stream with the given
// grid/block/shared-memory configuration.
template <typename T>
static void pooling2dBPCudaLauncher(const int numBlocks, const int numThreads, const int shmemBytes, const hipStream_t *cudaStream,
                                    const void* inBuff, const Nd4jLong* inShapeInfo,
                                    const void* gradOBuff, const Nd4jLong* gradOShapeInfo,
                                    void* gradIBuff, const Nd4jLong* gradIShapeInfo,
                                    const int kH, const int kW,
                                    const int sH, const int sW,
                                    const int pH, const int pW,
                                    const int dH, const int dW,
                                    const int poolingMode, const int extraParam0) {

    hipLaunchKernelGGL(( pooling2dBPCuda<T>), dim3(numBlocks), dim3(numThreads), shmemBytes, *cudaStream,
                       inBuff, inShapeInfo, gradOBuff, gradOShapeInfo, gradIBuff, gradIShapeInfo,
                       kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// backward pass of 2d pooling: scatters gradO back through the pooling windows into gradI
void ConvolutionUtils::pooling2dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// initial zeroing of gradI
// (required: the kernel accumulates contributions with atomicAdd)
gradI.nullify();
PointersManager manager(block.launchContext(), "pooling2dBP");
const int threadsPerBlock = 256;
// one thread per gradO element
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// per-thread coordinate scratch (gradO rank Nd4jLongs each) plus slack
const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&input, &gradO});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of 3d pooling (max / avg / pnorm): each thread takes one gradO (y)
// element and scatters its contribution back into gradI (z) via atomicAdd.
//
// Launch contract (see ConvolutionUtils::pooling3dBP):
//   - one thread per gradO element; threads beyond yLen exit early
//   - dynamic shared memory holds blockDim.x * 5 Nd4jLong coordinates (one 5-tuple per thread)
//   - z (gradI) must be zero-initialized by the caller, because results are accumulated
//
// poolingMode: 0 - max, 1 - avg, 2 - pnorm
// extraParam0: avg divisor mode (0 exclude padding / 1 include padding) or p of pnorm
template <typename T>
__global__ static void pooling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {

    // x: input [bS, iC, iD, iH, iW]
    // y: gradO [bS, iC, oD, oH, oW]
    // z: gradI [bS, iC, iD, iH, iW] -> gradI is output in this function

    const T* x = reinterpret_cast<const T*>(vx);
    const T* y = reinterpret_cast<const T*>(vy);
          T* z = reinterpret_cast<T*>(vz);

    Nd4jLong coord2, coord3, coord4;                                   // remembered argmax coordinates (max mode only)
    __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
    __shared__ Nd4jLong *sharedMem, yLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);

        yLen = shape::length(yShapeInfo);
        rank = 5;

        kDeff = kD + (kD - 1) * (dD - 1);                              // effective (dilated) kernel extents
        kHeff = kH + (kH - 1) * (dH - 1);
        kWeff = kW + (kW - 1) * (dW - 1);

        iD = xShapeInfo[3];                                            // input spatial dims (shapeInfo stores dims at indices 1..rank)
        iH = xShapeInfo[4];
        iW = xShapeInfo[5];

        kProd = kD * kH * kW;
    }
    __syncthreads();

    const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;

    if(yInd >= yLen)
        return;

    // per-thread 5-element coordinate scratch carved out of dynamic shared memory
    auto coords = sharedMem + threadIdx.x * rank;

    shape::index2coords(yInd, yShapeInfo, coords);

    const auto yOffset = shape::getOffset(yShapeInfo, coords);

    // pooling window bounds in input space for this output position
    int dstart = coords[2] * sD - pD;
    int hstart = coords[3] * sH - pH;
    int wstart = coords[4] * sW - pW;
    int dend = dstart + kDeff;
    int hend = hstart + kHeff;
    int wend = wstart + kWeff;

    // clamp window to the input volume, stepping by the dilation so only valid taps remain
    if(dstart < 0)
        dstart += dD * ((-dstart + dD - 1) / dD);
    if(hstart < 0)
        hstart += dH * ((-hstart + dH - 1) / dH);
    if(wstart < 0)
        wstart += dW * ((-wstart + dW - 1) / dW);
    if(dend > iD)
        dend -= dD * ((dend - iD + dD - 1) / dD);
    if(hend > iH)
        hend -= dH * ((hend - iH + dH - 1) / dH);
    if(wend > iW)
        wend -= dW * ((wend - iW + dW - 1) / dW);

    switch (poolingMode) {

        /*** max ***/
        case 0: {
            // route the whole gradient to the argmax position of the forward window
            // NOTE(review): if the clamped window were empty (dend <= dstart etc.),
            // coord2/3/4 would be read uninitialized below — presumably upstream
            // guarantees non-empty windows; confirm.
            T max = -DataTypeUtils::max<T>();
            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
                        T val = x[shape::getOffset(xShapeInfo, coords)];
                        if (val > max) {
                            max = val;
                            coord2 = coords[2];
                            coord3 = coords[3];
                            coord4 = coords[4];
                        }
                    }
                }
            }
            coords[2] = coord2;
            coords[3] = coord3;
            coords[4] = coord4;
            nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], y[yOffset]);
        }
        break;

        /*** avg ***/
        case 1: {
            // spread the gradient evenly over the window; divisor depends on padding mode
            T val = y[yOffset];
            if (extraParam0 == 0)         //Exclude padding
                val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
            else if (extraParam0 == 1)    //Include padding
                val /= kProd;

            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
                        nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
        }
        break;

        /*** pnorm ***/
        case 2: {
            // d/dx of the p-norm: grad * |x|^(p-1) * sgn(x) * sum^((1-p)/p)
            T sum = static_cast<T>(0.);
            T val = y[yOffset];

            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
                        sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);

            val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);

            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) {
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
                        const auto xOffset = shape::getOffset(xShapeInfo, coords);
                        const auto zOffset = shape::getOffset(zShapeInfo, coords);
                        nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset]));
                    }
                }
            }
        }
        break;
    }
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int kD, const int kH, const int kW,
const int sD, const int sH, const int sW,
const int pD, const int pH, const int pW,
const int dD, const int dH, const int dW,
const int poolingMode, const int extraParam0) {
hipLaunchKernelGGL(( pooling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of 3d pooling: dispatches pooling3dBPCudaLauncher on the input's
// float data type. gradI is the output and is accumulated into by the kernel.
void ConvolutionUtils::pooling3dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {

    // the kernel scatters with atomics, so gradI must start from zeros
    gradI.nullify();

    PointersManager manager(block.launchContext(), "pooling3dBP");

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid   = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;   // one thread per gradO element
    const int sharedMem       = threadsPerBlock * gradO.rankOf() * sizeof(Nd4jLong) + 128;    // per-thread coordinate scratch

    NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
    BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&input, &gradO});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of 2d convolution via the im2col/col2im formulation:
//   gradW = im2col(input) . gradO        (tensor contraction over bS, oH, oW)
//   gradB = sum(gradO) over bS, oH, oW
//   gradI = col2im(weights . gradO)
// Any of gradW / gradB may be null, in which case that gradient is skipped.
// gradI is always computed.
template <typename X, typename Y>
static void conv2dBP_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {

    // input   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
    // weights [kH, kW, iC, oC] always
    // bias    [oC]
    // gradO   [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next
    // gradI   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon
    // gradW   [kH, kW, iC, oC] always
    // gradB   [oC]

    // kH          filter(kernel) height
    // kW          filter(kernel) width
    // sH          strides height
    // sW          strides width
    // pH          paddings height
    // pW          paddings width
    // dH          dilations height
    // dW          dilations width
    // paddingMode 0-VALID, 1-SAME
    // isNCHW      0-NHWC, 1-NCHW

    int bS, iC, iH, iW, oC, oH, oW;                     // batch size, input channels, input height/width, output channels, output height/width;
    int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH;   // corresponding indexes
    ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);

    // pH/pW are in-out: adjusted here for SAME padding
    ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode);

    std::vector<int> gradOaxesForDot;

    if(!isNCHW) {
        gradOaxesForDot = {0, 1, 2};                                           // bS, oH, oW
        // NOTE(review): input/gradI pointers are rebound to heap-allocated NCHW permuted
        // views and deleted at the end of the function; this leaks on an exception in between.
        input = new NDArray(input->permute({0, 3, 1, 2}));                     // [bS, iH, iW, iC] -> [bS, iC, iH, iW]
        gradI = new NDArray(gradI->permute({0, 3, 1, 2}));                     // [bS, iH, iW, iC] -> [bS, iC, iH, iW]
    } else {
        gradOaxesForDot = {0, 2, 3};                                           // bS, oH, oW
    }

    // im2col workspace shared by the gradW and gradI computations
    NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());

    // ----- calculation of gradW ----- //
    if(gradW) {
        auto ctx = block.launchContext();
        helpers::im2col(*ctx, *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext()));  // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
        nd4j::MmulHelper::tensorDot(&columns, gradO, gradW, {0,4,5}, gradOaxesForDot, {2, 0, 1, 3});  // [bS, iC, kH, kW, oH, oW] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [iC, kH, kW, oC]
    }

    // ----- calculation of gradB ----- //
    if(gradB) {
        NDArray* gradBR = gradB;
        // flatten a rank-2 bias gradient to a vector before reducing into it
        if(gradB->rankOf() == 2)
            gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()}));
        gradO->reduceAlongDimension(reduce::Sum, gradBR, gradOaxesForDot);     // sum over bS, oH, oW
        if(gradBR != gradB)
            delete gradBR;
    }

    //----- calculation of gradI -----//
    nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {indWoC}, {indIOioC}, {2, 3, 1, 0, 4, 5});  // [kH, kW, iC, oC]/[oC, iC, kH, kW]] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [kH, kW, iC, bS, oH, oW]

    helpers::col2im(*block.launchContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW);         // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW]

    if(!isNCHW) {
        delete input;
        delete gradI;
    }
}
//////////////////////////////////////////////////////////////////////////
// Type-dispatching entry point for the 2d convolution backward pass: instantiates
// conv2dBP_<X,Y> for the input's float data type (same type used for input and output).
// See conv2dBP_ for parameter semantics.
void ConvolutionUtils::conv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
    BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2dBP_, (block, input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of depthwise 2d convolution via im2col/col2im, contracting per input
// channel (each input channel has its own mC filters, oC = iC*mC):
//   gradW = im2col(input) . gradO    (per-channel batched contraction)
//   gradB = sum(gradO) over bS, oH, oW
//   gradI = col2im(weights . gradO)
// gradB may be null, in which case the bias gradient is skipped.
template <typename X, typename Y>
static void depthwiseConv2dBP_(const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {

    // input   [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW)
    // weights [kH, kW, iC, mC] always
    // bias    [oC] = [iC*mC]
    // gradO   [bS, oH, oW, oC] (NDHWC) or [bS, oC, oH, oW] (NCDHW), epsilon_next
    // gradI   [bS, iH, iW, iC] (NDHWC) or [bS, iC, iH, iW] (NCDHW), epsilon
    // gradW   [kH, kW, iC, mC] always
    // gradB   [oC]

    // kH          filter(kernel) height
    // kW          filter(kernel) width
    // sH          strides height
    // sW          strides width
    // pH          paddings height
    // pW          paddings width
    // dH          dilations height
    // dW          dilations width
    // paddingMode 0-VALID, 1-SAME
    // isNCHW      0-NHWC, 1-NCHW

    int bS, iC, iH, iW, mC, oC, oH, oW;                     // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width
    int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH;   // corresponding indexes
    ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
    mC = weights->sizeAt(indWmC);                           // channels multiplier

    // axis-permutation / reshape recipes consumed by tensorDot below
    std::vector<std::vector<Nd4jLong>> modifColumns = {{1,2,3,0,4,5}, {iC, kH*kW, bS*oH*oW}};  // [bS,iC,kH,kW,oH,oW] -> [iC, kH*kW, bS*oH*oW]
    std::vector<std::vector<Nd4jLong>> modifGradO1, modifGradO2;
    std::vector<Nd4jLong> gradOreShape;

    if(!isNCHW) {
        gradOreShape = {bS, oH, oW, iC, mC};                // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC]
        modifGradO1 = {{3,0,1,2,4},{iC, bS*oH*oW, mC}};     // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
        modifGradO2 = {{3,0,1,2},{iC, mC, bS*oH*oW}};       // [bS,oH,oW,iC*mC] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW]
        // NOTE(review): input/gradI pointers are rebound to heap-allocated NCHW permuted
        // views and deleted at the end of the function; this leaks on an exception in between.
        input = new NDArray(input->permute({0, 3, 1, 2}));  // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
        gradI = new NDArray(gradI->permute({0, 3, 1, 2}));  // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
    }
    else {
        gradOreShape = {bS, iC, mC, oH, oW};                // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW]
        modifGradO1 = {{1,0,3,4,2},{iC, bS*oH*oW, mC}};     // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
        modifGradO2 = {{1,0,2,3},{iC, mC, bS*oH*oW}};       // [bS,iC*mC,oH,oW] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW]
    }

    // pH/pW are in-out: adjusted here for SAME padding only
    if(paddingMode == 1)                                    // SAME
        ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW);

    // im2col workspace shared by the gradW and gradI computations
    NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());
    NDArray gradOreshaped = gradO->reshape(gradO->ordering(), gradOreShape);

    // ----- calculation of gradW and gradB ----- //

    helpers::im2col(*input->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext()));  // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
    nd4j::MmulHelper::tensorDot(&columns, &gradOreshaped, gradW, modifColumns, modifGradO1, {{2,0,1,3},{iC,kH*kW,mC}});  // [iC, kW*kH, bS*oH*oW] x [iC, bS*oH*oW, mC] = [iC, kH*kW, mC]

    // ----- calculation of gradB ----- //
    if(gradB) {
        NDArray* gradBR = gradB;
        // flatten a rank-2 bias gradient to a vector before reducing into it
        if(gradB->rankOf() == 2)
            gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()}));
        gradO->reduceAlongDimension(reduce::Sum, gradBR, {0,indOoH,indOoH+1});  // sum over bS, oH, oW
        if(gradBR != gradB)
            delete gradBR;
    }

    //----- calculation of gradI -----//
    nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {{2,0,1,3},{iC,kH*kW,mC}}, modifGradO2, modifColumns);  // [iC, kH*kW, mC] x [iC, mC, bS*oH*oW] = [iC, kW*kH, bS*oH*oW]
    helpers::col2im(*input->getContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW);                       // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW]

    if(!isNCHW) {
        delete input;
        delete gradI;
    }
}
//////////////////////////////////////////////////////////////////////////
// Type-dispatching entry point for the depthwise 2d convolution backward pass:
// instantiates depthwiseConv2dBP_<X,Y> for the input's float data type.
// The graph context is accepted for interface symmetry but not forwarded.
void ConvolutionUtils::depthwiseConv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
    BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2dBP_, (input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// Nearest-neighbor 2d upsampling: each thread fills one output element with the
// input element it maps onto (output coords divided by the upsampling factors).
// Launch contract: one thread per output element; dynamic shared memory holds
// blockDim.x * 4 Nd4jLong coordinates (one 4-tuple per thread).
template <typename T>
__global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) {

    // x: [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC)
    // z: [bS, iC, factorH*iH, factorW*iW] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC)

    const auto x = reinterpret_cast<const T*>(vx);
    const auto z = reinterpret_cast<T*>(vz);

    const int rank  = 4;
    const int dimIH = isNCHW ? 2 : 1;       // index of the height dimension

    extern __shared__ unsigned char shmem[];
    Nd4jLong* coords = reinterpret_cast<Nd4jLong*>(shmem) + threadIdx.x * rank;   // per-thread coordinate scratch

    const Nd4jLong zLen = shape::length(zShapeInfo);
    const auto zInd = blockIdx.x * blockDim.x + threadIdx.x;

    if(zInd >= zLen)
        return;

    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    // collapse output spatial coordinates onto the (smaller) input grid
    coords[dimIH]     /= factorH;
    coords[dimIH + 1] /= factorW;

    z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int factorH, const int factorW, const bool isNCHW) {
hipLaunchKernelGGL(( upsampling2dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW);
}
//////////////////////////////////////////////////////////////////////////
// Nearest-neighbor 2d upsampling: dispatches upsampling2dCudaLauncher on the
// input's float data type. output must already have the upsampled shape.
void ConvolutionUtils::upsampling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) {

    PointersManager manager(block.launchContext(), "upsampling2d");

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid   = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;   // one thread per output element
    const int sharedMem       = threadsPerBlock * output.rankOf() * sizeof(Nd4jLong) + 128;    // per-thread coordinate scratch

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Nearest-neighbor 3d upsampling: each thread fills one output element with the
// input element it maps onto (output coords divided by the upsampling factors).
// Launch contract: one thread per output element; dynamic shared memory holds
// blockDim.x * 5 Nd4jLong coordinates (one 5-tuple per thread).
template <typename T>
__global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {

    // x: [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
    // z: [bS, iC, factorD*iD, factorH*iH, factorW*iW] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)

    const auto x = reinterpret_cast<const T*>(vx);
    const auto z = reinterpret_cast<T*>(vz);

    const int rank  = 5;
    const int dimID = isNCDHW ? 2 : 1;      // index of the depth dimension

    extern __shared__ unsigned char shmem[];
    Nd4jLong* coords = reinterpret_cast<Nd4jLong*>(shmem) + threadIdx.x * rank;   // per-thread coordinate scratch

    const Nd4jLong zLen = shape::length(zShapeInfo);
    const auto zInd = blockIdx.x * blockDim.x + threadIdx.x;

    if(zInd >= zLen)
        return;

    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    // collapse output spatial coordinates onto the (smaller) input grid
    coords[dimID]     /= factorD;
    coords[dimID + 1] /= factorH;
    coords[dimID + 2] /= factorW;

    z[zOffset] = x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
hipLaunchKernelGGL(( upsampling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
// Nearest-neighbor 3d upsampling: dispatches upsampling3dCudaLauncher on the
// input's float data type. output must already have the upsampled shape.
void ConvolutionUtils::upsampling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {

    PointersManager manager(block.launchContext(), "upsampling3d");

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid   = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;   // one thread per output element
    const int sharedMem       = threadsPerBlock * output.rankOf() * sizeof(Nd4jLong) + 128;    // per-thread coordinate scratch

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of nearest-neighbor 2d upsampling: each thread owns one gradI (z)
// element and sums the factorH*factorW gradO (x) elements that map onto it.
// The factors are recovered from the ratio of the two shapes, so no factor
// arguments are needed. Launch contract: one thread per gradI element; dynamic
// shared memory holds blockDim.x * 4 Nd4jLong coordinates (one 4-tuple per thread).
template <typename T>
__global__ static void upsampling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) {

    // x (gradO): [bS, iC, factorH*iH, factorW*iW] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC)
    // z (gradI): [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC)

    const auto x = reinterpret_cast<const T*>(vx);
    const auto z = reinterpret_cast<T*>(vz);

    const int rank  = 4;
    const int dimIH = isNCHW ? 2 : 1;       // index of the height dimension

    // upsampling factors recovered from the shape infos (dims live at indices 1..rank)
    const uint factorH = xShapeInfo[dimIH + 1] / zShapeInfo[dimIH + 1];
    const uint factorW = xShapeInfo[dimIH + 2] / zShapeInfo[dimIH + 2];

    extern __shared__ unsigned char shmem[];
    Nd4jLong* coords = reinterpret_cast<Nd4jLong*>(shmem) + threadIdx.x * rank;   // per-thread coordinate scratch

    const Nd4jLong zLen = shape::length(zShapeInfo);
    const auto zInd = blockIdx.x * blockDim.x + threadIdx.x;

    if(zInd >= zLen)
        return;

    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    const Nd4jLong xH0 = coords[dimIH] * factorH;       // first gradO row mapping onto this gradI element
    const Nd4jLong xW0 = coords[dimIH + 1] * factorW;   // first gradO column

    T sum = 0;
    for(coords[dimIH] = xH0; coords[dimIH] < xH0 + factorH; ++coords[dimIH])
        for(coords[dimIH + 1] = xW0; coords[dimIH + 1] < xW0 + factorW; ++coords[dimIH + 1])
            sum += x[shape::getOffset(xShapeInfo, coords)];

    z[zOffset] = sum;
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCHW) {
hipLaunchKernelGGL(( upsampling2dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCHW);
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of nearest-neighbor 2d upsampling: dispatches
// upsampling2dBPCudaLauncher on gradI's float data type. The upsampling factors
// are inferred inside the kernel from the gradO/gradI shape ratio.
void ConvolutionUtils::upsampling2dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCHW) {

    PointersManager manager(block.launchContext(), "upsampling2d_bp");

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid   = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;   // one thread per gradI element
    const int sharedMem       = threadsPerBlock * gradI.rankOf() * sizeof(Nd4jLong) + 128;    // per-thread coordinate scratch

    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of nearest-neighbor 3d upsampling: each thread owns one gradI (z)
// element and sums the factorD*factorH*factorW gradO (x) elements that map onto it.
// The factors are recovered from the ratio of the two shapes. Launch contract: one
// thread per gradI element; dynamic shared memory holds blockDim.x * 5 Nd4jLong
// coordinates (one 5-tuple per thread).
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {

    // x (gradO): [bS, iC, factorD*iD, factorH*iH, factorW*iW] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
    // z (gradI): [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)

    const auto x = reinterpret_cast<const T*>(vx);
    const auto z = reinterpret_cast<T*>(vz);

    const int rank  = 5;
    const int dimID = isNCDHW ? 2 : 1;      // index of the depth dimension

    // upsampling factors recovered from the shape infos (dims live at indices 1..rank)
    const uint factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
    const uint factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
    const uint factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];

    extern __shared__ unsigned char shmem[];
    Nd4jLong* coords = reinterpret_cast<Nd4jLong*>(shmem) + threadIdx.x * rank;   // per-thread coordinate scratch

    const Nd4jLong zLen = shape::length(zShapeInfo);
    const auto zInd = blockIdx.x * blockDim.x + threadIdx.x;

    if(zInd >= zLen)
        return;

    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    const Nd4jLong xD0 = coords[dimID] * factorD;       // first gradO depth slice mapping onto this gradI element
    const Nd4jLong xH0 = coords[dimID + 1] * factorH;   // first gradO row
    const Nd4jLong xW0 = coords[dimID + 2] * factorW;   // first gradO column

    T sum = 0;
    for(coords[dimID] = xD0; coords[dimID] < xD0 + factorD; ++coords[dimID])
        for(coords[dimID + 1] = xH0; coords[dimID + 1] < xH0 + factorH; ++coords[dimID + 1])
            for(coords[dimID + 2] = xW0; coords[dimID + 2] < xW0 + factorW; ++coords[dimID + 2])
                sum += x[shape::getOffset(xShapeInfo, coords)];

    z[zOffset] = sum;
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const bool isNCDHW) {
hipLaunchKernelGGL(( upsampling3dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
// Backward pass of nearest-neighbor 3d upsampling: dispatches
// upsampling3dBPCudaLauncher on gradI's float data type. The upsampling factors
// are inferred inside the kernel from the gradO/gradI shape ratio.
void ConvolutionUtils::upsampling3dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {

    PointersManager manager(block.launchContext(), "upsampling3d_bp");

    const int threadsPerBlock = MAX_NUM_THREADS / 2;
    const int blocksPerGrid   = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;   // one thread per gradI element
    const int sharedMem       = threadsPerBlock * gradI.rankOf() * sizeof(Nd4jLong) + 128;    // per-thread coordinate scratch

    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});

    manager.synchronize();
}
}
} | 8dff04194cc365ccd3e47e30f48a74e0129803b0.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/convolutions.h>
#include <ops/declarable/helpers/im2col.h>
#include <ops/declarable/helpers/col2im.h>
#include<ops/declarable/helpers/addBias.h>
#include <exceptions/cuda_exception.h>
#include <NDArrayFactory.h>
#include <MmulHelper.h>
#include <PointersManager.h>
#include <templatemath.h>
namespace nd4j {
namespace ops {
//////////////////////////////////////////////////////////////////////////
// vol [bS, iC, iD, iH, iW] is convoluted to col [bS, iC, kD, kH, kW, oD, oH, oW]
template <typename T>
static __global__ void vol2colCuda(const void* volume, const Nd4jLong* volShapeInfo, void* columns, const Nd4jLong* colShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* vol = reinterpret_cast<const T*>(volume);
T* col = reinterpret_cast<T*>(columns);
__shared__ int colRank, volRank;
__shared__ Nd4jLong colLen, iD, iH, iW, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
volRank = 5;
colRank = 8;
colLen = shape::length(colShapeInfo);
iD = volShapeInfo[3];
iH = volShapeInfo[4];
iW = volShapeInfo[5];
}
__syncthreads();
const auto colInd = threadIdx.x + blockIdx.x * blockDim.x;
if(colInd >= colLen)
return;
auto coords = sharedMem + threadIdx.x * colRank;
shape::index2coords(colInd, colShapeInfo, coords);
// const auto colW = coords[7];
// const auto colH = coords[6];
// const auto colD = coords[5];
// const auto kCol = coords[4];
// const auto kRow = coords[3];
// const auto kDep = coords[2];
// const auto c = coords[1];
// const auto b = coords[0];
const auto colOffset = shape::getOffset(colShapeInfo, coords);
coords[2] = -pD + coords[2] * dD + coords[5] * sD; // const auto volDep = (-pD + kDep * dD) + colD * sD;
coords[3] = -pH + coords[3] * dH + coords[6] * sH; // const auto volRow = (-pH + kRow * dH) + colH * sH;
coords[4] = -pW + coords[4] * dW + coords[7] * sW; // const auto volCol = (-pW + kCol * dW) + colW * sW;
if (static_cast<unsigned>(coords[2]) >= static_cast<unsigned>(iD) || static_cast<unsigned>(coords[3]) >= static_cast<unsigned>(iH) || static_cast<unsigned>(coords[4]) >= static_cast<unsigned>(iW))
col[colOffset] = static_cast<T>(0.);
else
col[colOffset] = vol[shape::getOffset(volShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void vol2colCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* volume, const Nd4jLong* volShapeInfo,
void* columns, const Nd4jLong* colShapeInfo,
const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
vol2colCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(volume, volShapeInfo, columns, colShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::vol2col(nd4j::graph::Context& block, const NDArray& vol, NDArray& col, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
PointersManager manager(block.launchContext(), "vol2col");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (col.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&col}, {&vol});
BUILD_SINGLE_SELECTOR(vol.dataType(), vol2colCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), vol.getSpecialBuffer(), vol.getSpecialShapeInfo(), col.specialBuffer(), col.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&col}, {&vol});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kD, kH, kW, oD, oH, oW] to be de-convoluted to volume [bS, iC, iD, iH, iW]
// Inverse of vol2col: each volume element [bS, iC, iD, iH, iW] accumulates the sum of all
// column entries [bS, iC, kD, kH, kW, oD, oH, oW] that were extracted from it (3d deconvolution).
// Launch contract: dynamic shared memory must hold 8 uints per thread (per-thread coordinate scratch).
template <typename T>
static __global__ void col2volCuda(const void* columns, const Nd4jLong* colShapeInfo, void* volume, const Nd4jLong* volShapeInfo, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* vol = reinterpret_cast<T*>(volume);
// kD/kH/kW hold the *effective* (dilated) kernel extents, filled in by thread 0 below
__shared__ uint kD, kH, kW, oD, oH, oW, *sharedMem;
__shared__ Nd4jLong volLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<uint*>(shmem);
// column shape-info layout assumed: dims at [1..8] = bS, iC, kD, kH, kW, oD, oH, oW,
// strides at [9..16] — all reads below use these fixed indices
oD = colShapeInfo[6];
oH = colShapeInfo[7];
oW = colShapeInfo[8];
kD = dD * (colShapeInfo[3] - 1) + 1;
kH = dH * (colShapeInfo[4] - 1) + 1;
kW = dW * (colShapeInfo[5] - 1) + 1;
volLen = shape::length(volShapeInfo);
}
__syncthreads();
// per-thread 8-element coordinate buffer carved out of dynamic shared memory
auto coords = sharedMem + threadIdx.x * 8;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// grid-stride loop over all volume elements
for (Nd4jLong i = tid; i < volLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, volShapeInfo, coords);
const auto volOffset = shape::getOffset(volShapeInfo, coords);
// batch/channel contribution to the column offset (strides [9], [10])
const auto bSiCoffset = coords[0] * colShapeInfo[9] + coords[1] * colShapeInfo[10];
// position of this volume element inside the padded input
const uint imD = coords[2] + pD;
const uint imH = coords[3] + pH;
const uint imW = coords[4] + pW;
// range of output positions whose (dilated) kernel window covers this input position
const uint colDstart = (imD < kD) ? 0 : (imD - kD) / sD + 1;
const uint colHstart = (imH < kH) ? 0 : (imH - kH) / sH + 1;
const uint colWstart = (imW < kW) ? 0 : (imW - kW) / sW + 1;
const uint colDend = nd4j::math::nd4j_min<uint>(imD / sD + 1, oD);
const uint colHend = nd4j::math::nd4j_min<uint>(imH / sH + 1, oH);
const uint colWend = nd4j::math::nd4j_min<uint>(imW / sW + 1, oW);
T val = 0;
for(uint colD = colDstart; colD < colDend; ++colD) {
coords[2] = imD - colD * sD;
// skip kernel offsets that do not land on the dilation grid
if(coords[2] % dD != 0) continue;
for(uint colH = colHstart; colH < colHend; ++colH) {
coords[3] = imH - colH * sH;
if(coords[3] % dH != 0) continue;
for(uint colW = colWstart; colW < colWend; ++colW) {
coords[4] = imW - colW * sW;
if(coords[4] % dW != 0) continue;
// strides [11..16] index the kD, kH, kW, oD, oH, oW axes of the column tensor
val += col[bSiCoffset + (coords[2]/dD)*colShapeInfo[11] + (coords[3]/dH)*colShapeInfo[12] + (coords[4]/dW)*colShapeInfo[13] + colD*colShapeInfo[14] + colH*colShapeInfo[15] + colW*colShapeInfo[16]];
}
}
}
vol[volOffset] = val;
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side helper: launches col2volCuda<T> with the supplied configuration on the given stream.
template <typename T>
static void col2volCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                const void* columns, const Nd4jLong* colShapeInfo,
                                void* volume, const Nd4jLong* volShapeInfo,
                                const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {

    const dim3 grid(blocksPerGrid);
    const dim3 block(threadsPerBlock);

    col2volCuda<T><<<grid, block, sharedMem, *stream>>>(columns, colShapeInfo, volume, volShapeInfo, sD, sH, sW, pD, pH, pW, dD, dH, dW);
}
//////////////////////////////////////////////////////////////////////////
// De-convolves columns [bS, iC, kD, kH, kW, oD, oH, oW] back into volume [bS, iC, iD, iH, iW].
void ConvolutionUtils::col2vol(nd4j::graph::Context& block, const NDArray& col, NDArray& vol, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW) {

    PointersManager manager(block.launchContext(), "col2vol");

    // one thread per volume element; the kernel needs col.rankOf() (= 8) uints of
    // per-thread coordinate scratch in dynamic shared memory, plus a little slack
    const int numThreads = MAX_NUM_THREADS / 4;
    const int numBlocks  = (vol.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = col.rankOf() * sizeof(uint) * numThreads + 256;

    NDArray::prepareSpecialUse({&vol}, {&col});
    BUILD_SINGLE_SELECTOR(vol.dataType(), col2volCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), vol.specialBuffer(), vol.specialShapeInfo(), sD, sH, sW, pD, pH, pW, dD, dH, dW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&vol}, {&col});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// 2d convolution forward pass implemented as im2col + matrix multiply:
// the input is unrolled into columns, multiplied with the flattened weights,
// then the result is reshaped/permuted into the output layout and biases added.
template <typename X, typename Y>
static void conv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weights [kH, kW, iC, oC] always
// bias [oC]
// output [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width;
int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);
// pH/pW may be adjusted here; they were passed by value, so caller's values are untouched
ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode);
std::vector<int> permutForOutput;
if(isNCHW)
permutForOutput = {0, 3, 1, 2}; // [bS, oH, oW, oC] -> [bS, oC, oH, oW]
else
// NHWC input is replaced by a freshly allocated NCHW copy; released at the end of this function
input = new NDArray(input->permute({0, 3, 1, 2})); // [bS, iH, iW, iC] -> [bS, iC, iH, iW] if NHWC
NDArray col('c', {bS, oH, oW, kH, kW, iC}, input->dataType(), input->getContext());
NDArray colP = col.permute({0, 5, 3, 4, 1, 2}); // {bS, iC, kH, kW, oH, oW}
NDArray mmulResult('f', {bS*oH*oW, oC}, output->dataType(), output->getContext());
//----- calculation of output -----//
auto ctx = block.launchContext();
helpers::im2col(*ctx, *input, colP, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
MmulHelper::tensorDot(&col, weights, &mmulResult, {3,4,5}, {0,1,2}, {}); // [bS, oH, oW, kH, kW, iC] x [kH, kW, iC, oC] = [bS, oH, oW, oC]
//----- assign outTemp to output -----//
if(isNCHW) {
mmulResult.reshapei({bS, oH, oW, oC});
mmulResult.permutei(permutForOutput);
}
output->assign(mmulResult);
//----- add biases if required -----//
if(bias)
// output->applyBroadcast(broadcast::Add, {indIOioC}, bias);
helpers::addBias(block, *output, *bias, *output, isNCHW);
// free the temporary NCHW copy allocated above for the NHWC path
if(!isNCHW)
delete input;
}
//////////////////////////////////////////////////////////////////////////
// Public entry point for 2d convolution: dispatches conv2d_<X,Y> on the input's
// floating-point data type (both template parameters instantiated with the same type).
void ConvolutionUtils::conv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// Depthwise 2d convolution: every input channel is convolved with its own mC filters,
// producing iC*mC output channels. Implemented as im2col + per-channel tensorDot.
template <typename X, typename Y>
static void depthwiseConv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weights [kH, kW, iC, mC] always
// bias [oC] = iC*mC
// output [bS, oH, oW, iC*mC] (NHWC) or [bS, iC*mC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width
int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
mC = weights->sizeAt(indWmC); // channels multiplier
// axis-permutation + reshape recipes consumed by MmulHelper::tensorDot below
std::vector<std::vector<Nd4jLong>> modifColumns = {{1,0,4,5,2,3}, {iC,bS*oH*oW,kH*kW}}; // [bS,iC,kH,kW,oH,oW] -> [iC,bS,oH,oW,kH,kW] -> [iC,bS*oH*oW,kH*kW]
std::vector<std::vector<Nd4jLong>> modifOutput;
std::vector<Nd4jLong> outReShape;
if(!isNCHW) {
outReShape = {bS, oH, oW, iC, mC}; // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC]
modifOutput = {{3,0,1,2,4},{iC, bS*oH*oW, mC}}; // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
// NHWC input is replaced by a freshly allocated NCHW copy; released at the end of this function
input = new NDArray(input->permute({0, 3, 1, 2})); // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
}
else {
outReShape = {bS, iC, mC, oH, oW}; // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW]
modifOutput = {{1,0,3,4,2},{iC, bS*oH*oW, mC}}; // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
}
if(paddingMode == 1) // SAME
ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW);
NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());
NDArray outputReshaped = output->reshape(output->ordering(), outReShape);
// NOTE(review): im2col here uses output->getContext() while conv2d_ uses block.launchContext() — confirm both resolve to the same context
helpers::im2col(*output->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext())); // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
MmulHelper::tensorDot(&columns, weights, &outputReshaped, modifColumns, {{2,0,1,3},{iC,kH*kW,mC}}, modifOutput); // [iC, bS*oH*oW, kW*kH] x [iC, kH*kW, mC] = [iC, bS*oH*oW, mC]
if(bias)
// output->applyBroadcast(broadcast::Add, {indIOioC}, bias);
helpers::addBias(block, *output, *bias, *output, isNCHW);
// free the temporary NCHW copy allocated above for the NHWC path
if(!isNCHW)
delete input;
}
//////////////////////////////////////////////////////////////////////////
// Public entry point for depthwise 2d convolution: dispatches depthwiseConv2d_<X,Y>
// on the input's floating-point data type.
void ConvolutionUtils::depthwiseConv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2d_, (block, input, weights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// Separable 2d convolution = depthwise convolution optionally followed by a 1x1
// pointwise convolution. When weightsPoint is present, the depthwise result goes to a
// temporary array and the bias is applied only in the pointwise stage.
template <typename X, typename Y>
static void sconv2d_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
// input [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
// weightsDepth [kH, kW, iC, mC] always
// weightsPoint [1, 1, iC*mC, oC] always
// bias [oC], oC = iC*mC if weightsPoint=nullptr
// output is [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW)
// kH filter(kernel) height
// kW filter(kernel) width
// sH strides height
// sW strides width
// pH paddings height
// pW paddings width
// dH dilations height
// dW dilations width
// paddingMode 0-VALID, 1-SAME
// isNCHW 1-NCHW, 0-NHWC
int bS, iC, iH, iW, mC, oC, oH, oW; // batch size, input channels, input height/width, channels multiplier, output channels, output height/width
int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH; // corresponding indexes
ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
mC = weightsDepth->sizeAt(indWmC); // channels multiplier
// intermediate buffer: points at output directly unless a pointwise stage follows
NDArray* outputDepth = output;
if(weightsPoint) // if pointwise convolution is expected
outputDepth = new NDArray(output->ordering(), !isNCHW ? std::vector<Nd4jLong>({bS, oH, oW, iC*mC}) : std::vector<Nd4jLong>({bS, iC*mC, oH, oW}), input->dataType(), input->getContext());
// ----- perform depthwise convolution (if weightsPoint is absent then oC = iC*mC) ----- //
ConvolutionUtils::depthwiseConv2d(block, input, weightsDepth, weightsPoint ? nullptr : bias, outputDepth, kH,kW, sH,sW, pH,pW, dH,dW, paddingMode, isNCHW);
// ----- perform pointwise convolution (oH = iH, oW = iW) ----- //
if (weightsPoint) {
ConvolutionUtils::conv2d(block, outputDepth, weightsPoint, bias, output, 1,1, 1,1, 0,0, 1,1, paddingMode, isNCHW); // in this case oH=iH, oW=iW
delete outputDepth;
}
}
//////////////////////////////////////////////////////////////////////////
// Public entry point for separable 2d convolution: dispatches sconv2d_<X,Y>
// on the input's floating-point data type.
void ConvolutionUtils::sconv2d(nd4j::graph::Context& block, const NDArray* input, const NDArray* weightsDepth, const NDArray* weightsPoint, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), sconv2d_, (block, input, weightsDepth, weightsPoint, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
// Average pooling forward: each output element is the mean of its (dilated) kH x kW window.
// extraParam0 selects the divisor: 0 = exclude padding (actual window size), 1 = include padding (kH*kW).
template <typename X, typename Z>
static __global__ void avgPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
// input is [bS, iC, iH, iW]
// output is [bS, iC, oH, oW]
const auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
// geometry is loaded once by thread 0 and broadcast to the block via shared memory
__shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;
if (threadIdx.x == 0) {
bS = shape::sizeAt(xShapeInfo, 0);
iC = shape::sizeAt(xShapeInfo, 1);
oH = shape::sizeAt(zShapeInfo, 2);
oW = shape::sizeAt(zShapeInfo, 3);
iH = shape::sizeAt(xShapeInfo, 2);
iW = shape::sizeAt(xShapeInfo, 3);
strideB = shape::stride(xShapeInfo)[0];
strideC = shape::stride(xShapeInfo)[1];
strideY = shape::stride(xShapeInfo)[2];
strideX = shape::stride(xShapeInfo)[3];
strideOB = shape::stride(zShapeInfo)[0];
strideOC = shape::stride(zShapeInfo)[1];
strideOY = shape::stride(zShapeInfo)[2];
strideOX = shape::stride(zShapeInfo)[3];
length = shape::length(zShapeInfo);
//Replace kernel H/W with *effective* kernel H/W accounting for dilatyon
kHEff = kH + (kH-1)*(dH-1);
kWEff = kW + (kW-1)*(dW-1);
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// grid-stride loop over output elements
for (int index = tid; index < length; index += blockDim.x * gridDim.x) {
// decompose the flat output index into (n, c, ph, pw)
const int pw = index % oW;
const int ph = (index / oW) % oH;
const int c = (index / oW / oH) % iC;
const int n = index / oW / oH / iC;
// window bounds in input coordinates; clipped to the image below while
// keeping positions aligned to the dilation grid
int hstart = sH * ph - pH;
int wstart = sW * pw - pW;
int hend = hstart + kHEff;
int wend = wstart + kWEff;
if(hstart < 0){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH);
hstart += f * dH;
}
if(wstart < 0){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW);
wstart += f * dW;
}
if(hend > iH){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH);
hend -= f * dH;
}
if(wend > iW){
int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW);
wend -= f * dW;
}
//Accounts for dilation
int pool_size = nd4j::math::nd4j_ceil<double,int>((double) (hend-hstart) / (double) dH) * nd4j::math::nd4j_ceil<double,int>((double) (wend-wstart) / (double) dW);
Z sum = 0.0f;
const X *inSlice = x + (n * strideB + c * strideC);
for (int h = hstart; h < hend; h += dH)
for (int w = wstart; w < wend; w += dW)
sum += static_cast<Z>(inSlice[h * strideY + w * strideX]);
int divide_factor = pool_size; //Case 0: exclude padding
if (extraParam0 == 1) //Case 1: include padding
divide_factor = kH * kW;
z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor);
}
}
//////////////////////////////////////////////////////////////////////////
// Launches avgPooling2dCuda<X,Z> on the context's stream with a fixed configuration;
// the kernel covers the whole output via its grid-stride loop.
template <typename X, typename Z>
static void avgPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    const dim3 grid(512);
    const dim3 threads(512);
    // NOTE(review): the kernel declares only static __shared__ data; this dynamic
    // shared-memory allocation looks unnecessary — confirm before removing
    const size_t shmemBytes = 4192;
    avgPooling2dCuda<X, Z><<<grid, threads, shmemBytes, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// p-norm pooling forward: z[n,c,oh,ow] = (sum over the (dilated) window of |x|^p)^(1/p),
// with p = extraParam0.
// Input x: [bS, iC, iH, iW]; output z: [bS, iC, oH, oW].
// Fix: removed the unused __shared__ bool fOrder and the unused pool_size computation
// (p-norm uses neither; they were dead code copied from the avg-pooling kernel).
template <typename X, typename Z>
static __global__ void pnormPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {

    const auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);

    // geometry is loaded once by thread 0 and broadcast to the block via shared memory
    __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;

    if (threadIdx.x == 0) {
        bS = shape::sizeAt(xShapeInfo, 0);
        iC = shape::sizeAt(xShapeInfo, 1);
        oH = shape::sizeAt(zShapeInfo, 2);
        oW = shape::sizeAt(zShapeInfo, 3);
        iH = shape::sizeAt(xShapeInfo, 2);
        iW = shape::sizeAt(xShapeInfo, 3);

        strideB = shape::stride(xShapeInfo)[0];
        strideC = shape::stride(xShapeInfo)[1];
        strideY = shape::stride(xShapeInfo)[2];
        strideX = shape::stride(xShapeInfo)[3];

        strideOB = shape::stride(zShapeInfo)[0];
        strideOC = shape::stride(zShapeInfo)[1];
        strideOY = shape::stride(zShapeInfo)[2];
        strideOX = shape::stride(zShapeInfo)[3];

        length = shape::length(zShapeInfo);

        // replace kernel H/W with *effective* kernel H/W accounting for dilation
        kHEff = kH + (kH - 1) * (dH - 1);
        kWEff = kW + (kW - 1) * (dW - 1);
    }
    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // grid-stride loop over output elements
    for (int index = tid; index < length; index += blockDim.x * gridDim.x) {

        // decompose the flat output index into (n, c, ph, pw)
        const int pw = index % oW;
        const int ph = (index / oW) % oH;
        const int c  = (index / oW / oH) % iC;
        const int n  = index / oW / oH / iC;

        // window bounds in input coordinates, clipped to the image while
        // keeping positions aligned to the dilation grid
        int hstart = sH * ph - pH;
        int wstart = sW * pw - pW;
        int hend = hstart + kHEff;
        int wend = wstart + kWEff;

        if (hstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -hstart / (Z) dH);
            hstart += f * dH;
        }
        if (wstart < 0) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) -wstart / (Z) dW);
            wstart += f * dW;
        }
        if (hend > iH) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (hend - iH) / (Z) dH);
            hend -= f * dH;
        }
        if (wend > iW) {
            int f = nd4j::math::nd4j_ceil<Z, int>((Z) (wend - iW) / (Z) dW);
            wend -= f * dW;
        }

        Z sum = 0.f;

        const X *inSlice = x + (n * strideB + c * strideC);

        for (int h = hstart; h < hend; h += dH)
            for (int w = wstart; w < wend; w += dW)
                sum += nd4j::math::nd4j_pow<Z, Z, Z>(static_cast<Z>(nd4j::math::nd4j_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0);

        z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = nd4j::math::nd4j_pow<Z, Z, Z>(sum, (Z) 1.0f / extraParam0);
    }
}
//////////////////////////////////////////////////////////////////////////
// Launches pnormPooling2dCuda<X,Z> on the context's stream with a fixed configuration;
// the kernel covers the whole output via its grid-stride loop.
template <typename X, typename Z>
static void pnormPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    const dim3 grid(512);
    const dim3 threads(512);
    // NOTE(review): the kernel declares only static __shared__ data; this dynamic
    // shared-memory allocation looks unnecessary — confirm before removing
    const size_t shmemBytes = 4192;
    pnormPooling2dCuda<X, Z><<<grid, threads, shmemBytes, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// Max pooling forward: z[n,c,oh,ow] = max over the (dilated) kH x kW window of x.
// Input x: [bS, iC, iH, iW]; output z: [bS, iC, oH, oW]. extraParam0 is unused for max.
// Fix: removed the unused __shared__ bool fOrder and the unused pool_size computation
// (max pooling uses neither; they were dead code copied from the avg-pooling kernel).
template <typename X, typename Z>
static __global__ void maxPooling2dCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {

    const auto x = reinterpret_cast<const X*>(vx);
    auto z = reinterpret_cast<Z*>(vz);

    // geometry is loaded once by thread 0 and broadcast to the block via shared memory
    __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff;

    if (threadIdx.x == 0) {
        bS = shape::sizeAt(xShapeInfo, 0);
        iC = shape::sizeAt(xShapeInfo, 1);
        oH = shape::sizeAt(zShapeInfo, 2);
        oW = shape::sizeAt(zShapeInfo, 3);
        iH = shape::sizeAt(xShapeInfo, 2);
        iW = shape::sizeAt(xShapeInfo, 3);

        strideB = shape::stride(xShapeInfo)[0];
        strideC = shape::stride(xShapeInfo)[1];
        strideY = shape::stride(xShapeInfo)[2];
        strideX = shape::stride(xShapeInfo)[3];

        strideOB = shape::stride(zShapeInfo)[0];
        strideOC = shape::stride(zShapeInfo)[1];
        strideOY = shape::stride(zShapeInfo)[2];
        strideOX = shape::stride(zShapeInfo)[3];

        length = shape::length(zShapeInfo);

        // replace kernel H/W with *effective* kernel H/W accounting for dilation
        kHEff = kH + (kH - 1) * (dH - 1);
        kWEff = kW + (kW - 1) * (dW - 1);
    }
    __syncthreads();

    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // grid-stride loop over output elements
    for (int index = tid; index < length; index += blockDim.x * gridDim.x) {

        // decompose the flat output index into (n, c, ph, pw)
        const int pw = index % oW;
        const int ph = (index / oW) % oH;
        const int c  = (index / oW / oH) % iC;
        const int n  = index / oW / oH / iC;

        // window bounds in input coordinates, clipped to the image while
        // keeping positions aligned to the dilation grid
        int hstart = sH * ph - pH;
        int wstart = sW * pw - pW;
        int hend = hstart + kHEff;
        int wend = wstart + kWEff;

        if(hstart < 0){
            int f = nd4j::math::nd4j_ceil<Z,int>((Z) -hstart / (Z)dH);
            hstart += f * dH;
        }
        if(wstart < 0){
            int f = nd4j::math::nd4j_ceil<Z,int>((Z) -wstart / (Z) dW);
            wstart += f * dW;
        }
        if(hend > iH){
            int f = nd4j::math::nd4j_ceil<Z,int>((Z) (hend-iH) / (Z) dH);
            hend -= f * dH;
        }
        if(wend > iW){
            int f = nd4j::math::nd4j_ceil<Z,int>((Z) (wend-iW) / (Z) dW);
            wend -= f * dW;
        }

        Z max = -nd4j::DataTypeUtils::max<Z>();

        const X *inSlice = x + (n * strideB + c * strideC);

        for (int h = hstart; h < hend; h += dH) {
            for (int w = wstart; w < wend; w += dW) {
                Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]);
                if (v > max)
                    max = v;
            }
        }

        z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max;
    }
}
//////////////////////////////////////////////////////////////////////////
// Launches maxPooling2dCuda<X,Z> on the context's stream with a fixed configuration;
// the kernel covers the whole output via its grid-stride loop.
template <typename X, typename Z>
static void maxPooling2dCudaLauncher(nd4j::LaunchContext & block, void *vx, Nd4jLong *vxShapeInfo, void *vz, Nd4jLong *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) {
    const dim3 grid(512);
    const dim3 threads(512);
    // NOTE(review): the kernel declares only static __shared__ data; this dynamic
    // shared-memory allocation looks unnecessary — confirm before removing
    const size_t shmemBytes = 4192;
    maxPooling2dCuda<X,Z><<<grid, threads, shmemBytes, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// Dispatches the 2d pooling forward kernel for the requested pooling type.
// input [bS, iC, iH, iW], output [bS, iC, oH, oW] (see the kernels above).
// extraParam0: divisor mode for AVG (0 = exclude padding, 1 = include padding), p for PNORM.
void ConvolutionUtils::pooling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) {
// make sure the input's device buffer is up to date before launching
if(!input.isActualOnDeviceSide()) input.syncToDevice();
switch (poolingMode) {
case MAX_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
case AVG_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
case PNORM_POOL: {
BUILD_SINGLE_SELECTOR_TWICE(input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), FLOAT_TYPES);
}
break;
default:
throw std::runtime_error("Pooling2D: Unknown PoolingType used");
}
// mark device-side buffers as touched (manual counterpart of prepare/registerSpecialUse used elsewhere)
output.tickWriteDevice();
input.tickReadDevice();
// blocking sync so any kernel failure surfaces here rather than at a later, unrelated call
auto result = cudaStreamSynchronize(*block.launchContext()->getCudaStream());
if (result != 0)
throw cuda_exception::build("Pooling2D failed", result);
}
//////////////////////////////////////////////////////////////////////////
// 3d pooling forward over x [bS, iC, iD, iH, iW] -> z [bS, iC, oD, oH, oW].
// poolingMode: 0 = max, 1 = avg, 2 = p-norm (p = extraParam0; for avg, extraParam0
// selects the divisor: 0 exclude padding, 1 include padding).
// One thread per output element (no grid-stride loop — excess threads return early).
// Launch contract: dynamic shared memory must hold 5 Nd4jLongs per thread (coordinate scratch).
template <typename T>
__global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x input is [bS, iC, iD, iH, iW]
// z output is [bS, iC, oD, oH, oW]
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
__shared__ Nd4jLong *sharedMem, zLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
zLen = shape::length(zShapeInfo);
rank = 5;
// effective (dilated) kernel extents
kDeff = kD + (kD - 1) * (dD - 1);
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
// input spatial dims read by fixed index from the shape info (dims at [1..5])
iD = xShapeInfo[3];
iH = xShapeInfo[4];
iW = xShapeInfo[5];
kProd = kD * kH * kW;
}
__syncthreads();
const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
if(zInd >= zLen)
return;
// per-thread 5-element coordinate buffer carved out of dynamic shared memory
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(zInd, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
// window bounds in input coordinates, clipped to the image while
// keeping positions aligned to the dilation grid
int dstart = coords[2] * sD - pD;
int hstart = coords[3] * sH - pH;
int wstart = coords[4] * sW - pW;
int dend = dstart + kDeff;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(dstart < 0)
dstart += dD * ((-dstart + dD - 1) / dD);
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(dend > iD)
dend -= dD * ((dend - iD + dD - 1) / dD);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
// coords[2..4] are reused below as the input-window iteration coordinates
switch (poolingMode) {
/*** max ***/
case 0: {
T max = -DataTypeUtils::max<T>();
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max)
max = val;
}
}
}
z[zOffset] = max;
}
break;
/*** avg ***/
case 1: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += x[shape::getOffset(xShapeInfo, coords)];
if (extraParam0 == 0) { //Exclude padding
// a*b*c = number of input samples actually visited (ceil of range / dilation)
uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1);
uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1);
sum /= static_cast<T>(a * b * c); // /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
}
else if (extraParam0 == 1) //Include padding
sum /= kProd;
z[zOffset] = sum;
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
sum = nd4j::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0);
z[zOffset] = sum;
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
// Host-side helper: launches pooling3dCuda<T> with the supplied configuration on the given stream.
template <typename T>
static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                  const void* vx, const Nd4jLong* xShapeInfo,
                                  void* vz, const Nd4jLong* zShapeInfo,
                                  const int kD, const int kH, const int kW,
                                  const int sD, const int sH, const int sW,
                                  const int pD, const int pH, const int pW,
                                  const int dD, const int dH, const int dW,
                                  const int poolingMode, const int extraParam0) {

    const dim3 grid(blocksPerGrid);
    const dim3 block(threadsPerBlock);

    pooling3dCuda<T><<<grid, block, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
// 3d pooling forward entry point: input [bS, iC, iD, iH, iW] -> output [bS, iC, oD, oH, oW].
void ConvolutionUtils::pooling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {

    PointersManager manager(block.launchContext(), "pooling3d");

    // one thread per output element; each thread needs output.rankOf() (= 5) Nd4jLongs
    // of coordinate scratch in dynamic shared memory, plus a little slack
    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (output.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = output.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
// 2d pooling backward pass: scatters the output gradient y into the input gradient z.
// poolingMode: 0 = max (gradient routed to the argmax position), 1 = avg (gradient spread
// evenly over the window), 2 = p-norm (chain rule through |x|^p, p = extraParam0).
// One thread per gradO element; contributions are accumulated with atomicAdd, so z (gradI)
// is assumed to be zero-initialized by the caller — TODO confirm against callers.
// Launch contract: dynamic shared memory must hold 4 Nd4jLongs per thread (coordinate scratch).
template <typename T>
__global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x: input [bS, iC, iH, iW]
// y: gradO [bS, iC, oH, oW]
// z: gradI [bS, iC, iH, iW] -> gradI is output in this function
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
// coord2/coord3 remember the argmax position for the max-pooling branch
Nd4jLong coord2, coord3;
__shared__ int rank, kHeff, kWeff, iH, iW, kProd;
__shared__ Nd4jLong *sharedMem, yLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
yLen = shape::length(yShapeInfo);
rank = 4;
// effective (dilated) kernel extents
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
// input spatial dims read by fixed index from the shape info (dims at [1..4])
iH = xShapeInfo[3];
iW = xShapeInfo[4];
kProd = kH * kW;
}
__syncthreads();
const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;
if(yInd >= yLen)
return;
// per-thread 4-element coordinate buffer carved out of dynamic shared memory
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(yInd, yShapeInfo, coords);
const auto yOffset = shape::getOffset(yShapeInfo, coords);
// window bounds in input coordinates, clipped to the image while
// keeping positions aligned to the dilation grid
int hstart = coords[2] * sH - pH;
int wstart = coords[3] * sW - pW;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
// coords[2..3] are reused below as the input-window iteration coordinates
switch (poolingMode) {
/*** max ***/
case 0: {
coord2 = hstart;
coord3 = wstart;
T max = -DataTypeUtils::max<T>();
// find the argmax inside the window, then route the whole gradient to it
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) {
max = val;
coord2 = coords[2];
coord3 = coords[3];
}
}
}
coords[2] = coord2;
coords[3] = coord3;
auto zOffset = shape::getOffset(zShapeInfo, coords);
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]);
//z[zOffset] += y[yOffset];
}
break;
/*** avg ***/
case 1: {
T val = y[yOffset];
if (extraParam0 == 0) //Exclude padding
val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
else if (extraParam0 == 1) //Include padding
val /= kProd;
// spread the (scaled) gradient uniformly over the window
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
T val = y[yOffset];
// first pass: recompute the p-norm sum over the window
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
// chain rule: dy * sum^((1-p)/p) * |x|^(p-1) * sign(x)
val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) {
const auto xOffset = shape::getOffset(xShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset]));
}
}
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                    const void* vx, const Nd4jLong* xShapeInfo,
                                    const void* vy, const Nd4jLong* yShapeInfo,
                                    void* vz, const Nd4jLong* zShapeInfo,
                                    const int kH, const int kW,
                                    const int sH, const int sW,
                                    const int pH, const int pW,
                                    const int dH, const int dW,
                                    const int poolingMode, const int extraParam0) {
    // Thin host-side trampoline: forwards every argument unchanged to the
    // pooling2dBPCuda kernel using the caller-chosen launch configuration.
    pooling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo,
            vy, yShapeInfo,
            vz, zShapeInfo,
            kH, kW, sH, sW, pH, pW, dH, dW,
            poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling2dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
    // The kernel accumulates contributions into gradI with atomics, so the
    // gradient buffer must start from zero.
    gradI.nullify();

    PointersManager manager(block.launchContext(), "pooling2dBP");

    // one thread per gradO element; shared memory holds per-thread coordinate scratch
    const int numThreads = 256;
    const int numBlocks  = (gradO.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = gradO.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
    BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&input, &gradO});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void pooling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {

    // Backward pass of 3d pooling (max / avg / pnorm), one thread per gradO element.
    // x: input [bS, iC, iD, iH, iW]
    // y: gradO [bS, iC, oD, oH, oW]
    // z: gradI [bS, iC, iD, iH, iW] -> gradI is output in this function; the caller
    //    must zero it beforehand because contributions are accumulated with atomicAdd.
    // Dynamic shared memory is used as per-thread coordinate scratch:
    // rank(=5) * sizeof(Nd4jLong) per thread.

    const T* x = reinterpret_cast<const T*>(vx);
    const T* y = reinterpret_cast<const T*>(vy);
          T* z = reinterpret_cast<T*>(vz);

    Nd4jLong coord2, coord3, coord4;
    __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd;
    __shared__ Nd4jLong *sharedMem, yLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);

        yLen = shape::length(yShapeInfo);
        rank = 5;

        // effective kernel extents, accounting for dilation
        kDeff = kD + (kD - 1) * (dD - 1);
        kHeff = kH + (kH - 1) * (dH - 1);
        kWeff = kW + (kW - 1) * (dW - 1);

        iD = xShapeInfo[3];
        iH = xShapeInfo[4];
        iW = xShapeInfo[5];

        kProd = kD * kH * kW;
    }
    __syncthreads();

    const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;
    if (yInd >= yLen)
        return;

    auto coords = sharedMem + threadIdx.x * rank;
    shape::index2coords(yInd, yShapeInfo, coords);
    const auto yOffset = shape::getOffset(yShapeInfo, coords);

    // pooling-window boundaries in the input volume
    int dstart = coords[2] * sD - pD;
    int hstart = coords[3] * sH - pH;
    int wstart = coords[4] * sW - pW;
    int dend = dstart + kDeff;
    int hend = hstart + kHeff;
    int wend = wstart + kWeff;

    // clip the window to the valid input range while staying on the dilation grid
    if (dstart < 0)
        dstart += dD * ((-dstart + dD - 1) / dD);
    if (hstart < 0)
        hstart += dH * ((-hstart + dH - 1) / dH);
    if (wstart < 0)
        wstart += dW * ((-wstart + dW - 1) / dW);
    if (dend > iD)
        dend -= dD * ((dend - iD + dD - 1) / dD);
    if (hend > iH)
        hend -= dH * ((hend - iH + dH - 1) / dH);
    if (wend > iW)
        wend -= dW * ((wend - iW + dW - 1) / dW);

    switch (poolingMode) {

        /*** max ***/
        case 0: {
            // Seed the argmax coordinates with the window start so that an empty
            // (fully clipped) window cannot scatter through uninitialized values.
            // This mirrors pooling2dBPCuda, which initializes coord2/coord3 the
            // same way; previously these were read uninitialized in that case.
            coord2 = dstart;
            coord3 = hstart;
            coord4 = wstart;

            T max = -DataTypeUtils::max<T>();
            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
                        T val = x[shape::getOffset(xShapeInfo, coords)];
                        if (val > max) {
                            max = val;
                            coord2 = coords[2];
                            coord3 = coords[3];
                            coord4 = coords[4];
                        }
                    }
                }
            }
            // route the whole incoming gradient to the argmax position
            coords[2] = coord2;
            coords[3] = coord3;
            coords[4] = coord4;
            nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], y[yOffset]);
        }
        break;

        /*** avg ***/
        case 1: {
            T val = y[yOffset];
            if (extraParam0 == 0)         //Exclude padding
                val /= nd4j::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * nd4j::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW));   //Accounts for dilation
            else if (extraParam0 == 1)    //Include padding
                val /= kProd;

            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
                        nd4j::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
        }
        break;

        /*** pnorm ***/
        case 2: {
            T sum = static_cast<T>(0.);
            T val = y[yOffset];

            // first pass: accumulate sum(|x|^p) over the window
            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD)
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH)
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW)
                        sum += nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);

            val *= nd4j::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);

            // second pass: distribute gradient proportionally to |x|^(p-1) * sgn(x)
            for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) {
                for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) {
                    for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) {
                        const auto xOffset = shape::getOffset(xShapeInfo, coords);
                        const auto zOffset = shape::getOffset(zShapeInfo, coords);
                        nd4j::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * nd4j::math::nd4j_pow<T,T,T>(nd4j::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * nd4j::math::nd4j_sgn<T,T>(x[xOffset]));
                    }
                }
            }
        }
        break;
    }
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                    const void* vx, const Nd4jLong* xShapeInfo,
                                    const void* vy, const Nd4jLong* yShapeInfo,
                                    void* vz, const Nd4jLong* zShapeInfo,
                                    const int kD, const int kH, const int kW,
                                    const int sD, const int sH, const int sW,
                                    const int pD, const int pH, const int pW,
                                    const int dD, const int dH, const int dW,
                                    const int poolingMode, const int extraParam0) {
    // Host-side trampoline that launches pooling3dBPCuda with the supplied
    // grid/block/shared-memory configuration on the given stream.
    pooling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo,
            kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW,
            poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling3dBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) {
    // The kernel accumulates into gradI with atomics, so it must start zeroed.
    gradI.nullify();

    PointersManager manager(block.launchContext(), "pooling3dBP");

    // one thread per gradO element; shared memory holds per-thread coordinate scratch
    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (gradO.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = gradO.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
    BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dBPCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&input, &gradO});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void conv2dBP_(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {

    // Backward pass of 2d convolution: computes gradI and (when non-null)
    // gradW / gradB from the incoming gradient gradO using im2col + tensorDot.
    //
    // input   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
    // weights [kH, kW, iC, oC] always
    // bias    [oC]
    // gradO   [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next
    // gradI   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon
    // gradW   [kH, kW, iC, oC] always
    // gradB   [oC]
    // kH kW   filter(kernel) height / width
    // sH sW   strides height / width
    // pH pW   paddings height / width (recomputed below according to paddingMode)
    // dH dW   dilations height / width
    // paddingMode  0-VALID, 1-SAME
    // isNCHW       0-NHWC, 1-NCHW

    int bS, iC, iH, iW, oC, oH, oW;                        // batch size, input channels, input height/width, output channels, output height/width;
    int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH;  // corresponding indexes
    ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH);

    ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode);

    std::vector<int> gradOaxesForDot;

    if(!isNCHW) {
        gradOaxesForDot = {0, 1, 2};                          // bS, oH, oW
        // NOTE: for NHWC the input/gradI pointers are re-pointed at freshly
        // allocated NCHW-permuted views; they are deleted at the end of this function.
        input = new NDArray(input->permute({0, 3, 1, 2}));    // [bS, iH, iW, iC] -> [bS, iC, iH, iW]
        gradI = new NDArray(gradI->permute({0, 3, 1, 2}));    // [bS, iH, iW, iC] -> [bS, iC, iH, iW]
    } else {
        gradOaxesForDot = {0, 2, 3};                          // bS, oH, oW
    }

    // scratch buffer holding the im2col expansion of the (NCHW) input
    NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());

    // ----- calculation of gradW ----- //
    if(gradW) {
        auto ctx = block.launchContext();
        helpers::im2col(*ctx, *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext()));   // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
        nd4j::MmulHelper::tensorDot(&columns, gradO, gradW, {0,4,5}, gradOaxesForDot, {2, 0, 1, 3});   // [bS, iC, kH, kW, oH, oW] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [iC, kH, kW, oC]
    }

    // ----- calculation of gradB ----- //
    if(gradB) {
        NDArray* gradBR = gradB;
        // flatten a rank-2 bias gradient to a vector before reducing into it
        if(gradB->rankOf() == 2)
            gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()}));
        gradO->reduceAlongDimension(reduce::Sum, gradBR, gradOaxesForDot);   // sum over bS, oH, oW
        if(gradBR != gradB)
            delete gradBR;
    }

    //----- calculation of gradI -----//
    nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {indWoC}, {indIOioC}, {2, 3, 1, 0, 4, 5});   // [kH, kW, iC, oC]/[oC, iC, kH, kW]] x [bS, oH, oW, oC]/[bS, oC, oH, oW] = [kH, kW, iC, bS, oH, oW]
    helpers::col2im(*block.launchContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW);          // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW]

    if(!isNCHW) {
        delete input;
        delete gradI;
    }
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::conv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
    // Type-dispatching entry point: selects the conv2dBP_<X,Y> instantiation
    // matching the input's data type (FLOAT_TYPES list) and forwards all
    // arguments unchanged.
    BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), conv2dBP_, (block, input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
template <typename X, typename Y>
static void depthwiseConv2dBP_(const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {

    // Backward pass of depthwise 2d convolution, implemented with im2col plus
    // axis-permuted tensorDot contractions (the modif* vectors below describe the
    // permute + reshape applied to each operand before the matmul).
    // (Original comments said NDHWC/NCDHW — copy-paste from the 3d variant; this
    // function handles the 2d layouts NHWC/NCHW.)
    //
    // input   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
    // weights [kH, kW, iC, mC] always
    // bias    [oC] = [iC*mC]
    // gradO   [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next
    // gradI   [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon
    // gradW   [kH, kW, iC, mC] always
    // gradB   [oC], may be null
    // kH kW   filter(kernel) height / width
    // sH sW   strides height / width
    // pH pW   paddings height / width (recomputed below when paddingMode is SAME)
    // dH dW   dilations height / width
    // paddingMode  0-VALID, 1-SAME
    // isNCHW       0-NHWC, 1-NCHW

    int bS, iC, iH, iW, mC, oC, oH, oW;                    // batch size, input channels, input height/width, channels multiplier(oC = iC*mC), output channels, output height/width
    int indIOioC, indIiH, indWmC, indWiC, indWkH, indOoH;  // corresponding indexes
    ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWmC, indWkH, indOoH);
    mC = weights->sizeAt(indWmC);                          // channels multiplier

    std::vector<std::vector<Nd4jLong>> modifColumns = {{1,2,3,0,4,5}, {iC, kH*kW, bS*oH*oW}};  // [bS,iC,kH,kW,oH,oW] -> [iC, kH*kW, bS*oH*oW]
    std::vector<std::vector<Nd4jLong>> modifGradO1, modifGradO2;
    std::vector<Nd4jLong> gradOreShape;

    if(!isNCHW) {
        gradOreShape = {bS, oH, oW, iC, mC};               // [bS,oH,oW,iC*mC] -> [bS,oH,oW,iC,mC]
        modifGradO1 = {{3,0,1,2,4},{iC, bS*oH*oW, mC}};    // [bS,oH,oW,iC,mC] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
        modifGradO2 = {{3,0,1,2},{iC, mC, bS*oH*oW}};      // [bS,oH,oW,iC*mC] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW]
        // NHWC inputs are re-pointed at freshly allocated NCHW-permuted copies;
        // they are deleted at the end of this function.
        input = new NDArray(input->permute({0, 3, 1, 2}));  // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
        gradI = new NDArray(gradI->permute({0, 3, 1, 2}));  // [bS,iH,iW,iC] -> [bS,iC,iH,iW]
    }
    else {
        gradOreShape = {bS, iC, mC, oH, oW};               // [bS,iC*mC,oH,oW] -> [bS,iC,mC,oH,oW]
        modifGradO1 = {{1,0,3,4,2},{iC, bS*oH*oW, mC}};    // [bS,iC,mC,oH,oW] -> [iC,bS,oH,oW,mC] -> [iC,bS*oH*oW,mC]
        modifGradO2 = {{1,0,2,3},{iC, mC, bS*oH*oW}};      // [bS,iC*mC,oH,oW] -> [iC*mC,bS,oH,oW] -> [iC,mC,bS*oH*oW]
    }

    if(paddingMode == 1)  // SAME
        ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW);

    // scratch buffer for the im2col expansion of the (NCHW) input
    NDArray columns(input->ordering(), {bS, iC, kH, kW, oH, oW}, input->dataType(), input->getContext());
    NDArray gradOreshaped = gradO->reshape(gradO->ordering(), gradOreShape);

    // ----- calculation of gradW ----- //
    helpers::im2col(*input->getContext(), *input, columns, kH, kW, sH, sW, pH, pW, dH, dW, NDArrayFactory::create(0.f, input->getContext()));  // [bS, iC, iH, iW] is convoluted to [bS, iC, kH, kW, oH, oW]
    nd4j::MmulHelper::tensorDot(&columns, &gradOreshaped, gradW, modifColumns, modifGradO1, {{2,0,1,3},{iC,kH*kW,mC}});  // [iC, kW*kH, bS*oH*oW] x [iC, bS*oH*oW, mC] = [iC, kH*kW, mC]

    // ----- calculation of gradB ----- //
    if(gradB) {
        NDArray* gradBR = gradB;
        // flatten a rank-2 bias gradient to a vector before reducing into it
        if(gradB->rankOf() == 2)
            gradBR = new NDArray(gradB->reshape(gradB->ordering(), {(int)gradB->lengthOf()}));
        gradO->reduceAlongDimension(reduce::Sum, gradBR, {0,indOoH,indOoH+1});  // sum over bS, oH, oW
        if(gradBR != gradB)
            delete gradBR;
    }

    //----- calculation of gradI -----//
    nd4j::MmulHelper::tensorDot(weights, gradO, &columns, {{2,0,1,3},{iC,kH*kW,mC}}, modifGradO2, modifColumns);  // [iC, kH*kW, mC] x [iC, mC, bS*oH*oW] = [iC, kW*kH, bS*oH*oW]
    helpers::col2im(*input->getContext(), columns, *gradI, sH, sW, pH, pW, iH, iW, dH, dW);                      // [bS, iC, kH, kW, oH, oW] is de-convoluted to [bS, iC, iH, iW]

    if(!isNCHW) {
        delete input;
        delete gradI;
    }
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::depthwiseConv2dBP(nd4j::graph::Context& block, const NDArray* input, const NDArray* weights, const NDArray* bias, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, int pH, int pW, const int dH, const int dW, const int paddingMode, const int isNCHW) {
    // Type-dispatching entry point for the depthwise conv2d backward pass.
    // Note: the graph context `block` is accepted for interface symmetry with the
    // other *BP entry points but is not forwarded to the implementation.
    BUILD_SINGLE_SELECTOR_TWICE(input->dataType(), depthwiseConv2dBP_, (input, weights, bias, gradO, gradI, gradW, gradB, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, isNCHW), FLOAT_TYPES);
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) {

    // Nearest-neighbour 2d upsampling: every output element copies the input
    // element whose spatial coordinates are its own integer-divided by the factors.
    // x: [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC)
    // z: [bS, iC, factorH*iH, factorW*iW] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC)
    // Dynamic shared memory supplies rank(=4) Nd4jLong coordinates per thread.

    const auto* in  = reinterpret_cast<const T*>(vx);
    auto*       out = reinterpret_cast<T*>(vz);

    __shared__ int rank, dimIH;
    __shared__ Nd4jLong *coordsScratch, outLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        coordsScratch = reinterpret_cast<Nd4jLong*>(shmem);
        dimIH  = isNCHW ? 2 : 1;     // index of the first spatial axis
        outLen = shape::length(zShapeInfo);
        rank   = 4;
    }
    __syncthreads();

    const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= outLen)
        return;

    Nd4jLong* coords = coordsScratch + threadIdx.x * rank;
    shape::index2coords(tid, zShapeInfo, coords);

    const auto outOffset = shape::getOffset(zShapeInfo, coords);

    // map the output spatial coordinates back onto the (smaller) input grid
    coords[dimIH]     /= factorH;
    coords[dimIH + 1] /= factorW;

    out[outOffset] = in[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                     const void* vx, const Nd4jLong* xShapeInfo,
                                     void* vz, const Nd4jLong* zShapeInfo,
                                     const int factorH, const int factorW, const bool isNCHW) {
    // Host-side trampoline for the upsampling2dCuda kernel.
    upsampling2dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling2d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) {
    // Host wrapper: sizes the launch by the output length and dispatches the
    // float-typed upsampling2dCuda kernel.
    PointersManager manager(block.launchContext(), "upsampling2d");

    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (output.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = output.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {

    // Nearest-neighbour 3d upsampling: each output element copies the input
    // element at its spatial coordinates integer-divided by the matching factor.
    // x has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)
    // z has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW ] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
    // Dynamic shared memory supplies rank(=5) Nd4jLong coordinates per thread.

    const T* x = reinterpret_cast<const T*>(vx);
    T* z = reinterpret_cast<T*>(vz);

    __shared__ int rank, dimID;
    __shared__ Nd4jLong *sharedMem, zLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        dimID = isNCDHW ? 2 : 1;    // index of the first spatial axis
        zLen = shape::length(zShapeInfo);
        rank = 5;
    }
    __syncthreads();

    // one thread per output element
    const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
    if(zInd >= zLen)
        return;

    auto coords = sharedMem + threadIdx.x * rank;
    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    // project output coordinates back onto the input grid
    coords[dimID] /= factorD;
    coords[dimID + 1] /= factorH;
    coords[dimID + 2] /= factorW;

    const auto xOffset = shape::getOffset(xShapeInfo, coords);
    z[zOffset] = x[xOffset];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                     const void* vx, const Nd4jLong* xShapeInfo,
                                     void* vz, const Nd4jLong* zShapeInfo,
                                     const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
    // Host-side trampoline for the upsampling3dCuda kernel.
    upsampling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo, vz, zShapeInfo, factorD, factorH, factorW, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling3d(nd4j::graph::Context& block, const NDArray& input, NDArray& output, const int factorD, const int factorH, const int factorW, const bool isNCDHW) {
    // Host wrapper: sizes the launch by the output length and dispatches the
    // float-typed upsampling3dCuda kernel.
    PointersManager manager(block.launchContext(), "upsampling3d");

    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (output.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = output.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&output}, {&input});
    BUILD_SINGLE_SELECTOR(input.dataType(), upsampling3dCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorD, factorH, factorW, isNCDHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&output}, {&input});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCHW) {

    // Backward pass of 2d nearest-neighbour upsampling: each gradI (z) element
    // sums the factorH*factorW gradO (x) elements of the patch it was replicated
    // into. Every z element is owned by exactly one thread, so no atomics needed.
    // x (gradO) has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC)
    // z (gradI) has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC)

    const T* x = reinterpret_cast<const T*>(vx);
    T* z = reinterpret_cast<T*>(vz);

    __shared__ int rank, dimIH;
    __shared__ uint factorH, factorW;
    __shared__ Nd4jLong *sharedMem, zLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        dimIH = isNCHW ? 2 : 1;    // index of the first spatial axis
        zLen = shape::length(zShapeInfo);
        rank = 4;
        // factors are recovered as the ratio of corresponding extents stored in
        // the shape buffers, so no explicit factor arguments are needed
        factorH = xShapeInfo[dimIH + 1] / zShapeInfo[dimIH + 1];
        factorW = xShapeInfo[dimIH + 2] / zShapeInfo[dimIH + 2];
    }
    __syncthreads();

    const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
    if(zInd >= zLen)
        return;

    auto coords = sharedMem + threadIdx.x * rank;
    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    z[zOffset] = 0;

    // top-left corner of the gradO patch that maps onto this gradI element
    const Nd4jLong zCoord2 = coords[dimIH] * factorH;
    const Nd4jLong zCoord3 = coords[dimIH + 1] * factorW;

    for(coords[dimIH] = zCoord2; coords[dimIH] < zCoord2 + factorH; ++coords[dimIH])
        for(coords[dimIH + 1] = zCoord3; coords[dimIH + 1] < zCoord3 + factorW; ++coords[dimIH + 1])
            z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                       const void* vx, const Nd4jLong* xShapeInfo,
                                       void* vz, const Nd4jLong* zShapeInfo,
                                       const bool isNCHW) {
    // Host-side trampoline for the upsampling2dBPCuda kernel.
    upsampling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo, vz, zShapeInfo, isNCHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling2dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCHW) {
    // Host wrapper: one thread per gradI element; the kernel overwrites its
    // output slot, so no prior zeroing is performed here.
    PointersManager manager(block.launchContext(), "upsampling2d_bp");

    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (gradI.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = gradI.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling2dBPCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});

    manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void upsampling3dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const bool isNCDHW) {

    // Backward pass of 3d nearest-neighbour upsampling: each gradI (z) element
    // sums the factorD*factorH*factorW gradO (x) elements of the block it was
    // replicated into. Each z element is owned by exactly one thread.
    // NOTE(review): the comments below were swapped in the original — x is the
    // larger (upsampled) gradO array, z the smaller gradI, as the shape-ratio
    // factor computation shows.
    // x (gradO) has shape [bS, iC, factorD*iD, factorH*iH, factorW*iW] (NCDHW) or [bS, factorD*iD, factorH*iH, factorW*iW, iC] (NDHWC)
    // z (gradI) has shape [bS, iC, iD, iH, iW] (NCDHW) or [bS, iD, iH, iW, iC] (NDHWC)

    const T* x = reinterpret_cast<const T*>(vx);
    T* z = reinterpret_cast<T*>(vz);

    __shared__ int rank, dimID;
    __shared__ uint factorD, factorH, factorW;
    __shared__ Nd4jLong *sharedMem, zLen;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
        dimID = isNCDHW ? 2 : 1;    // index of the first spatial axis
        zLen = shape::length(zShapeInfo);
        rank = 5;
        // factors recovered as the ratio of corresponding shape extents
        factorD = xShapeInfo[dimID + 1] / zShapeInfo[dimID + 1];
        factorH = xShapeInfo[dimID + 2] / zShapeInfo[dimID + 2];
        factorW = xShapeInfo[dimID + 3] / zShapeInfo[dimID + 3];
    }
    __syncthreads();

    const auto zInd = threadIdx.x + blockIdx.x * blockDim.x;
    if(zInd >= zLen)
        return;

    auto coords = sharedMem + threadIdx.x * rank;
    shape::index2coords(zInd, zShapeInfo, coords);
    const auto zOffset = shape::getOffset(zShapeInfo, coords);

    z[zOffset] = 0;

    // corner of the gradO block that maps onto this gradI element
    const Nd4jLong zCoord2 = coords[dimID] * factorD;
    const Nd4jLong zCoord3 = coords[dimID + 1] * factorH;
    const Nd4jLong zCoord4 = coords[dimID + 2] * factorW;

    for(coords[dimID] = zCoord2; coords[dimID] < zCoord2 + factorD; ++coords[dimID])
        for(coords[dimID + 1] = zCoord3; coords[dimID + 1] < zCoord3 + factorH; ++coords[dimID + 1])
            for(coords[dimID + 2] = zCoord4; coords[dimID + 2] < zCoord4 + factorW; ++coords[dimID + 2])
                z[zOffset] += x[shape::getOffset(xShapeInfo, coords)];
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void upsampling3dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
                                       const void* vx, const Nd4jLong* xShapeInfo,
                                       void* vz, const Nd4jLong* zShapeInfo,
                                       const bool isNCDHW) {
    // Host-side trampoline for the upsampling3dBPCuda kernel.
    upsampling3dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
            vx, xShapeInfo, vz, zShapeInfo, isNCDHW);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::upsampling3dBP(nd4j::graph::Context& block, const NDArray& gradO, NDArray& gradI, const bool isNCDHW) {
    // Host wrapper: one thread per gradI element; the kernel overwrites its
    // output slot, so no prior zeroing is performed here.
    PointersManager manager(block.launchContext(), "upsampling3d_bp");

    const int numThreads = MAX_NUM_THREADS / 2;
    const int numBlocks  = (gradI.lengthOf() + numThreads - 1) / numThreads;
    const int shmemBytes = gradI.rankOf() * sizeof(Nd4jLong) * numThreads + 128;

    NDArray::prepareSpecialUse({&gradI}, {&gradO});
    BUILD_SINGLE_SELECTOR(gradI.dataType(), upsampling3dBPCudaLauncher, (numBlocks, numThreads, shmemBytes, block.launchContext()->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), isNCDHW), FLOAT_TYPES);
    NDArray::registerSpecialUse({&gradI}, {&gradO});

    manager.synchronize();
}
}
} |
d8df048cd4e202ef2da6da0ce468791e90376a62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/permute_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {

// Forward permutation kernel, type 0: moves each element of an NCHW blob to the
// flat position ((n*H + h)*W + w)*C + c (i.e. NHWC order) and records the
// source->destination index mapping in map_idx for use by the backward pass.
template <typename Dtype>
__global__ void PermuteForward_type0(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, Dtype* const top_data, int* map_idx) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat NCHW index
    const int pw = index % width;
    const int ph = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    int new_idx = n * width * height * channels + ph * channels * width + pw * channels + c;
    map_idx[index] = new_idx;
    top_data[new_idx] = bottom_data[index];
  }
}

// Forward permutation kernel, type 1: moves each element of an NCHW blob to the
// flat position ((n*W + w)*C + c)*H + h and records the same index mapping.
template <typename Dtype>
__global__ void PermuteForward_type1(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, Dtype* const top_data, int* map_idx) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % width;
    const int ph = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    int new_idx = n * width * height * channels + pw * channels * height + ph + c* height;
    map_idx[index] = new_idx;
    top_data[new_idx] = bottom_data[index];
  }
}

// Forward pass: permutes bottom[0] into top[0] according to type_ and caches
// the per-element index map (map_idx_) needed to route gradients backward.
template <typename Dtype>
void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  int *map_idx = map_idx_.mutable_gpu_data();
  if (type_ == 0){
    hipLaunchKernelGGL(( PermuteForward_type0<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, bottom[0]->num(), channels_,height_, width_,top_data, map_idx);
  }
  else{
    hipLaunchKernelGGL(( PermuteForward_type1<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, bottom[0]->num(), channels_,height_, width_,top_data, map_idx);
  }
  CUDA_POST_KERNEL_CHECK;
}

// Backward kernel: inverse gather — each bottom element pulls its gradient from
// the top position it was forwarded to (map_idx[index]).
template <typename Dtype>
__global__ void PermuteBackward(const int nthreads, const Dtype* const top_diff, const int* map_idx, Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    bottom_diff[index] = top_diff[map_idx[index]];
  }
}

// Backward pass: routes top[0]'s gradient back through the cached index map.
template <typename Dtype>
void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  // NOTE(review): PermuteBackward overwrites every element of bottom_diff, so
  // this zero-fill looks redundant — confirm before removing.
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  hipLaunchKernelGGL(( PermuteBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff,map_idx_.gpu_data(), bottom_diff);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer);

} // namespace caffe
| d8df048cd4e202ef2da6da0ce468791e90376a62.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/permute_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward permutation kernel, type 0: moves each element of an NCHW blob to the
// flat position ((n*H + h)*W + w)*C + c (i.e. NHWC order) and records the
// source->destination index mapping in map_idx for use by the backward pass.
template <typename Dtype>
__global__ void PermuteForward_type0(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, Dtype* const top_data, int* map_idx) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat NCHW index
    const int pw = index % width;
    const int ph = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    int new_idx = n * width * height * channels + ph * channels * width + pw * channels + c;
    map_idx[index] = new_idx;
    top_data[new_idx] = bottom_data[index];
  }
}
// Forward permutation kernel, type 1: moves each element of an NCHW blob to the
// flat position ((n*W + w)*C + c)*H + h and records the same index mapping.
template <typename Dtype>
__global__ void PermuteForward_type1(const int nthreads,
    const Dtype* const bottom_data, const int num, const int channels,
    const int height, const int width, Dtype* const top_data, int* map_idx) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // decompose the flat NCHW index
    const int pw = index % width;
    const int ph = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    int new_idx = n * width * height * channels + pw * channels * height + ph + c* height;
    map_idx[index] = new_idx;
    top_data[new_idx] = bottom_data[index];
  }
}
// GPU forward pass of PermuteLayer: scatter bottom[0] into top[0] according
// to the layer's permutation type (type_ == 0 -> channel-fastest layout,
// otherwise the type-1 height-fastest layout) and record the per-element
// target index in map_idx_ for the backward pass.
// Note: the `num` argument passed below is unused inside both kernels.
template <typename Dtype>
void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
int *map_idx = map_idx_.mutable_gpu_data();
// One thread per element; kernel choice selects the output layout.
if (type_ == 0){
PermuteForward_type0<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,height_, width_,top_data, map_idx);
}
else{
PermuteForward_type1<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,height_, width_,top_data, map_idx);
}
CUDA_POST_KERNEL_CHECK;
}
// Backward pass of the permutation: gather gradients back into bottom layout
// using the index map recorded by the forward kernels
// (bottom_diff[i] = top_diff[map_idx[i]]).
template <typename Dtype>
__global__ void PermuteBackward(const int nthreads, const Dtype* const top_diff, const int* map_idx, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Inverse of the forward scatter top[map_idx[i]] = bottom[i].
bottom_diff[index] = top_diff[map_idx[index]];
}
}
// GPU backward pass of PermuteLayer: route top gradients back to the bottom
// blob through the index map built during Forward_gpu.
template <typename Dtype>
void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// Defensive zero-fill before the gather kernel writes every element.
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// One thread per bottom element; map_idx_ holds the forward permutation.
PermuteBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff,map_idx_.gpu_data(), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer);
} // namespace caffe
|
f1e793f8137f5b4e89dd8f4f4da75525cccb7d8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix convolution.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
unsigned int timer_name;
unsigned int timer_name1;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point.  Two modes:
//  - default (argc not 4 or 5): random KERNEL_SIZE^2 kernel M, fixed 2048x2048 N
//  - argc == 4 or 5: argv[1] = parameter file (height, width),
//    argv[2] = kernel matrix file, argv[3] = image file,
//    optional argv[4] = output file for the GPU result
// Runs the convolution on the GPU and on the CPU, times both, and compares
// the results with a 0.001 tolerance.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
// Fixed seed so randomly generated inputs are reproducible across runs.
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices with random data
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
//N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
N = AllocateMatrix(2048, 2048, 1);
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
unsigned int data_read = 0;
// Fixed: "&params" had been corrupted to the mojibake "¶ms",
// which does not compile.
cutReadFilei(argv[1], &params, &data_read, true);
if(data_read != 2){
printf("Error reading parameter file\n");
cutFree(params);
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
cutFree(params);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
printf("Size %dx%d ", N.width, N.height);
// M (*) N on the device.  timer_name is started inside ConvolutionOnDevice
// and measures the kernel alone; timer_name1 measures the whole GPU path
// including host<->device transfers.
cutCreateTimer(&timer_name);
cutCreateTimer(&timer_name1);
cutStartTimer(timer_name1);
ConvolutionOnDevice(M, N, P);
cutStopTimer(timer_name1);
printf(" GPU %f ",cutGetTimerValue(timer_name1));
printf(" Overhead %f "
,(cutGetTimerValue(timer_name1)-cutGetTimerValue(timer_name)));
// compute the matrix convolution on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
cutResetTimer(timer_name);
cutStartTimer(timer_name);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
cutStopTimer(timer_name);
printf(" CPU %f ",cutGetTimerValue(timer_name));
cutDeleteTimer(timer_name);
// check if the GPU result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.width * P.height, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
// NOTE(review): argc == 2 takes the random-input branch above where argv[1]
// is never read; writing the result there looks like leftover behaviour --
// confirm it is intended.
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Convolve N with the KERNEL_SIZE x KERNEL_SIZE kernel M on the GPU; the
// result is written into P.  M is staged in constant memory (symbol Mc,
// declared in the included 2Dconvolution_kernel.cu) so all threads read it
// through the broadcast constant cache; N and P go through global memory.
// Timer `timer_name` brackets only the kernel execution so main() can
// report transfer overhead separately.
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M into constant memory and N into global memory.
Matrix Md = AllocateDeviceMatrix(M);
//CopyToDeviceMatrix(Md, M);
// Fixed: pass the symbol via HIP_SYMBOL(Mc).  The original passed the
// string "Mc", which HIP accepts as a const void* and then copies to the
// address of the string literal instead of the __constant__ array.
hipError_t err = hipMemcpyToSymbol(HIP_SYMBOL(Mc), M.elements,
KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
if(hipSuccess != err)
printf("Const mem error: %s\n", hipGetErrorString(err));
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device and clear it.
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// One TILE_SIZE x TILE_SIZE output tile per block, BLOCK_SIZE^2 threads each.
dim3 grid = dim3(ceil(float (N.width)/TILE_SIZE), ceil(float
(N.height)/TILE_SIZE),1);
dim3 block = dim3(BLOCK_SIZE, BLOCK_SIZE,1);
cutStartTimer(timer_name);
// Launch the device computation threads!
hipLaunchKernelGGL(( ConvolutionKernel), dim3(grid),dim3(block), 0, 0, Md, Nd, Pd);
// Surface launch-configuration errors; kernel launches fail silently otherwise.
err = hipGetLastError();
if(hipSuccess != err)
printf("Kernel launch error: %s\n", hipGetErrorString(err));
// Synchronize so the timer measures the kernel, not just the async launch.
hipDeviceSynchronize();
cutStopTimer(timer_name);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
printf(" Kernel %f ",cutGetTimerValue(timer_name));
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// Allocate device storage for a matrix with the same dimensions as M.
// Only `elements` is replaced with a device pointer; the caller owns the
// allocation and must release it with FreeDeviceMatrix().
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
// size_t: the original `int` overflows for matrices of >= 2^31 bytes.
size_t size = (size_t)M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
// Allocate a host matrix of dimensions height*width.
// init == 0: zero-fill (the sign flip below negates 0.0f, which is benign).
// init == 1: uniform random values in (-1, 1).
// init == 2: set dimensions only, no element storage.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
// size_t: the original `int` element count overflows for large matrices.
size_t size = (size_t)M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
for(size_t i = 0; i < size; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
// Random sign so values cover both halves of the range.
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
// Copy a host matrix's elements into a previously allocated device matrix
// (synchronous transfer).
// NOTE(review): Mdevice is passed by value, so the height/width/pitch
// assignments below only mutate the local copy -- they are no-ops for the
// caller and are kept unchanged here.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
// Copy a device matrix's elements back into a host matrix of the same
// dimensions (synchronous transfer; safe to read Mhost afterwards).
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
// Release a device matrix's storage and null the pointer to guard against
// accidental reuse or double-free.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
// Release a host matrix's storage and null the pointer to guard against
// accidental reuse or double-free.
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a 16x16 floating point matrix in from file
// Fill M->elements from a float file via cutil.  Expects exactly
// M->height * M->width values; returns the number of floats read.
// NOTE(review): presumably cutReadFilef may (re)allocate M->elements --
// verify against the cutil implementation in use.
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
// Write all M.width * M.height floats of M to file_name via cutil
// (0.0001f is the epsilon cutWriteFilef uses for formatting/comparison).
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
| f1e793f8137f5b4e89dd8f4f4da75525cccb7d8b.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix convolution.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <2Dconvolution_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
unsigned int timer_name;
unsigned int timer_name1;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point.  Two modes:
//  - default (argc not 4 or 5): random KERNEL_SIZE^2 kernel M, fixed 2048x2048 N
//  - argc == 4 or 5: argv[1] = parameter file (height, width),
//    argv[2] = kernel matrix file, argv[3] = image file,
//    optional argv[4] = output file for the GPU result
// Runs the convolution on the GPU and on the CPU, times both, and compares
// the results with a 0.001 tolerance.
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
// Fixed seed so randomly generated inputs are reproducible across runs.
srand(2012);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices with random data
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
//N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
N = AllocateMatrix(2048, 2048, 1);
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL;
unsigned int data_read = 0;
// Fixed: "&params" had been corrupted to the mojibake "¶ms",
// which does not compile.
cutReadFilei(argv[1], &params, &data_read, true);
if(data_read != 2){
printf("Error reading parameter file\n");
cutFree(params);
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
cutFree(params);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
printf("Size %dx%d ", N.width, N.height);
// M (*) N on the device.  timer_name is started inside ConvolutionOnDevice
// and measures the kernel alone; timer_name1 measures the whole GPU path
// including host<->device transfers.
cutCreateTimer(&timer_name);
cutCreateTimer(&timer_name1);
cutStartTimer(timer_name1);
ConvolutionOnDevice(M, N, P);
cutStopTimer(timer_name1);
printf(" GPU %f ",cutGetTimerValue(timer_name1));
printf(" Overhead %f "
,(cutGetTimerValue(timer_name1)-cutGetTimerValue(timer_name)));
// compute the matrix convolution on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
cutResetTimer(timer_name);
cutStartTimer(timer_name);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
cutStopTimer(timer_name);
printf(" CPU %f ",cutGetTimerValue(timer_name));
cutDeleteTimer(timer_name);
// check if the GPU result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, P.width * P.height, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
// NOTE(review): argc == 2 takes the random-input branch above where argv[1]
// is never read; writing the result there looks like leftover behaviour --
// confirm it is intended.
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Convolve N with the KERNEL_SIZE x KERNEL_SIZE kernel M on the GPU; the
// result is written into P.  M is staged in constant memory (symbol Mc,
// declared in the included 2Dconvolution_kernel.cu) so all threads read it
// through the broadcast constant cache; N and P go through global memory.
// Timer `timer_name` brackets only the kernel execution so main() can
// report transfer overhead separately.
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M into constant memory and N into global memory.
Matrix Md = AllocateDeviceMatrix(M);
//CopyToDeviceMatrix(Md, M);
// Fixed: pass the symbol itself.  The string-name overload
// cudaMemcpyToSymbol("Mc", ...) was deprecated in CUDA 4.1 and removed in
// CUDA 5.0, so the original call no longer compiles on modern toolkits.
cudaError_t err = cudaMemcpyToSymbol(Mc, M.elements,
KERNEL_SIZE*KERNEL_SIZE*sizeof(float));
if(cudaSuccess != err)
printf("Const mem error: %s\n", cudaGetErrorString(err));
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device and clear it.
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// One TILE_SIZE x TILE_SIZE output tile per block, BLOCK_SIZE^2 threads each.
dim3 grid = dim3(ceil(float (N.width)/TILE_SIZE), ceil(float
(N.height)/TILE_SIZE),1);
dim3 block = dim3(BLOCK_SIZE, BLOCK_SIZE,1);
cutStartTimer(timer_name);
// Launch the device computation threads!
ConvolutionKernel<<<grid,block>>>(Md, Nd, Pd);
// Surface launch-configuration errors; kernel launches fail silently otherwise.
err = cudaGetLastError();
if(cudaSuccess != err)
printf("Kernel launch error: %s\n", cudaGetErrorString(err));
// Synchronize so the timer measures the kernel, not just the async launch.
cudaDeviceSynchronize();
cutStopTimer(timer_name);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
printf(" Kernel %f ",cutGetTimerValue(timer_name));
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// Allocate device storage for a matrix with the same dimensions as M.
// Only `elements` is replaced with a device pointer; the caller owns the
// allocation and must release it with FreeDeviceMatrix().
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
// size_t: the original `int` overflows for matrices of >= 2^31 bytes.
size_t size = (size_t)M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
// Allocate a host matrix of dimensions height*width.
// init == 0: zero-fill (the sign flip below negates 0.0f, which is benign).
// init == 1: uniform random values in (-1, 1).
// init == 2: set dimensions only, no element storage.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
// size_t: the original `int` element count overflows for large matrices.
size_t size = (size_t)M.width * M.height;
M.elements = (float*) malloc(size*sizeof(float));
for(size_t i = 0; i < size; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
// Random sign so values cover both halves of the range.
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
// Copy a host matrix's elements into a previously allocated device matrix
// (synchronous transfer).
// NOTE(review): Mdevice is passed by value, so the height/width/pitch
// assignments below only mutate the local copy -- they are no-ops for the
// caller and are kept unchanged here.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
// Copy a device matrix's elements back into a host matrix of the same
// dimensions (synchronous transfer; safe to read Mhost afterwards).
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
// Release a device matrix's storage and null the pointer to guard against
// accidental reuse or double-free.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
// Release a host matrix's storage and null the pointer to guard against
// accidental reuse or double-free.
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a 16x16 floating point matrix in from file
// Fill M->elements from a float file via cutil.  Expects exactly
// M->height * M->width values; returns the number of floats read.
// NOTE(review): presumably cutReadFilef may (re)allocate M->elements --
// verify against the cutil implementation in use.
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
// Write all M.width * M.height floats of M to file_name via cutil
// (0.0001f is the epsilon cutWriteFilef uses for formatting/comparison).
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height,
0.0001f);
}
|
c9fd1b68a0a02c718db8807555186adcc72c4f08.hip | // !!! This is a file automatically generated by hipify!!!
// Fix for gcc 4.7
//#undef _GLIBCXX_ATOMIC_BUILTINS
//#undef _GLIBCXX_USE_INT128
#include "thrust/sort.h"
#include "thrust/unique.h"
#include <thrust/remove.h>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include "hip/hip_runtime.h"
#include "cudahelper.cuh"
#include "pointcloudcuda.cuh"
#include "helper_math.h"
#include "grid.cuh"
// pointcloud parameters in constant memory
__constant__ ParametersPointCloud paramsPointCloud;
// True when any component of a and b differs (exact float comparison,
// no epsilon).  Parameters are const references so the operator also
// accepts const operands and rvalues; the original non-const references
// rejected those while behaving identically for the calls that compiled.
inline __host__ __device__ bool operator!=(const float3 &a, const float3 &b)
{
return !(a.x == b.x && a.y == b.y && a.z == b.z);
}
// True when any component of a and b differs (exact float comparison,
// no epsilon; the .w scan-distance component participates).  Const
// references so const operands and rvalues are accepted too.
inline __host__ __device__ bool operator!=(const float4 &a, const float4 &b)
{
return !(a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w);
}
// rearrange particle data into sorted order (sorted according to containing grid cell), and find the start of each cell in the sorted hash array
// Return (via *ptr) the device address of the __constant__ parameter block
// paramsPointCloud, so callers can update individual members directly
// instead of re-uploading the whole struct.
void getDeviceAddressOfParametersPointCloud(ParametersPointCloud** ptr)
{
cudaSafeCall(hipGetSymbolAddress((void**)ptr, paramsPointCloud));
}
// Upload the full host-side parameter struct into the device's __constant__
// copy (paramsPointCloud), read by all point-cloud kernels in this file.
void copyParametersToGpu(ParametersPointCloud *hostParams)
{
// copy parameters to constant memory
cudaSafeCall(hipMemcpyToSymbol(paramsPointCloud, hostParams, sizeof(ParametersPointCloud)));
}
// Returns the presence of neighbors of @pos within paramsPointCloud.minimumDistance in @gridCell
// Caller must ensure:
// - pos is in grid
// - gridCell is valid (less than grid.cells)
// Scan one grid cell for neighbors of `pos` that lie within
// paramsPointCloud.minimumDistance.  For every neighbor found, its full
// float4 (xyz position plus .w scan distance) is accumulated into
// clusterPosition and numberofNeighborsFound is incremented; the caller
// derives the cluster centroid and average scan distance from these.
__device__ void checkCellForNeighbors(
int3 gridCell, // grid cell to search for particles that could collide
uint index, // index (into the sorted arrays) of the particle being collided
float4 pos, // position of particle that is being collided
float4* posSorted, // positions sorted by containing grid cell
uint* pointCellStart, // per-cell start offsets into posSorted
uint* pointCellStopp, // per-cell end offsets into posSorted
float4& clusterPosition, // in/out: sum of neighbor float4s (xyz + scan distance in .w)
uint& numberofNeighborsFound) // in/out: incremented once per neighbor found
{
const uint gridHash = paramsPointCloud.grid.getCellHash(gridCell);
// get start of bucket for this cell
uint startIndex = pointCellStart[gridHash];
// 0xffffffff marks an empty cell
if(startIndex != 0xffffffff)
{
// iterate over particles in this cell
uint endIndex = pointCellStopp[gridHash];
for(uint j=startIndex; j<endIndex; j++)
{
// check not colliding with self
if(j != index)
{
const float4 posOther = posSorted[j];
const float4 relPos = pos - posOther;
// Squared distance on xyz only -- avoids a sqrt per candidate pair.
float distSquared = lengthSquared(make_float3(relPos));
// If they collide AND we're checking the point that was further from the scanner, THEN reduce it!
if(distSquared < paramsPointCloud.minimumDistance * paramsPointCloud.minimumDistance)
{
clusterPosition += posOther;
numberofNeighborsFound ++;
}
}
}
}
}
// not really random, but should be good enough.
// Cheap pseudo-random value in [0, 1), mixed from the thread/block ids and
// @seed with two multiply-with-carry style steps on 16-bit halves.
// As the author notes, this is not statistically strong -- intended only
// for tie-breaking decisions, not for sampling.
__device__ float randomNumber(uint seed)
{
uint a = threadIdx.x * seed;
uint b = blockIdx.x * blockDim.x;
b = 36969 * (b & 65535) + (b >> 16);
a = 18000 * (a & 65535) + (a >> 16);
// Division by 2^32-1 (a double constant) normalizes into [0, 1).
return ((b << 16) + a) / 4294967295.0;
}
// Collide a single point (given by thread-id through @index) against all points in own and neighboring cells
// Per-point reduction kernel (one thread per point).  Each point gathers all
// neighbors within minimumDistance from its own grid cell and the 26
// surrounding cells.  If any neighbors exist: the point is zeroed out
// (tombstoned, later compacted by removeClearedPoints) when the neighbors'
// average scan distance (.w) exceeds its own, otherwise it is replaced by
// the neighborhood centroid.  Writes go to posOriginal via the sorted->
// original index map, so the sorted buffer stays intact during the pass.
__global__
void markCollidingPointsD(
float4* posOriginal, // output: new positions, same or zeroed. This is actually mDevicePointPos, so its the original position location
float4* positionsSorted, // input: positions sorted according to containing grid cell
uint* gridPointIndex, // input: particle indices sorted according to containing grid cell
uint* pointCellStart, // input: pointCellStart[19] contains the index of gridParticleIndex in which cell 19 starts
uint* pointCellStopp, // input: pointCellStopp[19] contains the index of gridParticleIndex in which cell 19 ends
uint numPoints) // input: number of total particles
{
uint threadIndex = getThreadIndex1D();
if(threadIndex >= numPoints) return;
// read particle data from sorted arrays
const float4 worldPos = positionsSorted[threadIndex];
// get address of particle in grid
const int3 gridCellCoordinate = paramsPointCloud.grid.getCellCoordinate(make_float3(worldPos));
// Do not process points that are not in the defined grid!
if(gridCellCoordinate.x == -1)
{
printf("got a point not in grid, ouch!\n");
return;
}
const uint originalIndex = gridPointIndex[threadIndex];
float4 clusterPosition = make_float4(0.0);
unsigned int numberOfCollisionsInOwnCell = 0;
unsigned int numberOfCollisionsInNeighborCells = 0;
// This code tries to optimize, thinking: If we already find many neighbors in our own cell, there's really not much
// use in looking in other cells, too. We could even go so far as to test for the minimumDistance vs cellSize: If the
// mindist is 20 cm, the cell contains 100 points and the cellsize is 20cm, then we know we have lots of neighbors
// without even looking at them. But we don't know their .w component values!
checkCellForNeighbors(
gridCellCoordinate,
threadIndex,
worldPos,
positionsSorted,
pointCellStart,
pointCellStopp,
clusterPosition,
numberOfCollisionsInOwnCell);
// examine neighbouring cells
// The own-cell count never changes inside this loop, so the `< 5` test is
// loop-invariant: it acts as an entry guard that skips the 26-cell search
// entirely when the own cell already yielded 5+ neighbors.
for(int z=-1; z<=1 && numberOfCollisionsInOwnCell < 5; z++)
{
for(int y=-1; y<=1; y++)
{
for(int x=-1; x<=1; x++)
{
const int3 neighbourGridCoordinate = gridCellCoordinate + make_int3(x, y, z);
if(x == 0 && y == 0 && z == 0) continue;
checkCellForNeighbors(
neighbourGridCoordinate,
threadIndex,
worldPos,
positionsSorted,
pointCellStart,
pointCellStopp,
clusterPosition,
numberOfCollisionsInNeighborCells);
}
}
}
const float numberOfCollisionsTotal = numberOfCollisionsInOwnCell + numberOfCollisionsInNeighborCells;
if(numberOfCollisionsTotal > 0.0)
{
// .w accumulated the neighbors' scan distances; average is the quality proxy.
const float averageNeighborScanDistance = clusterPosition.w / numberOfCollisionsTotal;
if(averageNeighborScanDistance > worldPos.w /*&& randomNumber(numPoints + threadIndex) * numberOfCollisionsTotal > 1.0*/)
{
// If the other neighbors are of better quality, delete ourselves
posOriginal[originalIndex] = make_float4(0.0);
}
else
{
// If we're better, move us into the center of our neighborhood
posOriginal[originalIndex] = clusterPosition / numberOfCollisionsTotal;
}
}
//posOriginal[originalIndex] = make_float4(worldPos.x, worldPos.y, worldPos.z, numberOfCollisionsTotal);
}
// Host-side launcher for markCollidingPointsD: one thread per point,
// 128 threads per block.  No-op when numPoints == 0.  Results are written
// back into posOriginal (zeroed tombstones or centroids); compact afterwards
// with removeClearedPoints().
void markCollidingPoints(
float* posOriginal,
float* posSorted,
unsigned int* gridPointIndex,
unsigned int* pointCellStart,
unsigned int* pointCellStopp,
unsigned int numPoints)
{
if(numPoints == 0) return;
// thread per particle
uint numThreads, numBlocks;
computeExecutionKernelGrid(numPoints, 128, numBlocks, numThreads);
//std::cout << "markCollidingPoints(): we have " << numPoints << " points, " << numThreads << " threads and " << numBlocks << " blocks" << std::endl;
// execute the kernel
// TODO: test optimization: Write back not into posOriginal, but into posSorted (should be faster), then memcpy posSorted into posOriginal.
// If writing back into posSorted is faster, we should be able to just switch buffers after every reduction.
hipLaunchKernelGGL(( markCollidingPointsD), dim3(numBlocks), dim3(numThreads) , 0, 0,
(float4*)posOriginal,
(float4*)posSorted,
gridPointIndex,
pointCellStart,
pointCellStopp,
numPoints
);
cudaCheckSuccess("markCollidingPoints");
}
// bounding box type
typedef thrust::pair<float4, float4> bbox;
// reduce a pair of bounding boxes (a,b) to a bounding box containing a and b
// Binary reduction operator: merge two bounding boxes into the smallest box
// enclosing both.  Used by the transform_reduce in getBoundingBox().
// Note the .w component of both corners is reset to 0 -- scan distance is
// not part of the spatial bounds.
struct bbox_reduction : public thrust::binary_function<bbox,bbox,bbox>
{
__host__ __device__
bbox operator()(bbox a, bbox b)
{
// min corner
float4 min = make_float4(thrust::min(a.first.x, b.first.x), thrust::min(a.first.y, b.first.y), thrust::min(a.first.z, b.first.z), 0);
// max corner
float4 max = make_float4(thrust::max(a.second.x, b.second.x), thrust::max(a.second.y, b.second.y), thrust::max(a.second.z, b.second.z), 0);
return bbox(min, max);
}
};
// convert a point to a bbox containing that point, (point) -> (point, point)
// Unary transform operator: lift a single point to a degenerate bounding box
// (point, point), the identity element fed into bbox_reduction.
struct bbox_transformation : public thrust::unary_function<float4,bbox>
{
__host__ __device__
bbox operator()(float4 point)
{
return bbox(point, point);
}
};
// Compute the axis-aligned bounding box of numPoints device-resident float4
// points with a single thrust transform_reduce pass; results are returned
// through min/max (the .w component is discarded).
// Precondition: numPoints >= 1 -- dev_ptr[0] is read to seed the reduction.
void getBoundingBox(float *dPoints, uint numPoints, float3& min, float3& max)
{
float4* points = (float4*)dPoints;
// wrap raw pointer with a device_ptr
thrust::device_ptr<float4> dev_ptr = thrust::device_pointer_cast(points);
// Seed with the first point so the reduction identity is a real point.
bbox init = bbox(dev_ptr[0], dev_ptr[0]);
// transformation operation
bbox_transformation opConvertPointToBoundingBox;
// binary reduction operation
bbox_reduction opUnifyBoundingBoxes;
// compute the bounding box for the point set
bbox result = thrust::transform_reduce(
thrust::device_ptr<float4>(points),
thrust::device_ptr<float4>(points + numPoints),
opConvertPointToBoundingBox,
init,
opUnifyBoundingBoxes);
min = make_float3(result.first);
max = make_float3(result.second);
}
// Sort the cell-hash keys and permute the point-index values in lockstep, so
// points belonging to the same grid cell become contiguous in the sorted
// order.  No-op for an empty set.
void sortMapAccordingToKeys(uint *dGridCellIndex, uint *dGridPointIndex, uint numPoints)
{
if(numPoints == 0) return;
thrust::sort_by_key(thrust::device_ptr<uint>(dGridCellIndex), // KeysBeginning
thrust::device_ptr<uint>(dGridCellIndex + numPoints), // KeysEnd
thrust::device_ptr<uint>(dGridPointIndex)); // ValuesBeginning
cudaCheckSuccess("sortMapAccordingToKeys");
}
// Exact component-wise equality of two float4 values (plain float
// comparison, no epsilon tolerance; the .w component participates).
inline __host__ __device__ bool operator==(float4 a, float4 b)
{
const bool xyEqual = (a.x == b.x) && (a.y == b.y);
const bool zwEqual = (a.z == b.z) && (a.w == b.w);
return xyEqual && zwEqual;
}
// Compact the point buffer in place: drop every entry that was zeroed out by
// markCollidingPointsD (make_float4(0) acts as the tombstone value) and
// return the number of points remaining at the front of the buffer.
// NOTE(review): the thrust::count call is debug output only and costs an
// extra full pass over the data.
unsigned int removeClearedPoints(float *devicePoints, unsigned int numberOfPoints)
{
float4* points = (float4*)devicePoints;
thrust::device_ptr<float4> newEnd;
try
{
// Just for debugging!
int result = thrust::count(thrust::device_ptr<float4>(points),
thrust::device_ptr<float4>(points + numberOfPoints),
make_float4(0.0f));
std::cerr << __PRETTY_FUNCTION__ << " removing zero points: " << result << " of " << numberOfPoints << std::endl;
newEnd = thrust::remove(
thrust::device_ptr<float4>(points),
thrust::device_ptr<float4>(points + numberOfPoints),
make_float4(0.0f)
);
}
catch(thrust::system_error &e)
{
// output an error message and exit
std::cerr << "Error accessing vector element: " << e.what() << std::endl;
exit(-1);
}
cudaCheckSuccess("removeZeroPoints");
// Elements past newEnd are unspecified; only the count matters.
unsigned int numberOfPointsLeft = newEnd.get() - points;
return numberOfPointsLeft;
}
// Thrust predicate: true when the point lies strictly outside the box.
// Comparisons are strict, so points exactly on a face are NOT considered
// outside and survive removePointsOutsideBoundingBox().  (Compare with
// IsInsideBoundingBoxOp, which excludes boundary points.)
struct IsOutsideBoundingBoxOp
{
const float3 mBBoxMin, mBBoxMax;
IsOutsideBoundingBoxOp(const float3& boxMin, const float3& boxMax) :
mBBoxMin(boxMin),
mBBoxMax(boxMax)
{ }
__host__ __device__
bool operator()(const float4 point)
{
return
mBBoxMin.x > point.x ||
mBBoxMin.y > point.y ||
mBBoxMin.z > point.z ||
mBBoxMax.x < point.x ||
mBBoxMax.y < point.y ||
mBBoxMax.z < point.z;
}
};
// Compact the buffer so that only points inside (or on the boundary of) the
// grid's world bounding box remain at the front; returns the new count.
// Elements beyond the returned count are left in an unspecified state.
unsigned int removePointsOutsideBoundingBox(float* points, unsigned int numberOfPoints, Grid* grid)
{
printf("removePointsOutsideBoundingBox(): clearing %d points if outside of %.2f %.2f %.2f and %.2f %.2f %.2f\n", numberOfPoints, grid->worldMin.x, grid->worldMin.y, grid->worldMin.z, grid->worldMax.x, grid->worldMax.y, grid->worldMax.z);
// move all points in bbox to beginning of devicePointsBase and return number of points left
IsOutsideBoundingBoxOp op(grid->worldMin, grid->worldMax);
float4* pointsf4 = (float4*)points;
const thrust::device_ptr<float4> newEnd = thrust::remove_if(
thrust::device_ptr<float4>(pointsf4),
thrust::device_ptr<float4>(pointsf4 + numberOfPoints),
op);
cudaCheckSuccess("removePointsOutsideBoundingBox");
unsigned int numberOfPointsRemaining = newEnd.get() - pointsf4;
printf("removePointsOutsideBoundingBox(): done, %d points deleted, %d points remaining\n", numberOfPoints - numberOfPointsRemaining, numberOfPointsRemaining);
return numberOfPointsRemaining;
}
// Device-to-device copy of numberOfPointsToCopy float4 points from src to
// dst; returns the number of points copied.  Buffers must not overlap.
unsigned int copyPoints(float* devicePointsBaseDst, float* devicePointsBaseSrc, unsigned int numberOfPointsToCopy)
{
float4* pointsSrc = (float4*)devicePointsBaseSrc;
float4* pointsDst = (float4*)devicePointsBaseDst;
const thrust::device_ptr<float4> newEnd = thrust::copy(
thrust::device_ptr<float4>(pointsSrc),
thrust::device_ptr<float4>(pointsSrc + numberOfPointsToCopy),
thrust::device_ptr<float4>(pointsDst));
cudaCheckSuccess("copyPoints");
const unsigned int numberOfPointsCopied = newEnd.get() - pointsDst;
return numberOfPointsCopied;
}
// Thrust predicate: true when the point lies strictly inside the box.
// Points exactly on a face count as outside here -- note the asymmetry with
// IsOutsideBoundingBoxOp, where boundary points count as inside.
struct IsInsideBoundingBoxOp
{
const float3 mBBoxMin, mBBoxMax;
IsInsideBoundingBoxOp(const float3& boxMin, const float3& boxMax) :
mBBoxMin(boxMin),
mBBoxMax(boxMax)
{ }
__host__ __device__
bool operator()(const float4 point)
{
return
mBBoxMin.x < point.x &&
mBBoxMin.y < point.y &&
mBBoxMin.z < point.z &&
mBBoxMax.x > point.x &&
mBBoxMax.y > point.y &&
mBBoxMax.z > point.z;
}
};
// Requires dst and src to live in device memory space
// Copy only the points strictly inside the given bounding box from src to
// dst (both device buffers); returns how many points were copied.
// Boundary points are excluded (see IsInsideBoundingBoxOp).
unsigned int copyPointsInBoundingBox(float* devicePointsBaseDst, float* devicePointsBaseSrc, float3 &bBoxMin, float3 &bBoxMax, unsigned int numberOfPointsToCopy)
{
float4* pointsSrc = (float4*)devicePointsBaseSrc;
float4* pointsDst = (float4*)devicePointsBaseDst;
IsInsideBoundingBoxOp op(bBoxMin, bBoxMax);
const thrust::device_ptr<float4> newEnd = thrust::copy_if(
thrust::device_ptr<float4>(pointsSrc),
thrust::device_ptr<float4>(pointsSrc + numberOfPointsToCopy),
thrust::device_ptr<float4>(pointsDst),
op);
cudaCheckSuccess("copyPointsInBoundingBox");
const unsigned int numberOfPointsCopied = newEnd.get() - pointsDst;
return numberOfPointsCopied;
}
| c9fd1b68a0a02c718db8807555186adcc72c4f08.cu | // Fix for gcc 4.7
//#undef _GLIBCXX_ATOMIC_BUILTINS
//#undef _GLIBCXX_USE_INT128
#include "thrust/sort.h"
#include "thrust/unique.h"
#include <thrust/remove.h>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include "cuda.h"
#include "cudahelper.cuh"
#include "pointcloudcuda.cuh"
#include "helper_math.h"
#include "grid.cuh"
// pointcloud parameters in constant memory
__constant__ ParametersPointCloud paramsPointCloud;
// True when any component of a and b differs (exact float comparison,
// no epsilon).  Parameters are const references so the operator also
// accepts const operands and rvalues; the original non-const references
// rejected those while behaving identically for the calls that compiled.
inline __host__ __device__ bool operator!=(const float3 &a, const float3 &b)
{
return !(a.x == b.x && a.y == b.y && a.z == b.z);
}
// True when any component of a and b differs (exact float comparison,
// no epsilon; the .w scan-distance component participates).  Const
// references so const operands and rvalues are accepted too.
inline __host__ __device__ bool operator!=(const float4 &a, const float4 &b)
{
return !(a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w);
}
// Writes the device-side address of the __constant__ parameter struct
// into *ptr, for callers that need a raw pointer to the symbol.
// (The old comment here described cell sorting and was left over from
// elsewhere.)
void getDeviceAddressOfParametersPointCloud(ParametersPointCloud** ptr)
{
    cudaSafeCall(cudaGetSymbolAddress((void**)ptr, paramsPointCloud));
}
// Uploads *hostParams into the GPU __constant__ symbol paramsPointCloud
// via cudaMemcpyToSymbol, making it visible to all kernels in this file.
void copyParametersToGpu(ParametersPointCloud *hostParams)
{
    // copy parameters to constant memory
    cudaSafeCall(cudaMemcpyToSymbol(paramsPointCloud, hostParams, sizeof(ParametersPointCloud)));
}
// Scans one grid cell for neighbors of @pos that lie closer than
// paramsPointCloud.minimumDistance. For every such neighbor, the full
// float4 (xyz position plus the .w payload, which presumably carries the
// scan distance -- TODO confirm against the writer of these points) is
// accumulated into @clusterPosition and @numberofNeighborsFound is
// incremented. Despite the old comment, nothing is returned.
// Caller must ensure:
//  - pos is in grid
//  - gridCell is valid (less than grid.cells)
__device__ void checkCellForNeighbors(
    int3 gridCell,        // grid cell to search for particles that could collide
    uint index,           // index (in sorted order) of the particle being collided; skipped during the scan
    float4 pos,           // position of particle that is being collided
    float4* posSorted,    // positions sorted by containing grid cell
    uint* pointCellStart, // pointCellStart[hash]: first index of that cell in posSorted
    uint* pointCellStopp, // pointCellStopp[hash]: one-past-last index of that cell
    float4& clusterPosition,        // in/out: component-wise sum of all neighbors found
    uint& numberofNeighborsFound)   // in/out: number of neighbors found
{
    const uint gridHash = paramsPointCloud.grid.getCellHash(gridCell);
    // get start of bucket for this cell
    uint startIndex = pointCellStart[gridHash];
    // cell is not empty (0xffffffff marks an empty cell)
    if(startIndex != 0xffffffff)
    {
        // iterate over particles in this cell
        uint endIndex = pointCellStopp[gridHash];
        for(uint j=startIndex; j<endIndex; j++)
        {
            // check not colliding with self
            if(j != index)
            {
                const float4 posOther = posSorted[j];
                const float4 relPos = pos - posOther;
                float distSquared = lengthSquared(make_float3(relPos));
                // compare squared distances to avoid a sqrt per candidate
                if(distSquared < paramsPointCloud.minimumDistance * paramsPointCloud.minimumDistance)
                {
                    clusterPosition += posOther;
                    numberofNeighborsFound ++;
                }
            }
        }
    }
}
// Cheap pseudo-random number in [0,1] -- not really random, but should be
// good enough. One Marsaglia-style multiply-with-carry step per component.
__device__ float randomNumber(uint seed)
{
    // Fixed: +1 keeps thread 0 from producing a seed-independent stream
    // (threadIdx.x * seed is always 0 for threadIdx.x == 0).
    uint a = (threadIdx.x + 1) * seed;
    uint b = blockIdx.x * blockDim.x;
    b = 36969 * (b & 65535) + (b >> 16);
    a = 18000 * (a & 65535) + (a >> 16);
    // Fixed: multiply by a float reciprocal; the old "/ 4294967295.0"
    // literal forced a double-precision division in device code.
    return ((b << 16) + a) * (1.0f / 4294967295.0f);
}
// Collide a single point (given by thread-id through @index) against all points in own and neighboring cells.
// One thread per point; launch with >= numPoints threads. For each point it
// gathers neighbors within paramsPointCloud.minimumDistance and then either
// zeroes the point (marking it for removal by removeClearedPoints()) or
// moves it to the neighbor centroid. The .w component is treated as a
// quality value (presumably scan distance -- see checkCellForNeighbors).
__global__
void markCollidingPointsD(
    float4* posOriginal,    // output: new positions, same or zeroed. This is actually mDevicePointPos, so its the original position location
    float4* positionsSorted,// input: positions sorted according to containing grid cell
    uint* gridPointIndex,   // input: particle indices sorted according to containing grid cell
    uint* pointCellStart,   // input: pointCellStart[19] contains the index of gridParticleIndex in which cell 19 starts
    uint* pointCellStopp,   // input: pointCellStopp[19] contains the index of gridParticleIndex in which cell 19 ends
    uint numPoints)         // input: number of total particles
{
    uint threadIndex = getThreadIndex1D();
    // guard against the partial block at the end of the grid
    if(threadIndex >= numPoints) return;
    // read particle data from sorted arrays
    const float4 worldPos = positionsSorted[threadIndex];
    // get address of particle in grid
    const int3 gridCellCoordinate = paramsPointCloud.grid.getCellCoordinate(make_float3(worldPos));
    // Do not process points that are not in the defined grid!
    if(gridCellCoordinate.x == -1)
    {
        printf("got a point not in grid, ouch!\n");
        return;
    }
    const uint originalIndex = gridPointIndex[threadIndex];
    float4 clusterPosition = make_float4(0.0);
    unsigned int numberOfCollisionsInOwnCell = 0;
    unsigned int numberOfCollisionsInNeighborCells = 0;
    // This code tries to optimize, thinking: If we already find many neighbors in our own cell, there's really not much
    // use in looking in other cells, too. We could even go so far as to test for the minimumDistance vs cellSize: If the
    // mindist is 20 cm, the cell contains 100 points and the cellsize is 20cm, then we know we have lots of neighbors
    // without even looking at them. But we don't know their .w component values!
    checkCellForNeighbors(
        gridCellCoordinate,
        threadIndex,
        worldPos,
        positionsSorted,
        pointCellStart,
        pointCellStopp,
        clusterPosition,
        numberOfCollisionsInOwnCell);
    // examine neighbouring cells
    // NOTE(review): numberOfCollisionsInOwnCell is never modified inside
    // this loop, so the "< 5" condition is all-or-nothing: either all 26
    // neighbor cells are scanned or none of them.
    for(int z=-1; z<=1 && numberOfCollisionsInOwnCell < 5; z++)
    {
        for(int y=-1; y<=1; y++)
        {
            for(int x=-1; x<=1; x++)
            {
                const int3 neighbourGridCoordinate = gridCellCoordinate + make_int3(x, y, z);
                // the own cell was already handled above
                if(x == 0 && y == 0 && z == 0) continue;
                checkCellForNeighbors(
                    neighbourGridCoordinate,
                    threadIndex,
                    worldPos,
                    positionsSorted,
                    pointCellStart,
                    pointCellStopp,
                    clusterPosition,
                    numberOfCollisionsInNeighborCells);
            }
        }
    }
    const float numberOfCollisionsTotal = numberOfCollisionsInOwnCell + numberOfCollisionsInNeighborCells;
    if(numberOfCollisionsTotal > 0.0)
    {
        // clusterPosition.w is the sum of the neighbors' .w values
        const float averageNeighborScanDistance = clusterPosition.w / numberOfCollisionsTotal;
        if(averageNeighborScanDistance > worldPos.w /*&& randomNumber(numPoints + threadIndex) * numberOfCollisionsTotal > 1.0*/)
        {
            // If the other neighbors are of better quality, delete ourselves
            posOriginal[originalIndex] = make_float4(0.0);
        }
        else
        {
            // If we're better, move us into the center of our neighborhood
            // (centroid of the neighbors only; our own position is not part
            // of clusterPosition)
            posOriginal[originalIndex] = clusterPosition / numberOfCollisionsTotal;
        }
    }
    //posOriginal[originalIndex] = make_float4(worldPos.x, worldPos.y, worldPos.z, numberOfCollisionsTotal);
}
// Host launcher for markCollidingPointsD: marks (zeroes) or re-centers
// points that have close neighbors. All pointers are device memory; the
// sorted arrays must have been produced by the grid sort beforehand.
// Asynchronous on the default stream; errors surface via cudaCheckSuccess.
void markCollidingPoints(
    float* posOriginal,
    float* posSorted,
    unsigned int* gridPointIndex,
    unsigned int* pointCellStart,
    unsigned int* pointCellStopp,
    unsigned int numPoints)
{
    // nothing to do for an empty cloud
    if(numPoints == 0) return;
    // thread per particle
    uint numThreads, numBlocks;
    computeExecutionKernelGrid(numPoints, 128, numBlocks, numThreads);
    //std::cout << "markCollidingPoints(): we have " << numPoints << " points, " << numThreads << " threads and " << numBlocks << " blocks" << std::endl;
    // execute the kernel
    // TODO: test optimization: Write back not into posOriginal, but into posSorted (should be faster), then memcpy posSorted into posOriginal.
    // If writing back into posSorted is faster, we should be able to just switch buffers after every reduction.
    markCollidingPointsD<<< numBlocks, numThreads >>>(
        (float4*)posOriginal,
        (float4*)posSorted,
        gridPointIndex,
        pointCellStart,
        pointCellStopp,
        numPoints
        );
    cudaCheckSuccess("markCollidingPoints");
}
// bounding box type
typedef thrust::pair<float4, float4> bbox;
// reduce a pair of bounding boxes (a,b) to a bounding box containing a and b
struct bbox_reduction : public thrust::binary_function<bbox,bbox,bbox>
{
    // Fixed: operator() is now const (a reduction functor must not mutate
    // state) and takes const references instead of by-value copies.
    // Locals renamed from min/max so they no longer shadow the min()/max()
    // functions.
    __host__ __device__
    bbox operator()(const bbox& a, const bbox& b) const
    {
        // min corner; the .w components are deliberately dropped (set to 0),
        // the box is purely spatial
        const float4 lo = make_float4(thrust::min(a.first.x, b.first.x), thrust::min(a.first.y, b.first.y), thrust::min(a.first.z, b.first.z), 0);
        // max corner
        const float4 hi = make_float4(thrust::max(a.second.x, b.second.x), thrust::max(a.second.y, b.second.y), thrust::max(a.second.z, b.second.z), 0);
        return bbox(lo, hi);
    }
};
// convert a point to a bbox containing that point, (point) -> (point, point)
struct bbox_transformation : public thrust::unary_function<float4,bbox>
{
    // Fixed: operator() is now const (a transform functor must not mutate
    // state) and takes a const reference instead of a by-value copy.
    __host__ __device__
    bbox operator()(const float4& point) const
    {
        return bbox(point, point);
    }
};
// Computes the axis-aligned bounding box of @numPoints float4 points at
// @dPoints (device memory). Results go to @min/@max; the .w components are
// ignored. For an empty cloud both corners are set to (0,0,0).
void getBoundingBox(float *dPoints, uint numPoints, float3& min, float3& max)
{
    // Fixed: guard the empty case. The reduction below seeds itself with
    // dev_ptr[0], which dereferenced device memory past the valid range
    // when numPoints == 0. Also matches the guards used by the other
    // host functions in this file.
    if(numPoints == 0)
    {
        min = make_float3(0.0f);
        max = make_float3(0.0f);
        return;
    }
    float4* points = (float4*)dPoints;
    // wrap raw pointer with a device_ptr
    thrust::device_ptr<float4> dev_ptr = thrust::device_pointer_cast(points);
    // seed the reduction with the first point (one device->host read)
    bbox init = bbox(dev_ptr[0], dev_ptr[0]);
    // transformation operation
    bbox_transformation opConvertPointToBoundingBox;
    // binary reduction operation
    bbox_reduction opUnifyBoundingBoxes;
    // compute the bounding box for the point set
    bbox result = thrust::transform_reduce(
        thrust::device_ptr<float4>(points),
        thrust::device_ptr<float4>(points + numPoints),
        opConvertPointToBoundingBox,
        init,
        opUnifyBoundingBoxes);
    min = make_float3(result.first);
    max = make_float3(result.second);
}
// Sorts the (cell-hash key, point-index value) map in place by cell hash,
// so that indices of points in the same grid cell become contiguous.
// Both arrays live in device memory; no-op for numPoints == 0.
void sortMapAccordingToKeys(uint *dGridCellIndex, uint *dGridPointIndex, uint numPoints)
{
    if(numPoints == 0) return;
    thrust::sort_by_key(thrust::device_ptr<uint>(dGridCellIndex),                // KeysBeginning
                        thrust::device_ptr<uint>(dGridCellIndex + numPoints),   // KeysEnd
                        thrust::device_ptr<uint>(dGridPointIndex));             // ValuesBeginning
    cudaCheckSuccess("sortMapAccordingToKeys");
}
// Exact component-wise equality for float4 (all four components, .w
// included). Used by thrust::count/remove to find cleared (zeroed) points.
inline __host__ __device__ bool operator==(float4 a, float4 b)
{
    if (a.x != b.x) return false;
    if (a.y != b.y) return false;
    if (a.z != b.z) return false;
    return a.w == b.w;
}
// Compacts the device array in place by removing every point equal to
// (0,0,0,0) -- the marker written by markCollidingPointsD. Returns the
// number of points remaining at the front of @devicePoints; the tail is
// left unspecified.
unsigned int removeClearedPoints(float *devicePoints, unsigned int numberOfPoints)
{
    float4* points = (float4*)devicePoints;
    thrust::device_ptr<float4> newEnd;
    try
    {
        // Just for debugging! Note this count is a full extra pass over
        // the data on every call.
        int result = thrust::count(thrust::device_ptr<float4>(points),
                                   thrust::device_ptr<float4>(points + numberOfPoints),
                                   make_float4(0.0f));
        std::cerr << __PRETTY_FUNCTION__ << " removing zero points: " << result << " of " << numberOfPoints << std::endl;
        newEnd = thrust::remove(
            thrust::device_ptr<float4>(points),
            thrust::device_ptr<float4>(points + numberOfPoints),
            make_float4(0.0f)
            );
    }
    catch(thrust::system_error &e)
    {
        // output an error message and exit
        std::cerr << "Error accessing vector element: " << e.what() << std::endl;
        exit(-1);
    }
    // Fixed: the label used to say "removeZeroPoints", which matches no
    // function in this file and made CUDA error reports misleading.
    cudaCheckSuccess("removeClearedPoints");
    unsigned int numberOfPointsLeft = newEnd.get() - points;
    return numberOfPointsLeft;
}
// Predicate: true iff @point lies outside the box [mBBoxMin, mBBoxMax].
// Points exactly on a face compare false everywhere (strict >/<) and are
// therefore treated as inside, i.e. they are kept by remove_if.
struct IsOutsideBoundingBoxOp
{
    const float3 mBBoxMin, mBBoxMax;

    IsOutsideBoundingBoxOp(const float3& boxMin, const float3& boxMax) :
        mBBoxMin(boxMin),
        mBBoxMax(boxMax)
    { }

    // Fixed: operator() is now const -- a thrust predicate must not mutate
    // the functor.
    __host__ __device__
    bool operator()(const float4 point) const
    {
        return
            mBBoxMin.x > point.x ||
            mBBoxMin.y > point.y ||
            mBBoxMin.z > point.z ||
            mBBoxMax.x < point.x ||
            mBBoxMax.y < point.y ||
            mBBoxMax.z < point.z;
    }
};
// Compacts the device array in place, dropping every point outside the
// grid's world bounding box (points exactly on a face are kept, see
// IsOutsideBoundingBoxOp). Returns the number of points remaining at the
// front of @points.
unsigned int removePointsOutsideBoundingBox(float* points, unsigned int numberOfPoints, Grid* grid)
{
    printf("removePointsOutsideBoundingBox(): clearing %d points if outside of %.2f %.2f %.2f and %.2f %.2f %.2f\n", numberOfPoints, grid->worldMin.x, grid->worldMin.y, grid->worldMin.z, grid->worldMax.x, grid->worldMax.y, grid->worldMax.z);
    // move all points in bbox to beginning of devicePointsBase and return number of points left
    IsOutsideBoundingBoxOp op(grid->worldMin, grid->worldMax);
    float4* pointsf4 = (float4*)points;
    const thrust::device_ptr<float4> newEnd = thrust::remove_if(
        thrust::device_ptr<float4>(pointsf4),
        thrust::device_ptr<float4>(pointsf4 + numberOfPoints),
        op);
    cudaCheckSuccess("removePointsOutsideBoundingBox");
    unsigned int numberOfPointsRemaining = newEnd.get() - pointsf4;
    printf("removePointsOutsideBoundingBox(): done, %d points deleted, %d points remaining\n", numberOfPoints - numberOfPointsRemaining, numberOfPointsRemaining);
    return numberOfPointsRemaining;
}
// Copies @numberOfPointsToCopy float4 points from src to dst; both pointers
// must be device memory and the ranges must not overlap. Returns the number
// of points copied (by construction always numberOfPointsToCopy).
unsigned int copyPoints(float* devicePointsBaseDst, float* devicePointsBaseSrc, unsigned int numberOfPointsToCopy)
{
    float4* pointsSrc = (float4*)devicePointsBaseSrc;
    float4* pointsDst = (float4*)devicePointsBaseDst;
    const thrust::device_ptr<float4> newEnd = thrust::copy(
        thrust::device_ptr<float4>(pointsSrc),
        thrust::device_ptr<float4>(pointsSrc + numberOfPointsToCopy),
        thrust::device_ptr<float4>(pointsDst));
    cudaCheckSuccess("copyPoints");
    const unsigned int numberOfPointsCopied = newEnd.get() - pointsDst;
    return numberOfPointsCopied;
}
// Predicate: true iff @point lies strictly inside the box
// (mBBoxMin, mBBoxMax). Note the strict inequalities: points exactly on a
// face count as NOT inside, the mirror image of IsOutsideBoundingBoxOp.
struct IsInsideBoundingBoxOp
{
    const float3 mBBoxMin, mBBoxMax;

    IsInsideBoundingBoxOp(const float3& boxMin, const float3& boxMax) :
        mBBoxMin(boxMin),
        mBBoxMax(boxMax)
    { }

    // Fixed: operator() is now const -- a thrust predicate must not mutate
    // the functor.
    __host__ __device__
    bool operator()(const float4 point) const
    {
        return
            mBBoxMin.x < point.x &&
            mBBoxMin.y < point.y &&
            mBBoxMin.z < point.z &&
            mBBoxMax.x > point.x &&
            mBBoxMax.y > point.y &&
            mBBoxMax.z > point.z;
    }
};
// Requires dst and src to live in device memory space.
// Copies the points from src that lie strictly inside (bBoxMin, bBoxMax)
// to dst and returns how many were copied. dst must be large enough to hold
// numberOfPointsToCopy points in the worst case.
unsigned int copyPointsInBoundingBox(float* devicePointsBaseDst, float* devicePointsBaseSrc, float3 &bBoxMin, float3 &bBoxMax, unsigned int numberOfPointsToCopy)
{
    float4* pointsSrc = (float4*)devicePointsBaseSrc;
    float4* pointsDst = (float4*)devicePointsBaseDst;
    IsInsideBoundingBoxOp op(bBoxMin, bBoxMax);
    // stream-compacting copy: only points satisfying op land in dst
    const thrust::device_ptr<float4> newEnd = thrust::copy_if(
        thrust::device_ptr<float4>(pointsSrc),
        thrust::device_ptr<float4>(pointsSrc + numberOfPointsToCopy),
        thrust::device_ptr<float4>(pointsDst),
        op);
    cudaCheckSuccess("copyPointsInBoundingBox");
    const unsigned int numberOfPointsCopied = newEnd.get() - pointsDst;
    return numberOfPointsCopied;
}
|
54ddf7457a06e59c2f0f81a18cfca14b1d56e793.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "update_part_props.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
namespace NKernel {
template <int BlockSize, int N = 1>
__forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) {
float4 sum;
sum.x = sum.y = sum.z = sum.w = 0;
stat += offset;
const int warpSize = 32;
const int alignSize = 4 * warpSize;
{
int lastId = min(size, alignSize - (offset % alignSize));
if (blockIdx == 0) {
if (threadIdx.x < lastId) {
sum.x += Ldg(stat + threadIdx.x);
}
}
size = max(size - lastId, 0);
stat += lastId;
}
//now lets align end
const int unalignedTail = (size % alignSize);
if (unalignedTail != 0) {
if (blockIdx == 0) {
const int tailOffset = size - unalignedTail;
if (threadIdx.x < unalignedTail) {
sum.y += Ldg(stat + tailOffset + threadIdx.x);
}
}
}
size -= unalignedTail;
const int entriesPerWarp = warpSize * 4;
const int warpsPerBlock = (BlockSize / 32);
const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32);
stat += globalWarpId * entriesPerWarp;
size = max(size - globalWarpId * entriesPerWarp, 0);
const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount;
const int localIdx = (threadIdx.x & 31) * 4;
const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize;
stat += localIdx;
if (size > 0) {
#pragma unroll N
for (int i = 0; i < iterCount; ++i) {
const float4* stat4 = (const float4*) stat;
float4 val = Ldg(stat4);
sum.x += val.x;
sum.y += val.y;
sum.z += val.z;
sum.w += val.w;
stat += stripeSize;
}
}
return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w;
};
template <class TOutput>
__global__ void SaveResultsImpl(const ui32* partIds,
const double* tempVars,
ui32 partCount,
ui32 statCount,
int tempVarsBlockCount,
TOutput* statSums) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 statId = i % statCount;
const ui32 y = i / statCount;
if (i < partCount * statCount) {
const ui32 leafId = partIds != nullptr ? partIds[y] : y;
double total = 0;
for (int x = 0; x < tempVarsBlockCount; ++x) {
total += tempVars[i];
tempVars += statCount * partCount;
}
statSums[leafId * statCount + statId] = total;
}
}
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets,
const float* source,
ui64 statLineSize,
ui32 partCount,
double* statPartSums) {
ui32 partId = blockIdx.y;
const ui32 statId = blockIdx.z;
source += statId * statLineSize;
while (partId < partCount) {
const ui32 partOffset = __ldg(offsets + partId);
const ui32 partSize = __ldg(offsets + partId + 1) - partOffset;
__shared__ volatile double localBuffer[BlockSize];
const int minDocsPerBlock = BlockSize * 16;
const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock);
double result = 0;
if (blockIdx.x < effectiveBlockCount) {
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, partOffset, partSize, blockId, effectiveBlockCount);
__syncthreads();
result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
}
if (threadIdx.x == 0) {
const int statCount = gridDim.z;
const int lineSize = statCount * partCount;
ui64 idx = blockIdx.x * lineSize + partId * statCount + statId;
statPartSums[idx] = result;
}
partId += gridDim.y;
}
}
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsImpl(const ui32* partIds,
const TDataPartition* parts,
const float* source,
ui64 statLineSize,
double* tempVars) {
const ui32 leafId = partIds[blockIdx.y];
TDataPartition part = parts[leafId];
const ui32 statId = blockIdx.z;
__shared__ volatile double localBuffer[BlockSize];
source += statId * statLineSize;
const int minDocsPerBlock = BlockSize * 16;
const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
double result = 0;
if (blockIdx.x < effectiveBlockCount) {
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum <BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
__syncthreads();
result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
}
if (threadIdx.x == 0) {
tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
}
}
void UpdatePartitionsProps(const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const float* source,
ui32 statCount,
ui64 statLineSize,
ui32 tempVarsCount,
double* tempVars,
double* statSums,
TCudaStream stream
) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.y = partCount;
numBlocks.z = statCount;
numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount);
hipLaunchKernelGGL(( UpdatePartitionsPropsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, partIds, parts, source, statLineSize, tempVars);
{
const ui32 saveBlockSize = 256;
const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize;
hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, partIds, tempVars, partCount, statCount, numBlocks.x, statSums);
}
}
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsForSplitImpl(const ui32* leftPartIds,
const ui32* rightPartIds,
const TDataPartition* parts,
const float* source,
ui64 statLineSize,
double* tempVars) {
const ui32 sourcePartCount = gridDim.y / 2;
const bool isLeft = blockIdx.y < sourcePartCount;
const ui32* partIds = isLeft ? leftPartIds : rightPartIds;
const ui32 leafId = partIds[isLeft ? blockIdx.y : blockIdx.y - sourcePartCount];
TDataPartition part = parts[leafId];
const ui32 statId = blockIdx.z;
__shared__ volatile double localBuffer[BlockSize];
source += statId * statLineSize;
const int minDocsPerBlock = BlockSize;
const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
double result = 0;
if (blockIdx.x < effectiveBlockCount) {
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
__syncthreads();
result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
}
if (threadIdx.x == 0) {
tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
}
}
template <int BlockSize>
__launch_bounds__(BlockSize, 2)
__global__ void UpdatePartitionsPropsForSingleSplitImpl(const ui32 leftPartId,
const ui32 rightPartId,
const TDataPartition* parts,
const float* source,
ui64 statLineSize,
double* tempVars) {
// const ui32 sourcePartCount = 2;
const bool isLeft = blockIdx.y == 0;
const ui32 leafId = isLeft ? leftPartId : rightPartId;
TDataPartition part = parts[leafId];
const ui32 statId = blockIdx.z;
__shared__ volatile double localBuffer[BlockSize];
source += statId * statLineSize;
const int minDocsPerBlock = BlockSize;
const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
double result = 0;
if (blockIdx.x < effectiveBlockCount) {
const int blockId = blockIdx.x % effectiveBlockCount;
localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
__syncthreads();
result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
}
if (threadIdx.x == 0) {
tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
}
}
template <class TOutput>
__global__ void SaveResultsForSplitImpl(const ui32* leftPartIds,
const ui32* rightPartIds,
const double* tempVars,
ui32 partCount,
ui32 statCount,
int tempVarsBlockCount,
TOutput* statSums) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 statId = i % statCount;
const ui32 y = i / statCount;
if (i < partCount * statCount) {
const ui32 leafId = (y < partCount / 2) ? leftPartIds[y] : rightPartIds[y - partCount / 2];
double total = 0;
for (int x = 0; x < tempVarsBlockCount; ++x) {
total += __ldg(tempVars + i);
tempVars += statCount * partCount;
}
statSums[leafId * statCount + statId] = total;
}
}
template <class TOutput>
__global__ void SaveResultsForSingleSplitImpl(const ui32 leftPartId,
const ui32 rightPartId,
const double* tempVars,
ui32 statCount,
int tempVarsBlockCount,
TOutput* statSums) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 statId = i % statCount;
const ui32 y = i / statCount;
if (i < 2 * statCount) {
const ui32 leafId = y == 0 ? leftPartId : rightPartId;
double total = 0;
for (int x = 0; x < tempVarsBlockCount; ++x) {
total += __ldg(tempVars + i);
tempVars += statCount * 2;
}
statSums[leafId * statCount + statId] = total;
}
}
void UpdatePartitionsPropsForSplit(const TDataPartition* parts,
const ui32* leftPartIds,
const ui32* rightPartIds,
ui32 partCount,
const float* source,
ui32 statCount,
ui64 statLineSize,
ui32 tempVarsCount,
double* tempVars,
double* statSums,
TCudaStream stream) {
//TODO(noxoomo): if it'll be "slow", could be made in one kernel
UpdatePartitionsProps(parts, leftPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream);
UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream);
}
void UpdatePartitionsPropsForOffsets(const ui32* offsets,
ui32 count,
const float* source,
ui32 statCount,
ui64 statLineSize,
ui32 tempVarsCount,
double* tempVars,
double* statSums,
TCudaStream stream
) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.y = min(count, 65535);
numBlocks.z = statCount;
numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount);
hipLaunchKernelGGL(( UpdatePartitionsPropsForOffsetsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, offsets, source, statLineSize, count, tempVars);
{
const ui32 saveBlockSize = 256;
const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize;
hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, nullptr, tempVars, count, statCount, numBlocks.x, statSums);
}
}
__global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
WriteThrough(dst + i, (double)__ldg(src + i));
}
}
void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) {
const ui32 blockSize = 128;
const ui32 numBlocks = CeilDivide(size, blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( FloatToDoubleImpl), dim3(numBlocks), dim3(blockSize), 0, stream, src, size, dst);
}
}
ui32 GetTempVarsCount(ui32 statCount, ui32 count) {
return CeilDivide(2 * TArchProps::SMCount(), (int)statCount) * statCount * count;
}
}
| 54ddf7457a06e59c2f0f81a18cfca14b1d56e793.cu | #include "update_part_props.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
namespace NKernel {
    // Sums stat[offset .. offset+size) cooperatively across @blockCount
    // logical blocks and returns each thread's private partial sum as a
    // double. @blockIdx is the caller-supplied LOGICAL block id -- it
    // deliberately shadows the CUDA built-in blockIdx, which is not used
    // here. The caller must still reduce the per-thread results across the
    // block (e.g. with FastInBlockReduce).
    // Three phases: (1) block 0 picks up the unaligned head so the pointer
    // becomes alignSize-aligned, (2) block 0 picks up the unaligned tail,
    // (3) all blocks stride over the aligned middle with float4 loads.
    // N controls the unroll factor of the main loop.
    template <int BlockSize, int N = 1>
    __forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) {
        // four float accumulators; widened to double only at the end
        float4 sum;
        sum.x = sum.y = sum.z = sum.w = 0;
        stat += offset;
        const int warpSize = 32;
        const int alignSize = 4 * warpSize;
        {
            // phase 1: head up to the next alignSize boundary, block 0 only
            int lastId = min(size, alignSize - (offset % alignSize));
            if (blockIdx == 0) {
                if (threadIdx.x < lastId) {
                    sum.x += Ldg(stat + threadIdx.x);
                }
            }
            size = max(size - lastId, 0);
            stat += lastId;
        }
        //now lets align end
        // phase 2: unaligned tail, block 0 only
        const int unalignedTail = (size % alignSize);
        if (unalignedTail != 0) {
            if (blockIdx == 0) {
                const int tailOffset = size - unalignedTail;
                if (threadIdx.x < unalignedTail) {
                    sum.y += Ldg(stat + tailOffset + threadIdx.x);
                }
            }
        }
        size -= unalignedTail;
        // phase 3: aligned middle; each warp owns 128 consecutive floats
        // per stripe, striped over all warps of all logical blocks
        const int entriesPerWarp = warpSize * 4;
        const int warpsPerBlock = (BlockSize / 32);
        const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32);
        stat += globalWarpId * entriesPerWarp;
        size = max(size - globalWarpId * entriesPerWarp, 0);
        const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount;
        const int localIdx = (threadIdx.x & 31) * 4;
        const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize;
        stat += localIdx;
        if (size > 0) {
            #pragma unroll N
            for (int i = 0; i < iterCount; ++i) {
                // vectorized 16-byte load; alignment guaranteed by phase 1
                const float4* stat4 = (const float4*) stat;
                float4 val = Ldg(stat4);
                sum.x += val.x;
                sum.y += val.y;
                sum.z += val.z;
                sum.w += val.w;
                stat += stripeSize;
            }
        }
        return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w;
    };
    // Folds the per-block partial sums in @tempVars (layout:
    // [tempVarsBlockCount][partCount][statCount]) into final totals and
    // writes statSums[leafId * statCount + statId]. One thread per
    // (partition, statistic) pair. If @partIds is null the partition index
    // itself is used as the leaf id.
    template <class TOutput>
    __global__ void SaveResultsImpl(const ui32* partIds,
                                    const double* tempVars,
                                    ui32 partCount,
                                    ui32 statCount,
                                    int tempVarsBlockCount,
                                    TOutput* statSums) {

        const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
        const ui32 statId = i % statCount;
        const ui32 y = i / statCount;

        if (i < partCount * statCount) {
            const ui32 leafId = partIds != nullptr ? partIds[y] : y;
            double total = 0;
            // walk the tempVarsBlockCount slices, advancing the base pointer
            // by one slice (statCount * partCount doubles) each iteration
            for (int x = 0; x < tempVarsBlockCount; ++x) {
                total += tempVars[i];
                tempVars += statCount * partCount;
            }
            statSums[leafId * statCount + statId] = total;
        }
    }
    // Per-(partition, statistic) partial sums where partitions are given as
    // an offsets array: partition p covers [offsets[p], offsets[p+1]).
    // Grid layout: x = reduction blocks, y = partitions (the while-loop
    // strides so partCount may exceed gridDim.y), z = statistics.
    // Per-block partial results are written to statPartSums with layout
    // [gridDim.x][partCount][statCount]; blocks beyond the effective count
    // write 0 so SaveResultsImpl can sum over all gridDim.x slices.
    template <int BlockSize>
    __launch_bounds__(BlockSize, 2)
    __global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets,
                                                        const float* source,
                                                        ui64 statLineSize,
                                                        ui32 partCount,
                                                        double* statPartSums) {

        ui32 partId = blockIdx.y;
        const ui32 statId = blockIdx.z;
        // each z-plane reads its own row of the stat matrix
        source += statId * statLineSize;

        while (partId < partCount) {
            const ui32 partOffset = __ldg(offsets + partId);
            const ui32 partSize = __ldg(offsets + partId + 1) - partOffset;

            __shared__ volatile double localBuffer[BlockSize];

            const int minDocsPerBlock = BlockSize * 16;
            // cap the number of cooperating blocks for small partitions
            const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock);

            double result = 0;
            // blockIdx.x is uniform within a block, so the __syncthreads()
            // inside this branch is reached by all threads of the block
            if (blockIdx.x < effectiveBlockCount) {
                // NOTE(review): under the guard above, the modulo is the
                // identity -- presumably kept for symmetry with callers
                const int blockId = blockIdx.x % effectiveBlockCount;
                localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, partOffset, partSize, blockId, effectiveBlockCount);
                __syncthreads();
                result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
            }

            if (threadIdx.x == 0) {
                const int statCount = gridDim.z;
                const int lineSize = statCount * partCount;
                ui64 idx = blockIdx.x * lineSize + partId * statCount + statId;
                statPartSums[idx] = result;
            }
            partId += gridDim.y;
        }
    }
    // Per-(partition, statistic) partial sums where partitions come from a
    // TDataPartition array indexed through @partIds. Grid layout:
    // x = reduction blocks, y = partitions, z = statistics. Per-block
    // partial results land in tempVars[x][y][z]; inactive blocks write 0.
    template <int BlockSize>
    __launch_bounds__(BlockSize, 2)
    __global__ void UpdatePartitionsPropsImpl(const ui32* partIds,
                                              const TDataPartition* parts,
                                              const float* source,
                                              ui64 statLineSize,
                                              double* tempVars) {
        const ui32 leafId = partIds[blockIdx.y];
        TDataPartition part = parts[leafId];
        const ui32 statId = blockIdx.z;

        __shared__ volatile double localBuffer[BlockSize];
        // each z-plane reads its own row of the stat matrix
        source += statId * statLineSize;

        const int minDocsPerBlock = BlockSize * 16;
        // cap the number of cooperating blocks for small partitions
        const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
        double result = 0;

        // blockIdx.x is uniform within a block, so the __syncthreads()
        // inside this branch is reached by all threads of the block
        if (blockIdx.x < effectiveBlockCount) {
            const int blockId = blockIdx.x % effectiveBlockCount;
            localBuffer[threadIdx.x] = ComputeSum <BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
            __syncthreads();
            result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
        }

        if (threadIdx.x == 0) {
            tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
        }
    }
    // Host launcher: for every partition in @partIds and every statistic,
    // sums source[stat * statLineSize + doc] over the partition's documents
    // and stores the result in statSums[leaf * statCount + stat].
    // @tempVars must hold at least numBlocks.x * partCount * statCount
    // doubles (see GetTempVarsCount). Asynchronous on @stream.
    void UpdatePartitionsProps(const TDataPartition* parts,
                               const ui32* partIds,
                               ui32 partCount,
                               const float* source,
                               ui32 statCount,
                               ui64 statLineSize,
                               ui32 tempVarsCount,
                               double* tempVars,
                               double* statSums,
                               TCudaStream stream
    ) {
        const ui32 blockSize = 512;
        dim3 numBlocks;
        numBlocks.y = partCount;
        numBlocks.z = statCount;
        // roughly two blocks per SM spread over the stat planes
        numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);

        Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount);
        UpdatePartitionsPropsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(partIds, parts, source, statLineSize, tempVars);

        {
            // second pass folds the per-block partials into statSums
            const ui32 saveBlockSize = 256;
            const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize;
            SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(partIds, tempVars, partCount, statCount, numBlocks.x, statSums);
        }
    }
    // Like UpdatePartitionsPropsImpl, but processes the left and right
    // sides of a split in one launch: gridDim.y must be
    // 2 * sourcePartCount, with the first half of the y-range mapped to
    // @leftPartIds and the second half to @rightPartIds. Uses a smaller
    // per-block quantum (BlockSize docs) than the non-split variant.
    template <int BlockSize>
    __launch_bounds__(BlockSize, 2)
    __global__ void UpdatePartitionsPropsForSplitImpl(const ui32* leftPartIds,
                                                      const ui32* rightPartIds,
                                                      const TDataPartition* parts,
                                                      const float* source,
                                                      ui64 statLineSize,
                                                      double* tempVars) {
        const ui32 sourcePartCount = gridDim.y / 2;
        const bool isLeft = blockIdx.y < sourcePartCount;
        const ui32* partIds = isLeft ? leftPartIds : rightPartIds;
        const ui32 leafId = partIds[isLeft ? blockIdx.y : blockIdx.y - sourcePartCount];
        TDataPartition part = parts[leafId];
        const ui32 statId = blockIdx.z;

        __shared__ volatile double localBuffer[BlockSize];
        source += statId * statLineSize;

        const int minDocsPerBlock = BlockSize;
        const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
        double result = 0;

        // uniform branch per block, so the barrier below is safe
        if (blockIdx.x < effectiveBlockCount) {
            const int blockId = blockIdx.x % effectiveBlockCount;
            localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
            __syncthreads();
            result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
        }

        if (threadIdx.x == 0) {
            tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
        }
    }
    // Single-split specialization of UpdatePartitionsPropsForSplitImpl:
    // gridDim.y must be 2; y == 0 handles @leftPartId, y == 1 handles
    // @rightPartId. The part ids are passed by value, so no id arrays are
    // needed on the device.
    template <int BlockSize>
    __launch_bounds__(BlockSize, 2)
    __global__ void UpdatePartitionsPropsForSingleSplitImpl(const ui32 leftPartId,
                                                            const ui32 rightPartId,
                                                            const TDataPartition* parts,
                                                            const float* source,
                                                            ui64 statLineSize,
                                                            double* tempVars) {
//        const ui32 sourcePartCount = 2;
        const bool isLeft = blockIdx.y == 0;
        const ui32 leafId = isLeft ? leftPartId : rightPartId;
        TDataPartition part = parts[leafId];
        const ui32 statId = blockIdx.z;

        __shared__ volatile double localBuffer[BlockSize];
        source += statId * statLineSize;

        const int minDocsPerBlock = BlockSize;
        const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock);
        double result = 0;

        // uniform branch per block, so the barrier below is safe
        if (blockIdx.x < effectiveBlockCount) {
            const int blockId = blockIdx.x % effectiveBlockCount;
            localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount);
            __syncthreads();
            result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize);
        }

        if (threadIdx.x == 0) {
            tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result;
        }
    }
    // Split variant of SaveResultsImpl: @partCount covers both halves
    // (left then right), so y < partCount/2 maps into @leftPartIds and the
    // rest into @rightPartIds. Sums the [tempVarsBlockCount] slices of
    // @tempVars and writes statSums[leafId * statCount + statId].
    template <class TOutput>
    __global__ void SaveResultsForSplitImpl(const ui32* leftPartIds,
                                            const ui32* rightPartIds,
                                            const double* tempVars,
                                            ui32 partCount,
                                            ui32 statCount,
                                            int tempVarsBlockCount,
                                            TOutput* statSums) {
        const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
        const ui32 statId = i % statCount;
        const ui32 y = i / statCount;

        if (i < partCount * statCount) {
            const ui32 leafId = (y < partCount / 2) ? leftPartIds[y] : rightPartIds[y - partCount / 2];

            double total = 0;
            // advance one slice (statCount * partCount doubles) per step
            for (int x = 0; x < tempVarsBlockCount; ++x) {
                total += __ldg(tempVars + i);
                tempVars += statCount * partCount;
            }
            statSums[leafId * statCount + statId] = total;
        }
    }
    // Single-split variant of SaveResultsForSplitImpl: exactly two
    // partitions (y == 0 -> @leftPartId, y == 1 -> @rightPartId) whose ids
    // are passed by value. Sums the [tempVarsBlockCount] slices of
    // @tempVars and writes statSums[leafId * statCount + statId].
    template <class TOutput>
    __global__ void SaveResultsForSingleSplitImpl(const ui32 leftPartId,
                                                  const ui32 rightPartId,
                                                  const double* tempVars,
                                                  ui32 statCount,
                                                  int tempVarsBlockCount,
                                                  TOutput* statSums) {
        const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
        const ui32 statId = i % statCount;
        const ui32 y = i / statCount;

        if (i < 2 * statCount) {
            const ui32 leafId = y == 0 ? leftPartId : rightPartId;
            double total = 0;
            // advance one slice (statCount * 2 doubles) per step
            for (int x = 0; x < tempVarsBlockCount; ++x) {
                total += __ldg(tempVars + i);
                tempVars += statCount * 2;
            }
            statSums[leafId * statCount + statId] = total;
        }
    }
    // Host launcher for both sides of a split. Currently implemented as two
    // sequential UpdatePartitionsProps launches on the same stream (so the
    // shared tempVars buffer is reused safely in stream order) rather than
    // the fused *ForSplitImpl kernels above.
    void UpdatePartitionsPropsForSplit(const TDataPartition* parts,
                                       const ui32* leftPartIds,
                                       const ui32* rightPartIds,
                                       ui32 partCount,
                                       const float* source,
                                       ui32 statCount,
                                       ui64 statLineSize,
                                       ui32 tempVarsCount,
                                       double* tempVars,
                                       double* statSums,
                                       TCudaStream stream) {
        //TODO(noxoomo): if it'll be "slow", could be made in one kernel
        UpdatePartitionsProps(parts, leftPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream);
        UpdatePartitionsProps(parts, rightPartIds, partCount, source, statCount, statLineSize, tempVarsCount, tempVars, statSums, stream);
    }
// Computes per-partition statistic sums for `count` partitions described by
// `offsets`: a first kernel produces per-block partial sums in tempVars
// (grid = x partial blocks, y partitions, z stats), then SaveResultsImpl
// reduces the x dimension into statSums.
void UpdatePartitionsPropsForOffsets(const ui32* offsets,
ui32 count,
const float* source,
ui32 statCount,
ui64 statLineSize,
ui32 tempVarsCount,
double* tempVars,
double* statSums,
TCudaStream stream
) {
const ui32 blockSize = 512;
dim3 numBlocks;
// y: one slice per partition (capped by the CUDA grid-dim limit),
// z: one slice per statistic,
// x: number of partial-sum blocks, sized from the SM count.
numBlocks.y = min(count, 65535);
numBlocks.z = statCount;
numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
// tempVars must be able to hold one double per launched block.
Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount);
UpdatePartitionsPropsForOffsetsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(offsets, source, statLineSize, count, tempVars);
{
// Reduce the numBlocks.x partial slabs into the final sums; nullptr means
// partitions are addressed directly by index rather than via an id map.
const ui32 saveBlockSize = 256;
const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize;
SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(nullptr, tempVars, count, statCount, numBlocks.x, statSums);
}
}
// Element-wise widening copy: dst[i] = (double)src[i].
// One thread per element; threads past `size` exit immediately.
__global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) {
    const ui32 idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    WriteThrough(dst + idx, static_cast<double>(__ldg(src + idx)));
}
// Host launcher for FloatToDoubleImpl: asynchronously copies `size` floats
// from `src` into `dst` as doubles on `stream`. No-op when size == 0.
void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) {
    const ui32 blockSize = 128;
    const ui32 blockCount = CeilDivide(size, blockSize);
    if (blockCount == 0) {
        return;
    }
    FloatToDoubleImpl<<<blockCount, blockSize, 0, stream>>>(src, size, dst);
}
// Size (in doubles) of the scratch buffer needed by the partition-props
// kernels: x-grid extent (derived from the SM count) * statCount * count.
ui32 GetTempVarsCount(ui32 statCount, ui32 count) {
    const int xBlocks = CeilDivide(2 * TArchProps::SMCount(), (int)statCount);
    return xBlocks * statCount * count;
}
}
|
5683f37dc07f5b27a96970fc353c49f1ffd88199.hip | // !!! This is a file automatically generated by hipify!!!
#include "split/device/sfs/sobel_normals.cuh"
#include "split/device/detail/unary_functional.cuh"
#include "split/device/detail/matrix_functional.cuh"
#include "split/device/detail/zip_it.cuh"
#include "split/device/detail/cycle_iterator.cuh"
#include "split/device/detail/transposed_copy.cuh"
#include "split/device/detail/view_util.cuh"
#include "split/device/detail/normalize_vectors.cuh"
#include <cusp/print.h>
SPLIT_DEVICE_NAMESPACE_BEGIN
namespace sfs
{
// Computes a Sobel-style vertical derivative of the height image:
// horizontal 1-2-1 smoothing via a row-wise prefix-sum trick, followed by a
// bottom-minus-top central difference. Output has one value per pixel.
SPLIT_API void sobel_derivative(
cusp::array2d<real, cusp::device_memory>::const_view di_heights,
cusp::array1d<real, cusp::device_memory>::view do_derivative)
{
// How many points we're working with
const int n_data = di_heights.num_entries;
const int width = di_heights.num_cols;
const int height = di_heights.num_rows;
// Convenience
auto height_begin = di_heights.values.begin();
const auto count = thrust::make_counting_iterator(0);
const thrust::plus<real> add;
const thrust::minus<real> subtract;
// Allocate two buffers for intermediate results
cusp::array1d<real, cusp::device_memory> d_ping(n_data + 1);
// Actually re-use the output memory as the second buffer
auto& d_pong = do_derivative;
// Insert a sentinel value here, useful later
d_ping[0] = 0.f;
// Ignore the sentinel value for the most part
auto ping_begin = d_ping.begin() + 1;
// Scan each row of the data, make sure to enforce the row boundaries
// ---------------------------------------------------------------------------
const auto rows_begin = detail::make_row_iterator(width);
const auto rows_end = rows_begin + n_data;
thrust::inclusive_scan_by_key(rows_begin, rows_end, height_begin, ping_begin);
// Subtract the i - 2 neighbor from each value, and shift results one left
// (scan[i+1] - scan[i-2] yields the 3-wide window sum around i)
// ---------------------------------------------------------------------------
// If we're past the initial 2 values we can look up a previous neighbor,
// otherwise we would be out of bounds or looking up a previous row.
// Instead we index a sentinel value inserted at the start of the range.
const auto base_op = [=] __host__ __device__(int i) {
return (i % width) >= 2 ? i - 2 : -1;
};
const auto base_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, base_op));
// Calculate a capped index, which allows us to shift one left and duplicate
// the final row element. Cap our value at the end of this row
const auto cap_op = [=] __host__ __device__(int i) {
return min(i + 1, (i / width + 1) * width - 1);
};
const auto cap_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, cap_op));
// Subtract the base value from each scanned value
thrust::transform(cap_it, cap_it + n_data, base_it, d_pong.begin(), subtract);
// Add the original values (weights the center sample, giving 1-2-1)
thrust::transform(
d_pong.begin(), d_pong.end(), height_begin, ping_begin, add);
// Now subtract columns either side of a value to obtain its derivative
// ---------------------------------------------------------------------------
// Clamp the row and column indices inside the image
const auto bottom_op = [=] __host__ __device__(int i) {
const int r = min(i / width + 1, height - 1);
return r * width + (i % width);
};
const auto bottom_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, bottom_op));
const auto top_op = [=] __host__ __device__(int i) {
const int r = max(i / width - 1, 0);
return r * width + (i % width);
};
const auto top_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, top_op));
thrust::transform(
bottom_it, bottom_it + n_data, top_it, do_derivative.begin(), subtract);
}
// Estimates per-pixel surface normals from a height image using Sobel
// derivatives: row 0 of do_normals receives the X derivative (computed on
// the transposed image), row 1 the Y derivative, row 2 a constant
// 1 / max(0.001, i_depth); the vectors are then normalized in place.
SPLIT_API void sobel_normals(
cusp::array2d<real, cusp::device_memory>::const_view di_heights,
cusp::array2d<real, cusp::device_memory>::view do_normals,
const real i_depth)
{
// Clearer references
auto d_Xderivative = do_normals.row(0);
auto d_Yderivative = do_normals.row(1);
auto d_Z = do_normals.row(2);
// Transpose the image
cusp::array2d<real, cusp::device_memory> d_height_transposed(
di_heights.num_cols, di_heights.num_rows, di_heights.num_entries);
cusp::array1d<real, cusp::device_memory> d_Xderivative_transposed(
d_Xderivative.size());
detail::transposed_copy<real>(di_heights.num_cols,
di_heights.num_rows,
di_heights.values,
d_height_transposed.values);
// Compute the Y derivative using the original image
sobel_derivative(di_heights, d_Yderivative);
// Compute the X derivative using the transposed image
sobel_derivative(d_height_transposed, d_Xderivative_transposed);
// Transpose the X derivatives back
detail::transposed_copy<real>(d_height_transposed.num_cols,
d_height_transposed.num_rows,
d_Xderivative_transposed,
d_Xderivative);
// Fill the Z with the provided depth (clamped to avoid division by zero)
thrust::fill(d_Z.begin(), d_Z.end(), 1.f / ::max(0.001f, i_depth));
// Finally normalize the vectors
detail::normalize_vectors(do_normals);
}
} // namespace sfs
SPLIT_DEVICE_NAMESPACE_END
| 5683f37dc07f5b27a96970fc353c49f1ffd88199.cu | #include "split/device/sfs/sobel_normals.cuh"
#include "split/device/detail/unary_functional.cuh"
#include "split/device/detail/matrix_functional.cuh"
#include "split/device/detail/zip_it.cuh"
#include "split/device/detail/cycle_iterator.cuh"
#include "split/device/detail/transposed_copy.cuh"
#include "split/device/detail/view_util.cuh"
#include "split/device/detail/normalize_vectors.cuh"
#include <cusp/print.h>
SPLIT_DEVICE_NAMESPACE_BEGIN
namespace sfs
{
// Computes a Sobel-style vertical derivative of the height image:
// horizontal 1-2-1 smoothing via a row-wise prefix-sum trick, followed by a
// bottom-minus-top central difference. Output has one value per pixel.
SPLIT_API void sobel_derivative(
cusp::array2d<real, cusp::device_memory>::const_view di_heights,
cusp::array1d<real, cusp::device_memory>::view do_derivative)
{
// How many points we're working with
const int n_data = di_heights.num_entries;
const int width = di_heights.num_cols;
const int height = di_heights.num_rows;
// Convenience
auto height_begin = di_heights.values.begin();
const auto count = thrust::make_counting_iterator(0);
const thrust::plus<real> add;
const thrust::minus<real> subtract;
// Allocate two buffers for intermediate results
cusp::array1d<real, cusp::device_memory> d_ping(n_data + 1);
// Actually re-use the output memory as the second buffer
auto& d_pong = do_derivative;
// Insert a sentinel value here, useful later
d_ping[0] = 0.f;
// Ignore the sentinel value for the most part
auto ping_begin = d_ping.begin() + 1;
// Scan each row of the data, make sure to enforce the row boundaries
// ---------------------------------------------------------------------------
const auto rows_begin = detail::make_row_iterator(width);
const auto rows_end = rows_begin + n_data;
thrust::inclusive_scan_by_key(rows_begin, rows_end, height_begin, ping_begin);
// Subtract the i - 2 neighbor from each value, and shift results one left
// (scan[i+1] - scan[i-2] yields the 3-wide window sum around i)
// ---------------------------------------------------------------------------
// If we're past the initial 2 values we can look up a previous neighbor,
// otherwise we would be out of bounds or looking up a previous row.
// Instead we index a sentinel value inserted at the start of the range.
const auto base_op = [=] __host__ __device__(int i) {
return (i % width) >= 2 ? i - 2 : -1;
};
const auto base_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, base_op));
// Calculate a capped index, which allows us to shift one left and duplicate
// the final row element. Cap our value at the end of this row
const auto cap_op = [=] __host__ __device__(int i) {
return min(i + 1, (i / width + 1) * width - 1);
};
const auto cap_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, cap_op));
// Subtract the base value from each scanned value
thrust::transform(cap_it, cap_it + n_data, base_it, d_pong.begin(), subtract);
// Add the original values (weights the center sample, giving 1-2-1)
thrust::transform(
d_pong.begin(), d_pong.end(), height_begin, ping_begin, add);
// Now subtract columns either side of a value to obtain its derivative
// ---------------------------------------------------------------------------
// Clamp the row and column indices inside the image
const auto bottom_op = [=] __host__ __device__(int i) {
const int r = min(i / width + 1, height - 1);
return r * width + (i % width);
};
const auto bottom_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, bottom_op));
const auto top_op = [=] __host__ __device__(int i) {
const int r = max(i / width - 1, 0);
return r * width + (i % width);
};
const auto top_it = thrust::make_permutation_iterator(
ping_begin, thrust::make_transform_iterator(count, top_op));
thrust::transform(
bottom_it, bottom_it + n_data, top_it, do_derivative.begin(), subtract);
}
// Estimates per-pixel surface normals from a height image using Sobel
// derivatives: row 0 of do_normals receives the X derivative (computed on
// the transposed image), row 1 the Y derivative, row 2 a constant
// 1 / max(0.001, i_depth); the vectors are then normalized in place.
SPLIT_API void sobel_normals(
cusp::array2d<real, cusp::device_memory>::const_view di_heights,
cusp::array2d<real, cusp::device_memory>::view do_normals,
const real i_depth)
{
// Clearer references
auto d_Xderivative = do_normals.row(0);
auto d_Yderivative = do_normals.row(1);
auto d_Z = do_normals.row(2);
// Transpose the image
cusp::array2d<real, cusp::device_memory> d_height_transposed(
di_heights.num_cols, di_heights.num_rows, di_heights.num_entries);
cusp::array1d<real, cusp::device_memory> d_Xderivative_transposed(
d_Xderivative.size());
detail::transposed_copy<real>(di_heights.num_cols,
di_heights.num_rows,
di_heights.values,
d_height_transposed.values);
// Compute the Y derivative using the original image
sobel_derivative(di_heights, d_Yderivative);
// Compute the X derivative using the transposed image
sobel_derivative(d_height_transposed, d_Xderivative_transposed);
// Transpose the X derivatives back
detail::transposed_copy<real>(d_height_transposed.num_cols,
d_height_transposed.num_rows,
d_Xderivative_transposed,
d_Xderivative);
// Fill the Z with the provided depth (clamped to avoid division by zero)
thrust::fill(d_Z.begin(), d_Z.end(), 1.f / std::max(0.001f, i_depth));
// Finally normalize the vectors
detail::normalize_vectors(do_normals);
}
|
d737ae3d8fef23bae56403f9b341637d9c4097a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
typedef struct
{
int width;
int height;
float* elements;
} Matrix;
#define BLOCK_SIZE 2
#define MATRIX_SIZE 2
__global__ void MatMulKernel(const Matrix, const Matrix, const Matrix);
// Host wrapper computing C = A * B on the GPU.
// Copies A and B to device memory, launches MatMulKernel with one thread per
// output element, and copies the result back into C.elements.
// Preconditions (assumed, not checked): A.width == B.height,
// C.height == A.height, C.width == B.width.
// NOTE(review): the grid size truncates (B.width / BLOCK_SIZE); dimensions
// that are not multiples of BLOCK_SIZE leave trailing rows/columns
// uncomputed -- confirm callers only use multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Device copy of A.
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size_a = A.width * A.height * sizeof(float);
    hipMalloc(&d_A.elements, size_a);
    hipMemcpy(d_A.elements, A.elements, size_a, hipMemcpyHostToDevice);
    // Device copy of B.
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size_t size_b = B.width * B.height * sizeof(float);
    hipMalloc(&d_B.elements, size_b);
    hipMemcpy(d_B.elements, B.elements, size_b, hipMemcpyHostToDevice);
    // Device buffer for the result.
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    // Bug fix: the size was computed from B's dimensions; use C's own.
    size_t size_c = C.width * C.height * sizeof(float);
    hipMalloc(&d_C.elements, size_c);
    // One thread per element of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
    // Blocking copy back; also synchronizes with the kernel.
    hipMemcpy(C.elements, d_C.elements, size_c, hipMemcpyDeviceToHost);
    printf("MutMul: %f, %f", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);
}
// Naive GEMM kernel: each thread computes one element of C = A * B as the
// dot product of a row of A with a column of B.
// Bug fix: added a bounds guard so threads outside C's extent (when the grid
// over-covers the matrix) do not read or write out of range.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
    {
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    }
    C.elements[row * C.width + col] = Cvalue;
    printf("Kernel: Cvalue = %f\n", Cvalue); // debug output; serialized and slow
}
// Fills ip[0 .. size) with pseudo-random floats in [0.0, 25.5]
// (rand() & 0xFF scaled by 1/10), seeding the RNG from the wall clock.
void initialData(float *ip, int size)
{
    time_t seed;
    srand((unsigned int) time(&seed));
    int idx = 0;
    while (idx < size)
    {
        ip[idx] = (float)(rand() & 0xFF) / 10.0f;
        ++idx;
    }
}
// Driver: builds two random MATRIX_SIZE x MATRIX_SIZE matrices, multiplies
// them on the GPU via MatMul, prints corner elements, and frees host memory.
int main()
{
    Matrix A;
    A.width = MATRIX_SIZE;
    A.height = MATRIX_SIZE;
    A.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    initialData(A.elements, MATRIX_SIZE*MATRIX_SIZE);
    Matrix B;
    B.width = MATRIX_SIZE;
    B.height = MATRIX_SIZE;
    B.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    // Bug fix: B was never filled -- initialData was called on A twice.
    initialData(B.elements, MATRIX_SIZE*MATRIX_SIZE);
    Matrix C;
    C.width = MATRIX_SIZE;
    C.height = MATRIX_SIZE;
    C.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    printf("Main1, first element: %f, last element: %f\n", A.elements[0], A.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    MatMul(A, B, C);
    printf("Main2, first element: %f, last element: %f\n", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    free(A.elements);
    free(B.elements);
    free(C.elements); // bug fix: C.elements was leaked
    return 0;
}
#include <stdio.h>
#include <time.h>
typedef struct
{
int width;
int height;
float* elements;
} Matrix;
#define BLOCK_SIZE 2
#define MATRIX_SIZE 2
__global__ void MatMulKernel(const Matrix, const Matrix, const Matrix);
// Host wrapper computing C = A * B on the GPU.
// Copies A and B to device memory, launches MatMulKernel with one thread per
// output element, and copies the result back into C.elements.
// Preconditions (assumed, not checked): A.width == B.height,
// C.height == A.height, C.width == B.width.
// NOTE(review): the grid size truncates (B.width / BLOCK_SIZE); dimensions
// that are not multiples of BLOCK_SIZE leave trailing rows/columns
// uncomputed -- confirm callers only use multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Device copy of A.
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size_a = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size_a);
    cudaMemcpy(d_A.elements, A.elements, size_a, cudaMemcpyHostToDevice);
    // Device copy of B.
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size_t size_b = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size_b);
    cudaMemcpy(d_B.elements, B.elements, size_b, cudaMemcpyHostToDevice);
    // Device buffer for the result.
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    // Bug fix: the size was computed from B's dimensions; use C's own.
    size_t size_c = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size_c);
    // One thread per element of C.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(C.elements, d_C.elements, size_c, cudaMemcpyDeviceToHost);
    printf("MutMul: %f, %f", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Naive GEMM kernel: each thread computes one element of C = A * B as the
// dot product of a row of A with a column of B.
// Bug fix: added a bounds guard so threads outside C's extent (when the grid
// over-covers the matrix) do not read or write out of range.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
    {
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    }
    C.elements[row * C.width + col] = Cvalue;
    printf("Kernel: Cvalue = %f\n", Cvalue); // debug output; serialized and slow
}
// Fills ip[0 .. size) with pseudo-random floats in [0.0, 25.5]
// (rand() & 0xFF scaled by 1/10), seeding the RNG from the wall clock.
void initialData(float *ip, int size)
{
    time_t seed;
    srand((unsigned int) time(&seed));
    int idx = 0;
    while (idx < size)
    {
        ip[idx] = (float)(rand() & 0xFF) / 10.0f;
        ++idx;
    }
}
// Driver: builds two random MATRIX_SIZE x MATRIX_SIZE matrices, multiplies
// them on the GPU via MatMul, prints corner elements, and frees host memory.
int main()
{
    Matrix A;
    A.width = MATRIX_SIZE;
    A.height = MATRIX_SIZE;
    A.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    initialData(A.elements, MATRIX_SIZE*MATRIX_SIZE);
    Matrix B;
    B.width = MATRIX_SIZE;
    B.height = MATRIX_SIZE;
    B.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    // Bug fix: B was never filled -- initialData was called on A twice.
    initialData(B.elements, MATRIX_SIZE*MATRIX_SIZE);
    Matrix C;
    C.width = MATRIX_SIZE;
    C.height = MATRIX_SIZE;
    C.elements = (float*)malloc(sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
    printf("Main1, first element: %f, last element: %f\n", A.elements[0], A.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    MatMul(A, B, C);
    printf("Main2, first element: %f, last element: %f\n", C.elements[0], C.elements[MATRIX_SIZE*MATRIX_SIZE-1]);
    free(A.elements);
    free(B.elements);
    free(C.elements); // bug fix: C.elements was leaked
    return 0;
}
daf3c2aaca75427c60c33d9196b124e1f427a259.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Empty placeholder kernel; performs no work.
__global__
void main_kernel()
{
}
| daf3c2aaca75427c60c33d9196b124e1f427a259.cu |
// Empty placeholder kernel; performs no work.
__global__
void main_kernel()
{
}
|
ffd78f6abcd6a3494b2cbb8853c9d68c5c112da4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/device_functions.h"
#include "rocblas.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <windows.h>
#include <queue>
#include <map>
#include <winbase.h>
#include <omp.h>
#define CHECK_CUDA_STATUS(val) { if (val) printf("Err: %s: line %d, file %s\n", hipGetErrorString((hipError_t)val), __LINE__, __FILE__); }
using namespace std;
int blk_cnt, thr_cnt;
float kernal_sum, global_sum;
int thread_count;
// Reads the CPU time-stamp counter by emitting the raw RDTSC opcode bytes
// (0x0F 0x31) via MSVC inline assembly; the counter is left in EDX:EAX,
// which is also the __int64 return-register pair on x86 MSVC.
// NOTE(review): MSVC/x86-only and non-portable -- __rdtsc() would be the
// modern intrinsic equivalent.
inline unsigned __int64 GetCycle() {
__asm _emit 0x0F
__asm _emit 0x31
};
// A sample point for DBSCAN clustering.
struct point_t {
int dimen;      // number of coordinates actually used (at most 2)
double cor[2];  // coordinates
int idx; // -1 noice, >= 0 cluster id
int vis; // -1 unvisited, 1 core, 2 border
};
point_t pts[100000];
//__constant__ point_t pts[1000];
// Squared Euclidean distance between two points (no sqrt; callers compare
// against squared thresholds).
double __device__ cudaCalcDistance(const point_t &src, const point_t &dest) {
    double acc = 0.0;
    for (int d = 0; d < src.dimen; ++d) {
        const double diff = src.cor[d] - dest.cor[d];
        acc += diff * diff;
    }
    return acc;
}
// Host counterpart of cudaCalcDistance: squared Euclidean distance between
// two points (no sqrt; callers compare against squared thresholds).
double __host__ hostCalcDistance(const point_t &src, const point_t &dest) {
    double acc = 0.0;
    for (int d = 0; d < src.dimen; ++d) {
        const double diff = src.cor[d] - dest.cor[d];
        acc += diff * diff;
    }
    return acc;
}
/* p0 p1 p2 p3 ... pn
* point0-> * * * * ... *
* point1-> * * * * ... *
* point2-> * * * * ... *
* point3-> * * * * ... *
* ... ...
* pointn-> * * * * ... *
*/
// Builds the len x len symmetric adjacency matrix `neighbors`
// (neighbors[s * len + d] == 1 when points s and d are within minEps) and
// then increments points[p].vis for every point with at least minPts
// neighbors (core points). Both phases use grid-stride loops.
// NOTE(review): __syncthreads() is a block-level barrier only, but phase 2
// reads full matrix rows that phase 1 fills across *all* blocks; with more
// than one block this grid-wide dependency is not covered by the barrier --
// confirm the launch configuration, or split the phases into two kernels.
void __global__ cudaGetNeighbors(point_t* points, int len, int* neighbors, double minEps, int minPts) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int src;
unsigned int dest;
unsigned int point_id = tid;
unsigned int neighborscnt;
// Phase 1: fill the adjacency matrix. Only the upper triangle (src <= dest)
// is computed; the lower triangle is mirrored from it.
while (point_id < len * len) {
src = point_id / len;
dest = point_id % len;
double dist = 0.0;
if (src <= dest) {
dist = cudaCalcDistance(points[src], points[dest]);
if (dist < minEps * minEps) {
neighbors[point_id] = 1;
}
neighbors[dest * len + src] = neighbors[point_id];
}
point_id += blockDim.x * gridDim.x;
}
__syncthreads();
// Phase 2: count each point's neighbors (excluding itself) and mark core
// points by incrementing vis.
point_id = tid;
while (point_id < len) {
neighborscnt = 0;
src = point_id * len;
for (int i = 0; i < len; i++) {
if (point_id != i) {
if (neighbors[src + i]) {
neighborscnt++;
}
}
}
if (neighborscnt >= minPts) {
points[point_id].vis++;
}
point_id += blockDim.x * gridDim.x;
}
}
// Host/OpenMP counterpart of cudaGetNeighbors: fills the len x len symmetric
// adjacency matrix `neighbors` (1 = within minEps) in parallel, then
// sequentially counts neighbors per point and increments points[i].vis for
// every core point (>= minPts neighbors).
void __host__ hostGetNeighbors(point_t* points, int len, int* neighbors, double minEps, int minPts) {
    // Bug fix: src/dest were declared outside the parallel loop and thus
    // shared across OpenMP threads -- a data race. They are now loop-local
    // (implicitly private).
# pragma omp parallel for num_threads(thread_count) schedule(dynamic)
    for (int i = 0; i < len * len; i++) {
        const int src = i / len;
        const int dest = i % len;
        if (src <= dest) {
            double dist = hostCalcDistance(points[src], points[dest]);
            if (dist < minEps * minEps) {
                neighbors[i] = 1;
            }
            // Mirror the upper triangle into the lower one.
            neighbors[dest * len + src] = neighbors[i];
        }
    }
    // Sequential pass: count neighbors (excluding self) and mark core points.
    for (int i = 0; i < len; i++) {
        int neighborscnt = 0;
        const int base = i * len;
        for (int j = 0; j < len; j++) {
            if (i != j && neighbors[base + j]) {
                neighborscnt++;
            }
        }
        if (neighborscnt >= minPts) {
            points[i].vis++;
        }
    }
}
// Assigns cluster ids: for every core/border point (vis >= 0) that is still
// unlabeled, flood-fills its connected component through the adjacency
// matrix with a BFS, expanding only from points with vis >= 0.
// NOTE(review): t_idx is incremented for every point with vis >= 0, even when
// the point was already labeled by an earlier BFS, so cluster ids are
// monotonically increasing but not necessarily contiguous.
void hostSetIdx(point_t* points, int len, int* hostNeighbors) {
queue<int> s;
int t_idx = 1;
//for (int i = 0; i < len; i++) cout << points[i].vis << " ";
//cout << endl;
for (int i = 0; i < len; i++) {
if (points[i].vis >= 0) {
if (points[i].idx < 1) {
// Seed a new cluster from this point and label its direct neighbors.
points[i].idx = t_idx;
int src = i * len;
for (int j = 0; j < len; j++) {
if (hostNeighbors[src + j]) {
points[j].idx = t_idx;
s.push(j);
}
}
// BFS: expand the cluster through core points only.
while (!s.empty()) {
if (points[s.front()].vis >= 0) {
src = s.front() * len;
for (int j = 0; j < len; j++) {
if (hostNeighbors[src + j]) {
if (points[j].idx < 1) {
points[j].idx = t_idx;
s.push(j);
}
}
}
}
s.pop();
}
}
//for (int i = 0; i < len; i++) cout << points[i].idx << " ";
//cout << endl;
t_idx++;
}
}
}
// Runs DBSCAN over `points` (radius minEps, core threshold minPts): builds
// the len x len adjacency matrix on the GPU (cudaGetNeighbors), marks core
// points, then labels clusters on the host via hostSetIdx. Prints the kernel
// time and returns `points` with idx/vis filled in.
point_t* DBSCAN(point_t* points, int len, double minEps, int minPts) {
    // Host-side adjacency matrix, zero-initialized.
    int *hostNeighborArray = (int*)malloc(len * len * sizeof(int));
    for (int i = 0; i < len * len; i++) hostNeighborArray[i] = 0;
    point_t* cudaPoints;
    int *cudaNeighborArray;
    CHECK_CUDA_STATUS(hipMalloc((void**)&cudaPoints, len * sizeof(point_t)));
    CHECK_CUDA_STATUS(hipMalloc((void**)&cudaNeighborArray, len * len * sizeof(int)));
    CHECK_CUDA_STATUS(hipMemcpy(cudaPoints, points, len * sizeof(point_t), hipMemcpyHostToDevice));
    // Time the neighbor kernel with events.
    hipEvent_t kernalStart, kernalEnd;
    hipEventCreate(&kernalStart);
    hipEventCreate(&kernalEnd);
    hipEventRecord(kernalStart, 0);
    cudaGetNeighbors << <blk_cnt, thr_cnt>> > (cudaPoints, len, cudaNeighborArray, minEps, minPts);
    hipEventRecord(kernalEnd, 0);
    hipEventSynchronize(kernalEnd);
    float eps = 0.0;
    hipEventElapsedTime(&eps, kernalStart, kernalEnd);
    printf("CUDA __device__ time: %8.6f ms\n", eps);
    kernal_sum += eps;
    CHECK_CUDA_STATUS(hipMemcpy(hostNeighborArray, cudaNeighborArray, len * len * sizeof(int), hipMemcpyDeviceToHost));
    CHECK_CUDA_STATUS(hipMemcpy(points, cudaPoints, len * sizeof(point_t), hipMemcpyDeviceToHost));
    // Label clusters on the host using the adjacency matrix.
    hostSetIdx(points, len, hostNeighborArray);
    // Bug fixes: the host buffer and the timing events were leaked on every
    // call; release them before returning.
    free(hostNeighborArray);
    hipEventDestroy(kernalStart);
    hipEventDestroy(kernalEnd);
    hipFree(cudaPoints);
    hipFree(cudaNeighborArray);
    return points;
}
// Driver: reads 2-D points from cluster4.txt, then repeatedly reads a
// (blocks, threads) launch configuration from stdin, runs DBSCAN with
// eps = 0.04 and minPts = 3, writes labeled points to result.txt, and prints
// cluster-size statistics and timings.
int main(int argc, char* argv[]) {
thread_count = 1;
srand(time(0));
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
ifstream fin("cluster4.txt");
ofstream fout("result.txt");
//freopen("cluster.txt", "r", stdin);
srand(time(0));
// Load the data set: one "x y" pair per line.
int len = 0;
double a, b;
while (fin >> a >> b) {
pts[len].cor[0] = a;
pts[len++].cor[1] = b;
}
int t = len;
// Optional data duplication for scaling experiments (disabled: iter < 0).
for (int iter = 0; iter < 0; iter++) {
for (int i = 0; i < t; i++) {
pts[len].cor[0] = pts[i].cor[0];
pts[len].cor[1] = pts[i].cor[1];
len++;
}
}
printf("%d\n", len);
// Reset point state before clustering.
for (int i = 0; i < len; i++) {
pts[i].dimen = 2;
pts[i].vis = -1;
pts[i].idx = -1;
}
printf("CUDA Blocks, Threads number:\n");
while (cin >> blk_cnt >> thr_cnt) {
//for (int i = 0; i < len; i++) cout << pts[i].cor[0] << " " << pts[i].cor[1] << " " << pts[i].idx << endl;
kernal_sum = 0.0;
global_sum = 0.0;
//for (int i = 0; i < 2; i++) {
clock_t st;
hipEventRecord(start, 0);
//st = clock();
DBSCAN(pts, len, 0.04, 3);
//printf("Parallel time: %d ms\n", clock() - st);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float epstime;
hipEventElapsedTime(&epstime, start, end);
printf("CUDA __host__ time: %8.6f ms\n", (double)epstime);
global_sum += epstime;
// Tally cluster sizes and dump labeled points.
map <int, int> mp;
for (int i = 0; i < len; i++) {
//cout << pts[i].cor[0] << " " << pts[i].cor[1] << " " << pts[i].idx << endl;
mp[pts[i].idx]++;
fout << pts[i].cor[0] << ", " << pts[i].cor[1] << ", " << pts[i].idx << endl;
}
map <int, int>::iterator it = mp.begin();
for (; it != mp.end(); it++) cout << it->first << " " << it->second << endl;
//}
//printf("Average Kernal: %f, Average Global: %f\n", kernal_sum / 2.0, global_sum / 2.0);
}
return 0;
}
| ffd78f6abcd6a3494b2cbb8853c9d68c5c112da4.cu |
#include "cuda_runtime.h"
#include "device_functions.h"
#include "cublas_v2.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <windows.h>
#include <queue>
#include <map>
#include <winbase.h>
#include <omp.h>
#define CHECK_CUDA_STATUS(val) { if (val) printf("Err: %s: line %d, file %s\n", cudaGetErrorString((cudaError_t)val), __LINE__, __FILE__); }
using namespace std;
int blk_cnt, thr_cnt;
float kernal_sum, global_sum;
int thread_count;
// Reads the CPU time-stamp counter by emitting the raw RDTSC opcode bytes
// (0x0F 0x31) via MSVC inline assembly; the counter is left in EDX:EAX,
// which is also the __int64 return-register pair on x86 MSVC.
// NOTE(review): MSVC/x86-only and non-portable -- __rdtsc() would be the
// modern intrinsic equivalent.
inline unsigned __int64 GetCycle() {
__asm _emit 0x0F
__asm _emit 0x31
};
// A sample point for DBSCAN clustering.
struct point_t {
int dimen;      // number of coordinates actually used (at most 2)
double cor[2];  // coordinates
int idx; // -1 noice, >= 0 cluster id
int vis; // -1 unvisited, 1 core, 2 border
};
point_t pts[100000];
//__constant__ point_t pts[1000];
// Squared Euclidean distance between two points (no sqrt; callers compare
// against squared thresholds).
double __device__ cudaCalcDistance(const point_t &src, const point_t &dest) {
    double acc = 0.0;
    for (int d = 0; d < src.dimen; ++d) {
        const double diff = src.cor[d] - dest.cor[d];
        acc += diff * diff;
    }
    return acc;
}
// Host counterpart of cudaCalcDistance: squared Euclidean distance between
// two points (no sqrt; callers compare against squared thresholds).
double __host__ hostCalcDistance(const point_t &src, const point_t &dest) {
    double acc = 0.0;
    for (int d = 0; d < src.dimen; ++d) {
        const double diff = src.cor[d] - dest.cor[d];
        acc += diff * diff;
    }
    return acc;
}
/* p0 p1 p2 p3 ... pn
* point0-> * * * * ... *
* point1-> * * * * ... *
* point2-> * * * * ... *
* point3-> * * * * ... *
* ... ...
* pointn-> * * * * ... *
*/
// Builds the len x len symmetric adjacency matrix `neighbors`
// (neighbors[s * len + d] == 1 when points s and d are within minEps) and
// then increments points[p].vis for every point with at least minPts
// neighbors (core points). Both phases use grid-stride loops.
// NOTE(review): __syncthreads() is a block-level barrier only, but phase 2
// reads full matrix rows that phase 1 fills across *all* blocks; with more
// than one block this grid-wide dependency is not covered by the barrier --
// confirm the launch configuration, or split the phases into two kernels.
void __global__ cudaGetNeighbors(point_t* points, int len, int* neighbors, double minEps, int minPts) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int src;
unsigned int dest;
unsigned int point_id = tid;
unsigned int neighborscnt;
// Phase 1: fill the adjacency matrix. Only the upper triangle (src <= dest)
// is computed; the lower triangle is mirrored from it.
while (point_id < len * len) {
src = point_id / len;
dest = point_id % len;
double dist = 0.0;
if (src <= dest) {
dist = cudaCalcDistance(points[src], points[dest]);
if (dist < minEps * minEps) {
neighbors[point_id] = 1;
}
neighbors[dest * len + src] = neighbors[point_id];
}
point_id += blockDim.x * gridDim.x;
}
__syncthreads();
// Phase 2: count each point's neighbors (excluding itself) and mark core
// points by incrementing vis.
point_id = tid;
while (point_id < len) {
neighborscnt = 0;
src = point_id * len;
for (int i = 0; i < len; i++) {
if (point_id != i) {
if (neighbors[src + i]) {
neighborscnt++;
}
}
}
if (neighborscnt >= minPts) {
points[point_id].vis++;
}
point_id += blockDim.x * gridDim.x;
}
}
// Host/OpenMP counterpart of cudaGetNeighbors: fills the len x len symmetric
// adjacency matrix `neighbors` (1 = within minEps) in parallel, then
// sequentially counts neighbors per point and increments points[i].vis for
// every core point (>= minPts neighbors).
void __host__ hostGetNeighbors(point_t* points, int len, int* neighbors, double minEps, int minPts) {
    // Bug fix: src/dest were declared outside the parallel loop and thus
    // shared across OpenMP threads -- a data race. They are now loop-local
    // (implicitly private).
# pragma omp parallel for num_threads(thread_count) schedule(dynamic)
    for (int i = 0; i < len * len; i++) {
        const int src = i / len;
        const int dest = i % len;
        if (src <= dest) {
            double dist = hostCalcDistance(points[src], points[dest]);
            if (dist < minEps * minEps) {
                neighbors[i] = 1;
            }
            // Mirror the upper triangle into the lower one.
            neighbors[dest * len + src] = neighbors[i];
        }
    }
    // Sequential pass: count neighbors (excluding self) and mark core points.
    for (int i = 0; i < len; i++) {
        int neighborscnt = 0;
        const int base = i * len;
        for (int j = 0; j < len; j++) {
            if (i != j && neighbors[base + j]) {
                neighborscnt++;
            }
        }
        if (neighborscnt >= minPts) {
            points[i].vis++;
        }
    }
}
// Assigns cluster ids: for every core/border point (vis >= 0) that is still
// unlabeled, flood-fills its connected component through the adjacency
// matrix with a BFS, expanding only from points with vis >= 0.
// NOTE(review): t_idx is incremented for every point with vis >= 0, even when
// the point was already labeled by an earlier BFS, so cluster ids are
// monotonically increasing but not necessarily contiguous.
void hostSetIdx(point_t* points, int len, int* hostNeighbors) {
queue<int> s;
int t_idx = 1;
//for (int i = 0; i < len; i++) cout << points[i].vis << " ";
//cout << endl;
for (int i = 0; i < len; i++) {
if (points[i].vis >= 0) {
if (points[i].idx < 1) {
// Seed a new cluster from this point and label its direct neighbors.
points[i].idx = t_idx;
int src = i * len;
for (int j = 0; j < len; j++) {
if (hostNeighbors[src + j]) {
points[j].idx = t_idx;
s.push(j);
}
}
// BFS: expand the cluster through core points only.
while (!s.empty()) {
if (points[s.front()].vis >= 0) {
src = s.front() * len;
for (int j = 0; j < len; j++) {
if (hostNeighbors[src + j]) {
if (points[j].idx < 1) {
points[j].idx = t_idx;
s.push(j);
}
}
}
}
s.pop();
}
}
//for (int i = 0; i < len; i++) cout << points[i].idx << " ";
//cout << endl;
t_idx++;
}
}
}
// Runs DBSCAN over `points` (radius minEps, core threshold minPts): builds
// the len x len adjacency matrix on the GPU (cudaGetNeighbors), marks core
// points, then labels clusters on the host via hostSetIdx. Prints the kernel
// time and returns `points` with idx/vis filled in.
point_t* DBSCAN(point_t* points, int len, double minEps, int minPts) {
    // Host-side adjacency matrix, zero-initialized.
    int *hostNeighborArray = (int*)malloc(len * len * sizeof(int));
    for (int i = 0; i < len * len; i++) hostNeighborArray[i] = 0;
    point_t* cudaPoints;
    int *cudaNeighborArray;
    CHECK_CUDA_STATUS(cudaMalloc((void**)&cudaPoints, len * sizeof(point_t)));
    CHECK_CUDA_STATUS(cudaMalloc((void**)&cudaNeighborArray, len * len * sizeof(int)));
    CHECK_CUDA_STATUS(cudaMemcpy(cudaPoints, points, len * sizeof(point_t), cudaMemcpyHostToDevice));
    // Time the neighbor kernel with events.
    cudaEvent_t kernalStart, kernalEnd;
    cudaEventCreate(&kernalStart);
    cudaEventCreate(&kernalEnd);
    cudaEventRecord(kernalStart, 0);
    cudaGetNeighbors << <blk_cnt, thr_cnt>> > (cudaPoints, len, cudaNeighborArray, minEps, minPts);
    cudaEventRecord(kernalEnd, 0);
    cudaEventSynchronize(kernalEnd);
    float eps = 0.0;
    cudaEventElapsedTime(&eps, kernalStart, kernalEnd);
    printf("CUDA __device__ time: %8.6f ms\n", eps);
    kernal_sum += eps;
    CHECK_CUDA_STATUS(cudaMemcpy(hostNeighborArray, cudaNeighborArray, len * len * sizeof(int), cudaMemcpyDeviceToHost));
    CHECK_CUDA_STATUS(cudaMemcpy(points, cudaPoints, len * sizeof(point_t), cudaMemcpyDeviceToHost));
    // Label clusters on the host using the adjacency matrix.
    hostSetIdx(points, len, hostNeighborArray);
    // Bug fixes: the host buffer and the timing events were leaked on every
    // call; release them before returning.
    free(hostNeighborArray);
    cudaEventDestroy(kernalStart);
    cudaEventDestroy(kernalEnd);
    cudaFree(cudaPoints);
    cudaFree(cudaNeighborArray);
    return points;
}
// Driver: reads 2-D points from cluster4.txt, then repeatedly reads a
// (blocks, threads) launch configuration from stdin, runs DBSCAN with
// eps = 0.04 and minPts = 3, writes labeled points to result.txt, and prints
// cluster-size statistics and timings.
int main(int argc, char* argv[]) {
thread_count = 1;
srand(time(0));
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
ifstream fin("cluster4.txt");
ofstream fout("result.txt");
//freopen("cluster.txt", "r", stdin);
srand(time(0));
// Load the data set: one "x y" pair per line.
int len = 0;
double a, b;
while (fin >> a >> b) {
pts[len].cor[0] = a;
pts[len++].cor[1] = b;
}
int t = len;
// Optional data duplication for scaling experiments (disabled: iter < 0).
for (int iter = 0; iter < 0; iter++) {
for (int i = 0; i < t; i++) {
pts[len].cor[0] = pts[i].cor[0];
pts[len].cor[1] = pts[i].cor[1];
len++;
}
}
printf("%d\n", len);
// Reset point state before clustering.
for (int i = 0; i < len; i++) {
pts[i].dimen = 2;
pts[i].vis = -1;
pts[i].idx = -1;
}
printf("CUDA Blocks, Threads number:\n");
while (cin >> blk_cnt >> thr_cnt) {
//for (int i = 0; i < len; i++) cout << pts[i].cor[0] << " " << pts[i].cor[1] << " " << pts[i].idx << endl;
kernal_sum = 0.0;
global_sum = 0.0;
//for (int i = 0; i < 2; i++) {
clock_t st;
cudaEventRecord(start, 0);
//st = clock();
DBSCAN(pts, len, 0.04, 3);
//printf("Parallel time: %d ms\n", clock() - st);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float epstime;
cudaEventElapsedTime(&epstime, start, end);
printf("CUDA __host__ time: %8.6f ms\n", (double)epstime);
global_sum += epstime;
// Tally cluster sizes and dump labeled points.
map <int, int> mp;
for (int i = 0; i < len; i++) {
//cout << pts[i].cor[0] << " " << pts[i].cor[1] << " " << pts[i].idx << endl;
mp[pts[i].idx]++;
fout << pts[i].cor[0] << ", " << pts[i].cor[1] << ", " << pts[i].idx << endl;
}
map <int, int>::iterator it = mp.begin();
for (; it != mp.end(); it++) cout << it->first << " " << it->second << endl;
//}
//printf("Average Kernal: %f, Average Global: %f\n", kernal_sum / 2.0, global_sum / 2.0);
}
return 0;
}
|
45f2abaa20d2b80d7ee4c7cbf28d3680423142d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// \brief
/// This implements the Smith-Waterman algorithm based on a 45 degree
/// rotated dynamic programming matrix. The benefit of the
/// rotation is:
/// - each diagonal can be represented by contiguous memory
/// --> reduces page faults
/// --> reduces resident memory use in the core algorithm
/// --> enables the algorithm to run more efficiently on large
/// inputs.
///
/// The disadvantages of the rotation is:
/// - if the original output matrix needs to be maintained, copying
/// back may lead to costly page faults, outweighing the benefits of
/// the rotation.
/// see @todo_1 for details.
///
/// \email pirkelbauer2@llnl.gov
/*
* Compilation: nvcc -std=c++11 -O3 -DNDEBUG=1 sw-rotated-cuda-unified.cu -o smithW-cuda
* nvcc -std=c++11 -O0 -G -g sw-rotated-cuda-unified.cu -o dbg-smithW-cuda
*/
#include <vector>
#include <limits>
#include <cassert>
#include <algorithm>
#include <utility>
#include <iostream>
#include <chrono>
#include "parameters.h"
static const bool DEBUG_MODE = false;
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/// defines type for indices into arrays and matrices
/// (needs to be a signed type)
typedef long long int index_t;
/// defines data type for scoring
typedef int score_t;
/// defines data type for linking paths
enum link_t { UNDEF = -1, NOLINK = 0, UP = 1, LEFT = 2, DIAGONAL = 3 };
// global constants
static const score_t PATH = -1;
static const score_t NONE = 0; // -4
static const score_t MATCH_SCORE = 3; // 5 in omp_smithW_orig
static const score_t MISSMATCH_SCORE = -3; // -3
static const score_t GAP_SCORE = -2; // -4
typedef std::vector<char> char_seq;
/// Cyclic three-way rotation: afterwards a holds the old c,
/// b holds the old a, and c holds the old b.
template <class T>
static inline
void rotate3(T& a, T& b, T& c)
{
  const T old_a = a;
  const T old_b = b;
  const T old_c = c;
  a = old_c;
  b = old_a;
  c = old_b;
}
/// Substitution score for aligning a[ai] against b[bi]:
/// MATCH_SCORE when the characters are equal, MISSMATCH_SCORE otherwise.
static inline
__device__
int matchMissmatchScore(const char* a, const char* b, size_t ai, size_t bi)
{
  if (a[ai] == b[bi])
    return MATCH_SCORE;
  return MISSMATCH_SCORE;
} /* End of matchMissmatchScore */
// Scores one anti-diagonal ("wavefront") of the Smith-Waterman matrix.
// Thread loop_j computes cell j = iterspace_lb + loop_j of diagonal i from
// the two previous wavefronts M_1 (diagonal i-1) and M_2 (diagonal i-2),
// records the winning predecessor link in P_0, and then competes to publish
// a pointer to the running maximum via a lock-free CAS loop on *maxpos.
//
// iterspace_lb/ub : half-open [lb, ub) range of valid cells on this diagonal
// i               : anti-diagonal number
// M_0 / P_0       : output score / predecessor-link arrays for this diagonal
// M_1, M_2        : score arrays of the previous two diagonals
// a, b            : input sequences (accessed via ai-1 / bi-1 below)
// maxpos          : one-element cell holding a pointer to the best score so far
static
__global__
void similarityScore_kernel( index_t iterspace_lb,
                             index_t iterspace_ub,
                             index_t i,
                             score_t* M_0,
                             link_t* P_0,
                             const score_t* M_1,
                             const score_t* M_2,
                             const char* a,
                             const char* b,
                             const score_t** maxpos
                           )
{
  const index_t loop_j = blockIdx.x * blockDim.x + threadIdx.x;
  // guard: the grid is rounded up to whole blocks, excess threads exit
  if (loop_j >= iterspace_ub - iterspace_lb) return;
  // map the thread onto sequence positions: ai walks a, bi walks b
  const index_t ai = (iterspace_ub - loop_j) - 1;
  const index_t bi = i - ai;
  const index_t j = iterspace_lb + loop_j;
  assert(!DEBUG_MODE || (M_1[j] >= 0 && M_1[j-1] >= 0 && M_2[j-1] >= 0));
  // candidate scores from the three possible predecessors
  const index_t up = M_1[j] + GAP_SCORE;
  const index_t lft = M_1[j-1] + GAP_SCORE;
  const index_t diag = M_2[j-1] + matchMissmatchScore(a, b, ai-1, bi-1);
  // Smith-Waterman clamps at NONE (0); NOLINK marks an alignment start
  score_t max = NONE;
  link_t pred = NOLINK;
  if (up > max)
  {
    max = up;
    pred = UP;
  }
  if (lft > max)
  {
    max = lft;
    pred = LEFT;
  }
  if (diag > max)
  {
    max = diag;
    pred = DIAGONAL;
  }
  assert(!DEBUG_MODE || (M_0[j] < 0));
  M_0[j] = max;
  P_0[j] = pred;
  // Updates maximum score to be used as seed on backtrack
  {
    // \note \pp
    // locks seem to be a NOGO in CUDA warps,
    // thus the update to set the maximum is made nonblocking.
    // Classic CAS retry loop: keep trying to swing *maxpos to &M_0[j]
    // while this thread's score still beats the published best.
    const score_t* assumed = nullptr;
    const score_t* current = *maxpos;
    while ((current != assumed) && max > *current)
    {
      // \note consider atomicCAS_system for multi GPU systems
      assumed = current;
      current = (const score_t*) atomicCAS( (unsigned long long int*) maxpos,
                                            (unsigned long long int) assumed,
                                            (unsigned long long int) (M_0 + j)
                                          );
    }
  }
}
/*--------------------------------------------------------------------
 * Function: diagonalBasePoint
 * Purpose: Calculate the index of the first element of a given diagonal
 */
index_t diagonalBasePoint(index_t i, index_t w)
{
  const index_t col = i - 1;
  // diagonals starting on the first row of the matrix
  if (col <= w)
  {
    return col;
  }
  // otherwise the base point is the last element on the (i-w)+2 th row
  return (w + 1) * (i - w) - 1;
}
/// Aborts the program when a HIP/CUDA runtime call failed.
/// \param err result code of the preceding runtime API call
/// \note exits with a non-zero status so callers/scripts can detect the
///       failure; the previous exit(0) reported success on a fatal error.
static inline
void check_cuda_success(hipError_t err)
{
  if (err == hipSuccess) return;
  std::cerr << "CUDA error: " << hipGetErrorString(err) << std::endl;
  exit(1);
}
/// malloc replacement: allocates numelems objects of type T in
/// HIP managed (unified) memory; aborts via check_cuda_success on failure.
template<class T>
static
T* unified_alloc(size_t numelems)
{
  void* raw = nullptr;
  const size_t numbytes = numelems * sizeof(T);
  check_cuda_success(hipMallocManaged(&raw, numbytes, hipMemAttachGlobal));
  return reinterpret_cast<T*>(raw);
}
/// Releases memory previously obtained from unified_alloc.
static
void unified_free(void* ptr)
{
  check_cuda_success(hipFree(ptr));
}
/// \brief computes smith-waterman on the GPU using a 45-degree rotated
///        ("wavefront") representation: one kernel launch per anti-diagonal,
///        with results copied back (strided) into the conventional H/P layout.
/// \param a input sequence of length w
/// \param b input sequence of length h
/// \param w length of input sequence a
/// \param h length of input sequence b
/// \param H output matrix (size == (w+1) * (h+1)) representing all scores
/// \param P output matrix (size == (w+1) * (h+1)) to link longest sequences
/// \param maxloc output: position of the best score inside H; the same linear
///               offset addresses the matching link in P
/// \note output data does not need to be initialized
void smithWaterman( const char* a,
                    const char* b,
                    index_t w,
                    index_t h,
                    score_t* H,
                    link_t* P,
                    score_t** maxloc
                  )
{
  // Size is important for pointer CAS in CUDA Kernel
  static_assert( sizeof(maxloc) == sizeof(unsigned long long int),
                 "pointer/int size mismatch (req. for CUDA atomicCAS)!"
               );
  // number of anti-diagonals to process
  const index_t MAXITER = 2 + w + h - 1;
  // wavefront arrays for three iterations
  score_t* const wavefronts = unified_alloc<score_t>(3*MAXITER);
  link_t* pred_0 = unified_alloc<link_t>(MAXITER);
  link_t* pred_1 = unified_alloc<link_t>(MAXITER);
  score_t* const maxscr = unified_alloc<score_t>(1);
  const score_t** const maxpos = unified_alloc<const score_t*>(1);
  // wavefront representation _time
  score_t* M_2 = wavefronts;
  score_t* M_1 = wavefronts + MAXITER;
  // wavefront output
  score_t* M_0 = wavefronts + 2*MAXITER;
  // initialize t == 0
  M_1[0] = NONE;
  // set maxloc to origin, and origin to 0
  *maxloc = H;
  **maxloc = 0;
  *maxscr = 0;
  // smith waterman
  for (index_t i = 1; i <= MAXITER; ++i)
  {
    // [lb, ub) is the valid cell range on diagonal i; the comma expressions
    // also initialize the diagonal's boundary cell(s) in M_0 to NONE
    const index_t lb = (i<=h) ? (M_0[0] = NONE, 1) : i - h;
    const index_t ub = (i<=w) ? (M_0[i] = NONE, i) : w + 1;
    // reset the published best-score pointer for this launch
    *maxpos = maxscr;
    assert((ub - lb >= 0) && (ub - lb <= h));
    const index_t THREADS_PER_BLOCK = 1024;
    const index_t ITER_SPACE = (ub-lb+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( similarityScore_kernel)
    , dim3(ITER_SPACE), dim3(THREADS_PER_BLOCK), 0, 0,
    lb, ub, i, M_0, pred_0, M_1, M_2, a, b, maxpos);
    // rotate wavefront vectors
    // (host-side pointer bookkeeping only: after this, M_1 names the buffer
    // the kernel is writing, device memory itself is untouched)
    rotate3(M_0, M_1, M_2);
    // swap output vectors
    std::swap(pred_0, pred_1);
    // wait for the kernel before reading its results on the host
    hipDeviceSynchronize();
    // for debugging purposes clear all M_0
    if (DEBUG_MODE)
    std::fill(M_0, M_0+MAXITER, std::numeric_limits<score_t>::min());
    // \note
    //   The problem with the CUDA algorithm is that getting the
    //   data off of the GPU produces an enormous amount of page faults.
    //   e.g., on Lassen with a 100000x10000 data set copying the data
    //         out from the GPU results in a 20x performance hit.
    //
    // \todo @todo_1
    //   Can we hide the data transfer behind the computation of the
    //   next iteration?
    //   - in principle, this should be possible. The next round of
    //     computation only changes M_0, and pred_0, thus we could
    //     overlap the computation with data transfer back to the CPU.
    //   - in a first attempt, CUDA streams were attempted. In this
    //     versions there existed a computestream and a transferstream.
    //     the transferstream synchronized one time step after the
    //     computation. However, the result was a minor slow down to
    //     the version in this file.
    //     see file sw-rotated-cuda-global-stream.cu
    //
    //   - TRY to copy the data back into the H,P arrays using OpenMP
    //     ...
    //   - Alternatively, the interface to SmithWaterman could be changed.
    //     The H,P representations are artifacts from the standard
    //     algorithm. By moving towards a 45 degree rotated base version
    //     copying back could be entirely avoided at the expense that
    //     the data representation becomes twice is big (though this
    //     could possibly be remedied by using a clever data layout).
    {
      // results from this iteration are in M_1
      // -> strided copies back to H and P
      // consecutive cells of an anti-diagonal are w elements apart in the
      // row-major (w+1)-wide H/P layout, hence the pitch of w*sizeof(...)
      index_t ofs = diagonalBasePoint(i, w) + w + 1;
      hipError_t errH = hipMemcpy2D( H+ofs,
                                     w*sizeof(*H),
                                     M_1 + lb,
                                     sizeof(*M_1),
                                     sizeof(*M_1),
                                     ub-lb,
                                     hipMemcpyDefault
                                   );
      check_cuda_success(errH);
      hipError_t errP = hipMemcpy2D( P+ofs,
                                     w*sizeof(*P),
                                     pred_1 + lb,
                                     sizeof(*pred_1),
                                     sizeof(*pred_1),
                                     ub-lb,
                                     hipMemcpyDefault
                                   );
      check_cuda_success(errP);
    }
    {
      const score_t* maxx = *maxpos;
      // update maxscore, if maxpos points to an improved location
      // (translate the wavefront index j back to a linear offset in H)
      if (maxx != maxscr)
      {
        index_t j = maxx - M_1;
        index_t ofs = diagonalBasePoint(i, w) + w + 1;
        ofs += (j - lb) * w;
        *maxloc = H + ofs;
        *maxscr = *maxx;
      }
    }
  }
  unified_free(wavefronts);
  unified_free(pred_0);
  unified_free(pred_1);
  unified_free(maxscr);
  unified_free(maxpos);
}
/*--------------------------------------------------------------------
 * Function: backtrack
 * Purpose:  Walk the predecessor links from maxPos back to the start of
 *           the alignment, negating each visited link (PATH == -1) so
 *           the printing code can highlight the path.
 *           m is the row stride of P (its width, w+1).
 */
void backtrack(link_t* P, index_t maxPos, index_t m) {
  // predecessor of the cell currently visited
  index_t predPos = 0;
  // follow links until a NONE cell terminates the path
  for (;;) {
    const link_t link = P[maxPos];
    if (link == DIAGONAL)  predPos = maxPos - m - 1;
    else if (link == UP)   predPos = maxPos - m;
    else if (link == LEFT) predPos = maxPos - 1;
    else                   assert(false);
    // flip the sign to mark this cell as part of the result path
    P[maxPos] = static_cast<link_t>(link * PATH);
    maxPos = predPos;
    if (P[maxPos] == NONE) break;
  }
} /* End of backtrack */
/*--------------------------------------------------------------------
 * Function: printMatrix
 * Purpose:  Print the (m+1) x (n+1) similarity matrix, labelling the
 *           columns with sequence a and the rows with sequence b.
 */
void printMatrix(score_t* matrix, const char* a, const char* b, index_t m, index_t n) {
  printf("-\t-\t");
  for (index_t j = 0; j < m; j++) {
    printf("%c\t", a[j]);
  }
  printf("\n-\t");
  for (index_t i = 0; i < n+1; i++) { // Lines
    for (index_t j = 0; j < m+1; j++) {
      if (j==0 && i>0) printf("%c\t", b[i-1]);
      // use portable std::max (as the CUDA twin of this file does) instead
      // of the toolkit-specific global ::max overload; negative DEBUG
      // sentinel values are clamped to 0 for display
      printf("%d\t", std::max(0, matrix[(m+1) * i + j]));
    }
    printf("\n");
  }
} /* End of printMatrix */
/*--------------------------------------------------------------------
 * Function: printPredecessorMatrix
 * Purpose:  Print the (m+1) x (n+1) predecessor matrix; links on the
 *           backtracked path are stored negated and shown in bold red.
 */
void printPredecessorMatrix(link_t* matrix, const char* a, const char* b, index_t m, index_t n) {
  printf(" ");
  for (index_t j = 0; j < m; j++) {
    printf("%c ", a[j]);
  }
  printf("\n ");
  for (index_t i = 0; i < n+1; i++) { //Lines
    for (index_t j = 0; j < m+1; j++) {
      if (j==0 && i>0) printf("%c ", b[i-1]);
      // row stride is m+1 (P has m+1 columns, see the calloc in main and
      // printMatrix); the previous stride of m skewed every printed row
      index_t index = (m+1) * i + j;
      if (matrix[index] < 0) {
        // negated links mark the optimal path found by backtrack()
        // (arrow glyphs restored from the CUDA original of this file;
        // they were lost in the encoding of this copy)
        printf(BOLDRED);
        if (matrix[index] == -UP)
          printf("↑ ");
        else if (matrix[index] == -LEFT)
          printf("← ");
        else if (matrix[index] == -DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
        printf(RESET);
      } else {
        if (matrix[index] == UP)
          printf("↑ ");
        else if (matrix[index] == LEFT)
          printf("← ");
        else if (matrix[index] == DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
      }
    }
    printf("\n");
  }
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
 * Function: generate
 * Purpose:  Fill a (length m) and b (length n) with random bases.
 *           The value->base mapping (0:A, 2:C, 3:G, otherwise T)
 *           matches the original implementation.
 */
void generate(char* a, char* b, index_t m, index_t n) {
  // Random seed
  srand(time(NULL));
  // draw one base per call; one rand() consumed per element, a first then b
  auto randomBase = []() -> char {
    switch (rand() % 4) {
      case 0:  return 'A';
      case 2:  return 'C';
      case 3:  return 'G';
      default: return 'T';
    }
  };
  long long int i;
  for (i = 0; i < m; i++) {
    a[i] = randomBase();
  }
  for (i = 0; i < n; i++) {
    b[i] = randomBase();
  }
} /* End of generate */
/*--------------------------------------------------------------------
* Function: main
*/
// Program entry point: parses optional sequence lengths, builds the inputs
// (the built-in Wikipedia example by default), runs the wavefront
// Smith-Waterman, optionally verifies/prints, backtracks the best path,
// and reports elapsed wall-clock time.
int main(int argc, char* argv[])
{
  typedef std::chrono::time_point<std::chrono::system_clock> time_point;
  bool useBuiltInData = true;
  index_t m = 8;
  index_t n = 9;
  // two arguments override the default sizes and switch to random input
  if (argc==3)
  {
    m = strtoll(argv[1], NULL, 10);
    n = strtoll(argv[2], NULL, 10);
    useBuiltInData = false;
  }
  if (useBuiltInData)
  printf ("Using built-in data for testing ..\n");
  printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
  // Allocates a and b
  // \pp \note m (instead of m+1), b/c end marker is not needed
  //~ char* a = (char*)malloc(m * sizeof(char));
  //~ char* b = (char*)malloc(n * sizeof(char));
  // unified memory so the GPU kernel can read the sequences directly
  char* a = unified_alloc<char>(m);
  char* b = unified_alloc<char>(n);
  std::cerr << "a,b allocated: " << m << "/" << n << std::endl;
  //~ // Because now we have zeros
  // \pp m and n are the lengths of input strings ..
  //~ m++;
  //~ n++;
  if (useBuiltInData)
  {
    //Uncomment this to test the sequence available at
    //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
    // assert(m=11 && n=7);
    // a[0] = 'C';
    // a[1] = 'G';
    // a[2] = 'T';
    // a[3] = 'G';
    // a[4] = 'A';
    // a[5] = 'A';
    // a[6] = 'T';
    // a[7] = 'T';
    // a[8] = 'C';
    // a[9] = 'A';
    // a[10] = 'T';
    // b[0] = 'G';
    // b[1] = 'A';
    // b[2] = 'C';
    // b[3] = 'T';
    // b[4] = 'T';
    // b[5] = 'A';
    // b[6] = 'C';
    // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
    // Using the wiki example to verify the results
    assert(m==8 && n==9);
    b[0] = 'G';
    b[1] = 'G';
    b[2] = 'T';
    b[3] = 'T';
    b[4] = 'G';
    b[5] = 'A';
    b[6] = 'C';
    b[7] = 'T';
    b[8] = 'A';
    a[0] = 'T';
    a[1] = 'G';
    a[2] = 'T';
    a[3] = 'T';
    a[4] = 'A';
    a[5] = 'C';
    a[6] = 'G';
    a[7] = 'G';
  }
  else
  {
    // Gen random arrays a and b
    generate(a, b, m, n);
  }
  time_point starttime = std::chrono::system_clock::now();
  // Allocates similarity matrix H (plain host memory; smithWaterman
  // copies its results into it)
  score_t* H = (score_t*) calloc((m+1) * (n+1), sizeof(score_t));
  // Allocates predecessor matrix P
  link_t* P = (link_t*) calloc((m+1) * (n+1), sizeof(link_t));
  // maxloc receives a pointer to the best-scoring cell inside H
  score_t* maxloc = nullptr;
  smithWaterman(a, b, m, n, H, P, &maxloc);
  time_point endtime = std::chrono::system_clock::now();
  if (DEBUG_MODE)
  {
    printf("\nSimilarity Matrix:\n");
    printMatrix(H, a, b, m, n);
    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P, a, b, m, n);
  }
  if (useBuiltInData)
  {
    printf ("Verifying results using the builtinIn data: %s\n", (H[(n+1)*(m+1)-1]==7)?"true":"false");
    assert (H[(n+1)*(m+1)-1]==7);
  }
  // maxloc points into H; the same linear offset addresses the matching
  // cell in P, and m+1 is the row stride of both matrices
  backtrack(P, maxloc - H, m+1);
  int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(endtime-starttime).count();
  printf("\nElapsed time: %d ms\n\n", elapsed);
  // Frees similarity matrixes
  free(H);
  free(P);
  //Frees input arrays
  //~ free(a);
  //~ free(b);
  unified_free(a);
  unified_free(b);
  return 0;
} /* End of main */
| 45f2abaa20d2b80d7ee4c7cbf28d3680423142d5.cu |
/// \brief
/// This implements the Smith-Waterman algorithm based on a 45 degree
/// rotated dynamic programming matrix. The benefit of the
/// rotation is:
/// - each diagonal can be represented by contiguous memory
/// --> reduces page faults
/// --> reduces resident memory use in the core algorithm
/// --> enables the algorithm to run more efficiently on large
/// inputs.
///
/// The disadvantages of the rotation is:
/// - if the original output matrix needs to be maintained, copying
/// back may lead to costly page faults, outweighing the benefits of
/// the rotation.
/// see @todo_1 for details.
///
/// \email pirkelbauer2@llnl.gov
/*
* Compilation: nvcc -std=c++11 -O3 -DNDEBUG=1 sw-rotated-cuda-unified.cu -o smithW-cuda
* nvcc -std=c++11 -O0 -G -g sw-rotated-cuda-unified.cu -o dbg-smithW-cuda
*/
#include <vector>
#include <limits>
#include <cassert>
#include <algorithm>
#include <utility>
#include <iostream>
#include <chrono>
#include "parameters.h"
static const bool DEBUG_MODE = false;
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/// defines type for indices into arrays and matrices
/// (needs to be a signed type)
typedef long long int index_t;
/// defines data type for scoring
typedef int score_t;
/// defines data type for linking paths
enum link_t { UNDEF = -1, NOLINK = 0, UP = 1, LEFT = 2, DIAGONAL = 3 };
// global constants
static const score_t PATH = -1;
static const score_t NONE = 0; // -4
static const score_t MATCH_SCORE = 3; // 5 in omp_smithW_orig
static const score_t MISSMATCH_SCORE = -3; // -3
static const score_t GAP_SCORE = -2; // -4
typedef std::vector<char> char_seq;
/// Rotates three values cyclically: a receives the old c,
/// b receives the old a, and c receives the old b.
template <class T>
static inline
void rotate3(T& a, T& b, T& c)
{
  const T prev_a = a;
  const T prev_b = b;
  a = c;
  c = prev_b;
  b = prev_a;
}
/// Scores the substitution of a[ai] by b[bi]: MATCH_SCORE for equal
/// characters, MISSMATCH_SCORE for a mismatch.
static inline
__device__
int matchMissmatchScore(const char* a, const char* b, size_t ai, size_t bi)
{
  const bool same = (a[ai] == b[bi]);
  return same ? MATCH_SCORE : MISSMATCH_SCORE;
} /* End of matchMissmatchScore */
// Scores one anti-diagonal ("wavefront") of the Smith-Waterman matrix.
// Thread loop_j computes cell j = iterspace_lb + loop_j of diagonal i from
// the two previous wavefronts M_1 (diagonal i-1) and M_2 (diagonal i-2),
// records the winning predecessor link in P_0, and then competes to publish
// a pointer to the running maximum via a lock-free CAS loop on *maxpos.
//
// iterspace_lb/ub : half-open [lb, ub) range of valid cells on this diagonal
// i               : anti-diagonal number
// M_0 / P_0       : output score / predecessor-link arrays for this diagonal
// M_1, M_2        : score arrays of the previous two diagonals
// a, b            : input sequences (accessed via ai-1 / bi-1 below)
// maxpos          : one-element cell holding a pointer to the best score so far
static
__global__
void similarityScore_kernel( index_t iterspace_lb,
                             index_t iterspace_ub,
                             index_t i,
                             score_t* M_0,
                             link_t* P_0,
                             const score_t* M_1,
                             const score_t* M_2,
                             const char* a,
                             const char* b,
                             const score_t** maxpos
                           )
{
  const index_t loop_j = blockIdx.x * blockDim.x + threadIdx.x;
  // guard: the grid is rounded up to whole blocks, excess threads exit
  if (loop_j >= iterspace_ub - iterspace_lb) return;
  // map the thread onto sequence positions: ai walks a, bi walks b
  const index_t ai = (iterspace_ub - loop_j) - 1;
  const index_t bi = i - ai;
  const index_t j = iterspace_lb + loop_j;
  assert(!DEBUG_MODE || (M_1[j] >= 0 && M_1[j-1] >= 0 && M_2[j-1] >= 0));
  // candidate scores from the three possible predecessors
  const index_t up = M_1[j] + GAP_SCORE;
  const index_t lft = M_1[j-1] + GAP_SCORE;
  const index_t diag = M_2[j-1] + matchMissmatchScore(a, b, ai-1, bi-1);
  // Smith-Waterman clamps at NONE (0); NOLINK marks an alignment start
  score_t max = NONE;
  link_t pred = NOLINK;
  if (up > max)
  {
    max = up;
    pred = UP;
  }
  if (lft > max)
  {
    max = lft;
    pred = LEFT;
  }
  if (diag > max)
  {
    max = diag;
    pred = DIAGONAL;
  }
  assert(!DEBUG_MODE || (M_0[j] < 0));
  M_0[j] = max;
  P_0[j] = pred;
  // Updates maximum score to be used as seed on backtrack
  {
    // \note \pp
    // locks seem to be a NOGO in CUDA warps,
    // thus the update to set the maximum is made nonblocking.
    // Classic CAS retry loop: keep trying to swing *maxpos to &M_0[j]
    // while this thread's score still beats the published best.
    const score_t* assumed = nullptr;
    const score_t* current = *maxpos;
    while ((current != assumed) && max > *current)
    {
      // \note consider atomicCAS_system for multi GPU systems
      assumed = current;
      current = (const score_t*) atomicCAS( (unsigned long long int*) maxpos,
                                            (unsigned long long int) assumed,
                                            (unsigned long long int) (M_0 + j)
                                          );
    }
  }
}
/*--------------------------------------------------------------------
 * Function: diagonalBasePoint
 * Purpose: Calculate the index of the first element of a given diagonal
 */
index_t diagonalBasePoint(index_t i, index_t w)
{
  // diagonals whose base point lies on the first row
  const index_t firstRowCol = i - 1;
  if (firstRowCol <= w)
  {
    return firstRowCol;
  }
  // otherwise the base point is the last element on the (i-w)+2 th row
  return (w + 1) * (i - w) - 1;
}
/// Aborts the program when a CUDA runtime call failed.
/// \param err result code of the preceding runtime API call
/// \note exits with a non-zero status so callers/scripts can detect the
///       failure; the previous exit(0) reported success on a fatal error.
static inline
void check_cuda_success(cudaError_t err)
{
  if (err == cudaSuccess) return;
  std::cerr << "CUDA error: " << cudaGetErrorString(err) << std::endl;
  exit(1);
}
/// malloc replacement: allocates numelems objects of type T in
/// CUDA managed (unified) memory; aborts via check_cuda_success on failure.
template<class T>
static
T* unified_alloc(size_t numelems)
{
  void* raw = nullptr;
  const size_t numbytes = numelems * sizeof(T);
  check_cuda_success(cudaMallocManaged(&raw, numbytes, cudaMemAttachGlobal));
  return reinterpret_cast<T*>(raw);
}
/// Releases memory previously obtained from unified_alloc.
static
void unified_free(void* ptr)
{
  check_cuda_success(cudaFree(ptr));
}
/// \brief computes smith-waterman on the GPU using a 45-degree rotated
///        ("wavefront") representation: one kernel launch per anti-diagonal,
///        with results copied back (strided) into the conventional H/P layout.
/// \param a input sequence of length w
/// \param b input sequence of length h
/// \param w length of input sequence a
/// \param h length of input sequence b
/// \param H output matrix (size == (w+1) * (h+1)) representing all scores
/// \param P output matrix (size == (w+1) * (h+1)) to link longest sequences
/// \param maxloc output: position of the best score inside H; the same linear
///               offset addresses the matching link in P
/// \note output data does not need to be initialized
void smithWaterman( const char* a,
                    const char* b,
                    index_t w,
                    index_t h,
                    score_t* H,
                    link_t* P,
                    score_t** maxloc
                  )
{
  // Size is important for pointer CAS in CUDA Kernel
  static_assert( sizeof(maxloc) == sizeof(unsigned long long int),
                 "pointer/int size mismatch (req. for CUDA atomicCAS)!"
               );
  // number of anti-diagonals to process
  const index_t MAXITER = 2 + w + h - 1;
  // wavefront arrays for three iterations
  score_t* const wavefronts = unified_alloc<score_t>(3*MAXITER);
  link_t* pred_0 = unified_alloc<link_t>(MAXITER);
  link_t* pred_1 = unified_alloc<link_t>(MAXITER);
  score_t* const maxscr = unified_alloc<score_t>(1);
  const score_t** const maxpos = unified_alloc<const score_t*>(1);
  // wavefront representation _time
  score_t* M_2 = wavefronts;
  score_t* M_1 = wavefronts + MAXITER;
  // wavefront output
  score_t* M_0 = wavefronts + 2*MAXITER;
  // initialize t == 0
  M_1[0] = NONE;
  // set maxloc to origin, and origin to 0
  *maxloc = H;
  **maxloc = 0;
  *maxscr = 0;
  // smith waterman
  for (index_t i = 1; i <= MAXITER; ++i)
  {
    // [lb, ub) is the valid cell range on diagonal i; the comma expressions
    // also initialize the diagonal's boundary cell(s) in M_0 to NONE
    const index_t lb = (i<=h) ? (M_0[0] = NONE, 1) : i - h;
    const index_t ub = (i<=w) ? (M_0[i] = NONE, i) : w + 1;
    // reset the published best-score pointer for this launch
    *maxpos = maxscr;
    assert((ub - lb >= 0) && (ub - lb <= h));
    const index_t THREADS_PER_BLOCK = 1024;
    const index_t ITER_SPACE = (ub-lb+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK;
    similarityScore_kernel
    <<<ITER_SPACE, THREADS_PER_BLOCK>>>
    (lb, ub, i, M_0, pred_0, M_1, M_2, a, b, maxpos);
    // rotate wavefront vectors
    // (host-side pointer bookkeeping only: after this, M_1 names the buffer
    // the kernel is writing, device memory itself is untouched)
    rotate3(M_0, M_1, M_2);
    // swap output vectors
    std::swap(pred_0, pred_1);
    // wait for the kernel before reading its results on the host
    cudaDeviceSynchronize();
    // for debugging purposes clear all M_0
    if (DEBUG_MODE)
    std::fill(M_0, M_0+MAXITER, std::numeric_limits<score_t>::min());
    // \note
    //   The problem with the CUDA algorithm is that getting the
    //   data off of the GPU produces an enormous amount of page faults.
    //   e.g., on Lassen with a 100000x10000 data set copying the data
    //         out from the GPU results in a 20x performance hit.
    //
    // \todo @todo_1
    //   Can we hide the data transfer behind the computation of the
    //   next iteration?
    //   - in principle, this should be possible. The next round of
    //     computation only changes M_0, and pred_0, thus we could
    //     overlap the computation with data transfer back to the CPU.
    //   - in a first attempt, CUDA streams were attempted. In this
    //     versions there existed a computestream and a transferstream.
    //     the transferstream synchronized one time step after the
    //     computation. However, the result was a minor slow down to
    //     the version in this file.
    //     see file sw-rotated-cuda-global-stream.cu
    //
    //   - TRY to copy the data back into the H,P arrays using OpenMP
    //     ...
    //   - Alternatively, the interface to SmithWaterman could be changed.
    //     The H,P representations are artifacts from the standard
    //     algorithm. By moving towards a 45 degree rotated base version
    //     copying back could be entirely avoided at the expense that
    //     the data representation becomes twice is big (though this
    //     could possibly be remedied by using a clever data layout).
    {
      // results from this iteration are in M_1
      // -> strided copies back to H and P
      // consecutive cells of an anti-diagonal are w elements apart in the
      // row-major (w+1)-wide H/P layout, hence the pitch of w*sizeof(...)
      index_t ofs = diagonalBasePoint(i, w) + w + 1;
      cudaError_t errH = cudaMemcpy2D( H+ofs,
                                       w*sizeof(*H),
                                       M_1 + lb,
                                       sizeof(*M_1),
                                       sizeof(*M_1),
                                       ub-lb,
                                       cudaMemcpyDefault
                                     );
      check_cuda_success(errH);
      cudaError_t errP = cudaMemcpy2D( P+ofs,
                                       w*sizeof(*P),
                                       pred_1 + lb,
                                       sizeof(*pred_1),
                                       sizeof(*pred_1),
                                       ub-lb,
                                       cudaMemcpyDefault
                                     );
      check_cuda_success(errP);
    }
    {
      const score_t* maxx = *maxpos;
      // update maxscore, if maxpos points to an improved location
      // (translate the wavefront index j back to a linear offset in H)
      if (maxx != maxscr)
      {
        index_t j = maxx - M_1;
        index_t ofs = diagonalBasePoint(i, w) + w + 1;
        ofs += (j - lb) * w;
        *maxloc = H + ofs;
        *maxscr = *maxx;
      }
    }
  }
  unified_free(wavefronts);
  unified_free(pred_0);
  unified_free(pred_1);
  unified_free(maxscr);
  unified_free(maxpos);
}
/*--------------------------------------------------------------------
 * Function: backtrack
 * Purpose:  Follow the predecessor links from maxPos back to the start
 *           of the alignment, negating every visited link (PATH == -1)
 *           so the printers can highlight the path.
 *           m is the row stride of P (its width, w+1).
 */
void backtrack(link_t* P, index_t maxPos, index_t m) {
  // predecessor of the cell currently visited
  index_t predPos = 0;
  // walk until a NONE cell terminates the path
  for (;;) {
    const link_t link = P[maxPos];
    if (link == DIAGONAL)  predPos = maxPos - m - 1;
    else if (link == UP)   predPos = maxPos - m;
    else if (link == LEFT) predPos = maxPos - 1;
    else                   assert(false);
    // negate the link to mark this cell as part of the result path
    P[maxPos] = static_cast<link_t>(link * PATH);
    maxPos = predPos;
    if (P[maxPos] == NONE) break;
  }
} /* End of backtrack */
/*--------------------------------------------------------------------
 * Function: printMatrix
 * Purpose:  Print the (m+1) x (n+1) similarity matrix, labelling the
 *           columns with sequence a and the rows with sequence b.
 */
void printMatrix(score_t* matrix, const char* a, const char* b, index_t m, index_t n) {
  const index_t width = m + 1; // row stride of the score matrix
  // header row: two filler cells followed by the characters of a
  printf("-\t-\t");
  for (index_t j = 0; j < m; ++j) {
    printf("%c\t", a[j]);
  }
  printf("\n-\t");
  for (index_t i = 0; i <= n; ++i) {
    for (index_t j = 0; j <= m; ++j) {
      // row label for every row after the first
      if (j == 0 && i > 0) printf("%c\t", b[i-1]);
      // clamp negative DEBUG sentinel values to 0 for display
      printf("%d\t", std::max(0, matrix[width * i + j]));
    }
    printf("\n");
  }
} /* End of printMatrix */
/*--------------------------------------------------------------------
 * Function: printPredecessorMatrix
 * Purpose:  Print the (m+1) x (n+1) predecessor matrix; links on the
 *           backtracked path are stored negated and shown in bold red.
 */
void printPredecessorMatrix(link_t* matrix, const char* a, const char* b, index_t m, index_t n) {
  printf(" ");
  for (index_t j = 0; j < m; j++) {
    printf("%c ", a[j]);
  }
  printf("\n ");
  for (index_t i = 0; i < n+1; i++) { //Lines
    for (index_t j = 0; j < m+1; j++) {
      if (j==0 && i>0) printf("%c ", b[i-1]);
      // row stride is m+1 (P has m+1 columns, see the calloc in main and
      // printMatrix); the previous stride of m skewed every printed row
      index_t index = (m+1) * i + j;
      if (matrix[index] < 0) {
        // negated links mark the optimal path found by backtrack()
        printf(BOLDRED);
        if (matrix[index] == -UP)
          printf("↑ ");
        else if (matrix[index] == -LEFT)
          printf("← ");
        else if (matrix[index] == -DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
        printf(RESET);
      } else {
        if (matrix[index] == UP)
          printf("↑ ");
        else if (matrix[index] == LEFT)
          printf("← ");
        else if (matrix[index] == DIAGONAL)
          printf("↖ ");
        else
          printf("- ");
      }
    }
    printf("\n");
  }
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
 * Function: generate
 * Purpose:  Fill a (length m) and b (length n) with random bases.
 *           The value->base mapping (0:A, 2:C, 3:G, otherwise T)
 *           matches the original implementation.
 */
void generate(char* a, char* b, index_t m, index_t n) {
  // Random seed
  srand(time(NULL));
  // one rand() consumed per element, array a filled first, then b
  auto randomBase = []() -> char {
    switch (rand() % 4) {
      case 0:  return 'A';
      case 2:  return 'C';
      case 3:  return 'G';
      default: return 'T';
    }
  };
  long long int i;
  for (i = 0; i < m; i++) {
    a[i] = randomBase();
  }
  for (i = 0; i < n; i++) {
    b[i] = randomBase();
  }
} /* End of generate */
/*--------------------------------------------------------------------
* Function: main
*/
// Program entry point: parses optional sequence lengths, builds the inputs
// (the built-in Wikipedia example by default), runs the wavefront
// Smith-Waterman, optionally verifies/prints, backtracks the best path,
// and reports elapsed wall-clock time.
int main(int argc, char* argv[])
{
  typedef std::chrono::time_point<std::chrono::system_clock> time_point;
  bool useBuiltInData = true;
  index_t m = 8;
  index_t n = 9;
  // two arguments override the default sizes and switch to random input
  if (argc==3)
  {
    m = strtoll(argv[1], NULL, 10);
    n = strtoll(argv[2], NULL, 10);
    useBuiltInData = false;
  }
  if (useBuiltInData)
  printf ("Using built-in data for testing ..\n");
  printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
  // Allocates a and b
  // \pp \note m (instead of m+1), b/c end marker is not needed
  //~ char* a = (char*)malloc(m * sizeof(char));
  //~ char* b = (char*)malloc(n * sizeof(char));
  // unified memory so the GPU kernel can read the sequences directly
  char* a = unified_alloc<char>(m);
  char* b = unified_alloc<char>(n);
  std::cerr << "a,b allocated: " << m << "/" << n << std::endl;
  //~ // Because now we have zeros
  // \pp m and n are the lengths of input strings ..
  //~ m++;
  //~ n++;
  if (useBuiltInData)
  {
    //Uncomment this to test the sequence available at
    //http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
    // assert(m=11 && n=7);
    // a[0] = 'C';
    // a[1] = 'G';
    // a[2] = 'T';
    // a[3] = 'G';
    // a[4] = 'A';
    // a[5] = 'A';
    // a[6] = 'T';
    // a[7] = 'T';
    // a[8] = 'C';
    // a[9] = 'A';
    // a[10] = 'T';
    // b[0] = 'G';
    // b[1] = 'A';
    // b[2] = 'C';
    // b[3] = 'T';
    // b[4] = 'T';
    // b[5] = 'A';
    // b[6] = 'C';
    // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
    // Using the wiki example to verify the results
    assert(m==8 && n==9);
    b[0] = 'G';
    b[1] = 'G';
    b[2] = 'T';
    b[3] = 'T';
    b[4] = 'G';
    b[5] = 'A';
    b[6] = 'C';
    b[7] = 'T';
    b[8] = 'A';
    a[0] = 'T';
    a[1] = 'G';
    a[2] = 'T';
    a[3] = 'T';
    a[4] = 'A';
    a[5] = 'C';
    a[6] = 'G';
    a[7] = 'G';
  }
  else
  {
    // Gen random arrays a and b
    generate(a, b, m, n);
  }
  time_point starttime = std::chrono::system_clock::now();
  // Allocates similarity matrix H (plain host memory; smithWaterman
  // copies its results into it)
  score_t* H = (score_t*) calloc((m+1) * (n+1), sizeof(score_t));
  // Allocates predecessor matrix P
  link_t* P = (link_t*) calloc((m+1) * (n+1), sizeof(link_t));
  // maxloc receives a pointer to the best-scoring cell inside H
  score_t* maxloc = nullptr;
  smithWaterman(a, b, m, n, H, P, &maxloc);
  time_point endtime = std::chrono::system_clock::now();
  if (DEBUG_MODE)
  {
    printf("\nSimilarity Matrix:\n");
    printMatrix(H, a, b, m, n);
    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P, a, b, m, n);
  }
  if (useBuiltInData)
  {
    printf ("Verifying results using the builtinIn data: %s\n", (H[(n+1)*(m+1)-1]==7)?"true":"false");
    assert (H[(n+1)*(m+1)-1]==7);
  }
  // maxloc points into H; the same linear offset addresses the matching
  // cell in P, and m+1 is the row stride of both matrices
  backtrack(P, maxloc - H, m+1);
  int elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(endtime-starttime).count();
  printf("\nElapsed time: %d ms\n\n", elapsed);
  // Frees similarity matrixes
  free(H);
  free(P);
  //Frees input arrays
  //~ free(a);
  //~ free(b);
  unified_free(a);
  unified_free(b);
  return 0;
} /* End of main */
|
89be868c67ac57d2a5264d8746f484343e1eae34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sequence_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-tuner driver: times 1000 launches of sequence_gpu for every
// (matrix size, block shape) combination and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
  hipSetDevice(0);
  // expected argument: number of rows of matrices_ to benchmark
  if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      int *d_ptr = NULL;
      hipMalloc(&d_ptr, XSIZE*YSIZE);
      int length = 1;
      // round the launch extents up to whole blocks
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0)
      {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0); // force lazy context creation before any timing
      hipLaunchKernelGGL(( sequence_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ptr,length);
      hipDeviceSynchronize();
      // warm-up launches
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( sequence_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ptr,length);
      }
      hipDeviceSynchronize(); // keep warm-up work out of the measurement
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( sequence_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_ptr,length);
      }
      // kernel launches are asynchronous: wait for completion so the
      // measurement covers execution time, not just enqueue overhead
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      hipFree(d_ptr); // previously leaked once per configuration
    }
}} | 89be868c67ac57d2a5264d8746f484343e1eae34.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sequence_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-tuner driver: times 1000 launches of sequence_gpu for every
// (matrix size, block shape) combination and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  // expected argument: number of rows of matrices_ to benchmark
  if (argc < 2) {
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      int *d_ptr = NULL;
      cudaMalloc(&d_ptr, XSIZE*YSIZE);
      int length = 1;
      // round the launch extents up to whole blocks
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0)
      {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0); // force lazy context creation before any timing
      sequence_gpu<<<gridBlock,threadBlock>>>(d_ptr,length);
      cudaDeviceSynchronize();
      // warm-up launches
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        sequence_gpu<<<gridBlock,threadBlock>>>(d_ptr,length);
      }
      cudaDeviceSynchronize(); // keep warm-up work out of the measurement
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        sequence_gpu<<<gridBlock,threadBlock>>>(d_ptr,length);
      }
      // kernel launches are asynchronous: wait for completion so the
      // measurement covers execution time, not just enqueue overhead
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      cudaFree(d_ptr); // previously leaked once per configuration
    }
}} |
9e42c4b914fa87bd73fde4af4214d0e556430c50.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <cuml/linear_model/ridge_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/mv_aTb.hpp>
#include <opg/linalg/svd.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace Ridge {
namespace opg {
template <typename T>
void ridgeSolve(const raft::handle_t &handle, T *S, T *V,
std::vector<Matrix::Data<T> *> &U,
const Matrix::PartDescriptor &UDesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *w, hipStream_t *streams, int n_streams,
bool verbose) {
auto cublasH = handle.get_cublas_handle();
auto cusolverH = handle.get_cusolver_dn_handle();
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
// Implements this: w = V * inv(S^2 + *I) * S * U^T * b
T *S_nnz;
T alp = T(1);
T beta = T(0);
T thres = T(1e-10);
raft::matrix::setSmallValuesZero(S, UDesc.N, streams[0], thres);
// TO-DO: Update to use `device_buffer` here
// Tracking issue: https://github.com/rapidsai/cuml/issues/2524
allocate(S_nnz, UDesc.N, true);
copy(S_nnz, S, UDesc.N, streams[0]);
raft::matrix::power(S_nnz, UDesc.N, streams[0]);
LinAlg::addScalar(S_nnz, S_nnz, alpha[0], UDesc.N, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(S, S_nnz, size_t(1), UDesc.N,
false, true, streams[0], true);
raft::matrix::matrixVectorBinaryMult(V, S, UDesc.N, UDesc.N, false, true,
streams[0]);
Matrix::Data<T> S_nnz_data;
S_nnz_data.totalSize = UDesc.N;
S_nnz_data.ptr = S_nnz;
LinAlg::opg::mv_aTb(S_nnz_data, U, UDesc, b, comm, allocator, streams,
n_streams, cublasH);
LinAlg::gemm(V, UDesc.N, UDesc.N, S_nnz, w, UDesc.N, 1, HIPBLAS_OP_N,
HIPBLAS_OP_N, alp, beta, cublasH, streams[0]);
CUDA_CHECK(hipFree(S_nnz));
}
template <typename T>
void ridgeEig(raft::handle_t &handle, const std::vector<Matrix::Data<T> *> &A,
const Matrix::PartDescriptor &ADesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *coef, hipStream_t *streams, int n_streams,
bool verbose) {
const auto &comm = handle.get_comms();
const hipblasHandle_t cublas_handle = handle.get_cublas_handle();
const hipsolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
int rank = comm.get_rank();
device_buffer<T> S(allocator, streams[0], ADesc.N);
device_buffer<T> V(allocator, streams[0], ADesc.N * ADesc.N);
std::vector<Matrix::Data<T> *> U;
std::vector<Matrix::Data<T>> U_temp;
std::vector<Matrix::RankSizePair *> partsToRanks = ADesc.blocksOwnedBy(rank);
size_t total_size = 0;
for (int i = 0; i < partsToRanks.size(); i++) {
total_size += partsToRanks[i]->size;
}
total_size = total_size * ADesc.N;
device_buffer<T> U_parts(allocator, streams[0], total_size);
T *curr_ptr = U_parts.data();
for (int i = 0; i < partsToRanks.size(); i++) {
Matrix::Data<T> d;
d.totalSize = partsToRanks[i]->size;
d.ptr = curr_ptr;
curr_ptr = curr_ptr + (partsToRanks[i]->size * ADesc.N);
U_temp.push_back(d);
}
for (int i = 0; i < A.size(); i++) {
U.push_back(&(U_temp[i]));
}
LinAlg::opg::svdEig(A, ADesc, U, S.data(), V.data(), comm, allocator, streams,
n_streams, cublas_handle, cusolver_handle);
ridgeSolve(handle, S.data(), V.data(), U, ADesc, b, alpha, n_alpha, coef,
streams, n_streams, verbose);
}
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, hipStream_t *streams, int n_streams, bool verbose) {
const auto allocator = handle.get_device_allocator();
device_buffer<T> mu_input(allocator, streams[0]);
device_buffer<T> norm2_input(allocator, streams[0]);
device_buffer<T> mu_labels(allocator, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) {
norm2_input.resize(input_desc.N, streams[0]);
}
GLM::opg::preProcessData(handle, input_data, input_desc, labels,
mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
ridgeEig(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
streams, n_streams, verbose);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
intercept, mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ridge regression
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param n_rows: number of rows of the input data
* @input param n_cols: number of cols of the input data
* @input param labels: labels data
* @input param alpha: ridge parameter
* @input param n_alpha: number of ridge parameters. Only one parameter is supported right now.
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Tracking issue: https://github.com/rapidsai/cuml/issues/2470
int n_streams = input_desc.blocksOwnedBy(rank).size();
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, streams, n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc, T *coef, T intercept,
std::vector<Matrix::Data<T> *> &preds, hipStream_t *streams,
int n_streams, bool verbose) {
std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
for (int i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N,
HIPBLAS_OP_N, alpha, beta, handle.get_cublas_handle(),
streams[si]);
LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
bool verbose) {
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
rank_sizes + n_parts);
std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);
// TODO: These streams should come from raft::handle_t
int n_streams = n_parts;
hipStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamCreate(&streams[i]));
}
predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(hipStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *alpha, int n_alpha,
float *coef, float *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void fit(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *alpha,
int n_alpha, double *coef, double *intercept, bool fit_intercept,
bool normalize, int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
size_t n_cols, float *coef, float intercept,
Matrix::Data<float> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
size_t n_cols, double *coef, double intercept,
Matrix::Data<double> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
} // namespace opg
} // namespace Ridge
} // namespace ML
| 9e42c4b914fa87bd73fde4af4214d0e556430c50.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <cuml/linear_model/ridge_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/mv_aTb.hpp>
#include <opg/linalg/svd.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace Ridge {
namespace opg {
template <typename T>
void ridgeSolve(const raft::handle_t &handle, T *S, T *V,
std::vector<Matrix::Data<T> *> &U,
const Matrix::PartDescriptor &UDesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *w, cudaStream_t *streams, int n_streams,
bool verbose) {
auto cublasH = handle.get_cublas_handle();
auto cusolverH = handle.get_cusolver_dn_handle();
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
// Implements this: w = V * inv(S^2 + λ*I) * S * U^T * b
T *S_nnz;
T alp = T(1);
T beta = T(0);
T thres = T(1e-10);
raft::matrix::setSmallValuesZero(S, UDesc.N, streams[0], thres);
// TO-DO: Update to use `device_buffer` here
// Tracking issue: https://github.com/rapidsai/cuml/issues/2524
allocate(S_nnz, UDesc.N, true);
copy(S_nnz, S, UDesc.N, streams[0]);
raft::matrix::power(S_nnz, UDesc.N, streams[0]);
LinAlg::addScalar(S_nnz, S_nnz, alpha[0], UDesc.N, streams[0]);
raft::matrix::matrixVectorBinaryDivSkipZero(S, S_nnz, size_t(1), UDesc.N,
false, true, streams[0], true);
raft::matrix::matrixVectorBinaryMult(V, S, UDesc.N, UDesc.N, false, true,
streams[0]);
Matrix::Data<T> S_nnz_data;
S_nnz_data.totalSize = UDesc.N;
S_nnz_data.ptr = S_nnz;
LinAlg::opg::mv_aTb(S_nnz_data, U, UDesc, b, comm, allocator, streams,
n_streams, cublasH);
LinAlg::gemm(V, UDesc.N, UDesc.N, S_nnz, w, UDesc.N, 1, CUBLAS_OP_N,
CUBLAS_OP_N, alp, beta, cublasH, streams[0]);
CUDA_CHECK(cudaFree(S_nnz));
}
template <typename T>
void ridgeEig(raft::handle_t &handle, const std::vector<Matrix::Data<T> *> &A,
const Matrix::PartDescriptor &ADesc,
const std::vector<Matrix::Data<T> *> &b, const T *alpha,
const int n_alpha, T *coef, cudaStream_t *streams, int n_streams,
bool verbose) {
const auto &comm = handle.get_comms();
const cublasHandle_t cublas_handle = handle.get_cublas_handle();
const cusolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
int rank = comm.get_rank();
device_buffer<T> S(allocator, streams[0], ADesc.N);
device_buffer<T> V(allocator, streams[0], ADesc.N * ADesc.N);
std::vector<Matrix::Data<T> *> U;
std::vector<Matrix::Data<T>> U_temp;
std::vector<Matrix::RankSizePair *> partsToRanks = ADesc.blocksOwnedBy(rank);
size_t total_size = 0;
for (int i = 0; i < partsToRanks.size(); i++) {
total_size += partsToRanks[i]->size;
}
total_size = total_size * ADesc.N;
device_buffer<T> U_parts(allocator, streams[0], total_size);
T *curr_ptr = U_parts.data();
for (int i = 0; i < partsToRanks.size(); i++) {
Matrix::Data<T> d;
d.totalSize = partsToRanks[i]->size;
d.ptr = curr_ptr;
curr_ptr = curr_ptr + (partsToRanks[i]->size * ADesc.N);
U_temp.push_back(d);
}
for (int i = 0; i < A.size(); i++) {
U.push_back(&(U_temp[i]));
}
LinAlg::opg::svdEig(A, ADesc, U, S.data(), V.data(), comm, allocator, streams,
n_streams, cublas_handle, cusolver_handle);
ridgeSolve(handle, S.data(), V.data(), U, ADesc, b, alpha, n_alpha, coef,
streams, n_streams, verbose);
}
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, cudaStream_t *streams, int n_streams, bool verbose) {
const auto allocator = handle.get_device_allocator();
device_buffer<T> mu_input(allocator, streams[0]);
device_buffer<T> norm2_input(allocator, streams[0]);
device_buffer<T> mu_labels(allocator, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) {
norm2_input.resize(input_desc.N, streams[0]);
}
GLM::opg::preProcessData(handle, input_data, input_desc, labels,
mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
ridgeEig(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
streams, n_streams, verbose);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
intercept, mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ridge regression
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param n_rows: number of rows of the input data
* @input param n_cols: number of cols of the input data
* @input param labels: labels data
* @input param alpha: ridge parameter
* @input param n_alpha: number of ridge parameters. Only one parameter is supported right now.
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *alpha, int n_alpha,
T *coef, T *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
// Tracking issue: https://github.com/rapidsai/cuml/issues/2470
int n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, streams, n_streams,
verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc, T *coef, T intercept,
std::vector<Matrix::Data<T> *> &preds, cudaStream_t *streams,
int n_streams, bool verbose) {
std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
for (int i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N,
CUBLAS_OP_N, alpha, beta, handle.get_cublas_handle(),
streams[si]);
LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
bool verbose) {
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
rank_sizes + n_parts);
std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);
// TODO: These streams should come from raft::handle_t
int n_streams = n_parts;
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *alpha, int n_alpha,
float *coef, float *intercept, bool fit_intercept, bool normalize,
int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void fit(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *alpha,
int n_alpha, double *coef, double *intercept, bool fit_intercept,
bool normalize, int algo, bool verbose) {
fit_impl(handle, input_data, input_desc, labels, alpha, n_alpha, coef,
intercept, fit_intercept, normalize, algo, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
size_t n_cols, float *coef, float intercept,
Matrix::Data<float> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
size_t n_cols, double *coef, double intercept,
Matrix::Data<double> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
} // namespace opg
} // namespace Ridge
} // namespace ML
|
035627dc6ccc3f19d9d82cca432ef3286f4b04f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "constrain_weight_updates_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float coef = 1;
float *weights_gpu = NULL;
hipMalloc(&weights_gpu, XSIZE*YSIZE);
float *weight_updates_gpu = NULL;
hipMalloc(&weight_updates_gpu, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
constrain_weight_updates_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,coef,weights_gpu,weight_updates_gpu);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
constrain_weight_updates_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,coef,weights_gpu,weight_updates_gpu);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
constrain_weight_updates_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,coef,weights_gpu,weight_updates_gpu);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 035627dc6ccc3f19d9d82cca432ef3286f4b04f3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "constrain_weight_updates_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float coef = 1;
float *weights_gpu = NULL;
cudaMalloc(&weights_gpu, XSIZE*YSIZE);
float *weight_updates_gpu = NULL;
cudaMalloc(&weight_updates_gpu, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
constrain_weight_updates_kernel<<<gridBlock,threadBlock>>>(N,coef,weights_gpu,weight_updates_gpu);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
constrain_weight_updates_kernel<<<gridBlock,threadBlock>>>(N,coef,weights_gpu,weight_updates_gpu);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
constrain_weight_updates_kernel<<<gridBlock,threadBlock>>>(N,coef,weights_gpu,weight_updates_gpu);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c37e3c633a6f41cc10ad73d40ff6a8ef45d2f3b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
__global__ void matproduct(int *l,int *m, int *n)
{
int x=blockIdx.x;
int y=blockIdx.y;
int k;
n[col2*y+x]=0;
for(k=0;k<col1;k++)
{
n[col2*y+x]+=n[col2*y+x]+l[col1*y+k]*m[col2*k+x];
}
}
int main()
{
//cpu pointers
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
//gpu pointers
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
hipMalloc((void **)&d,row1*col1*sizeof(int));
hipMalloc((void **)&e,row2*col2*sizeof(int));
hipMalloc((void **)&f,row1*col2*sizeof(int));
hipMemcpy(d,a,row1*col1*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(e,b,row2*col2*sizeof(int),hipMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
hipLaunchKernelGGL(( matproduct), dim3(grid),dim3(1), 0, 0, d,e,f);
hipMemcpy(c,f,row1*col2*sizeof(int),hipMemcpyDeviceToHost);
printf("\nProduct of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
hipFree(d);
hipFree(e);
hipFree(f);
return 0;
}
| c37e3c633a6f41cc10ad73d40ff6a8ef45d2f3b6.cu | #include<stdio.h>
#include<cuda.h>
#define row1 2 /* Number of rows of first matrix */
#define col1 3 /* Number of columns of first matrix */
#define row2 3 /* Number of rows of second matrix */
#define col2 2 /* Number of columns of second matrix */
__global__ void matproduct(int *l,int *m, int *n)
{
int x=blockIdx.x;
int y=blockIdx.y;
int k;
n[col2*y+x]=0;
for(k=0;k<col1;k++)
{
n[col2*y+x]+=n[col2*y+x]+l[col1*y+k]*m[col2*k+x];
}
}
int main()
{
//cpu pointers
int a[row1][col1];
int b[row2][col2];
int c[row1][col2];
//gpu pointers
int *d,*e,*f;
int i,j;
printf("\n Enter elements of first matrix of size 2*3\n");
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
scanf("%d",&a[i][j]);
}
}
printf("\n Enter elements of second matrix of size 3*2\n");
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
scanf("%d",&b[i][j]);
}
}
cudaMalloc((void **)&d,row1*col1*sizeof(int));
cudaMalloc((void **)&e,row2*col2*sizeof(int));
cudaMalloc((void **)&f,row1*col2*sizeof(int));
cudaMemcpy(d,a,row1*col1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,row2*col2*sizeof(int),cudaMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
matproduct<<<grid,1>>>(d,e,f);
cudaMemcpy(c,f,row1*col2*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nProduct of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%d\t",c[i][j]);
}
printf("\n");
}
cudaFree(d);
cudaFree(e);
cudaFree(f);
return 0;
}
|
2887982e6e3f184a0f3c85e00f25c83d17351677.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/cuda/gru_compute.h"
#include <string>
#include <vector>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/backends/cuda/math/bias.h"
#include "lite/backends/cuda/math/gru_forward.h"
#include "lite/backends/cuda/math/sequence2batch.h"
#include "lite/backends/cuda/target_wrapper.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename T>
struct GRUMetaValue {
T* gate_weight;
T* state_weight;
T* gate_value;
T* reset_output_value;
T* output_value;
T* prev_out_value;
};
template <typename T>
struct GRUUnitFunctor {
  // Runs one GRU time step for `batch_size` sequences with hidden width
  // `frame_size`.  Pipeline (grounded in the calls below):
  //   1. gate_value[0 : 2*frame] += prev_out * gate_weight        (GEMM)
  //   2. reset_output = act_gate(gates) applied against prev_out  (kernel)
  //   3. gate_value[2*frame :]  += reset_output * state_weight    (GEMM)
  //   4. output = act_node(candidate)                             (kernel)
  // Layout implied by the leading dimensions passed to blas->init():
  // gate_value holds 3 * frame_size values per sequence (ldc = frame_size*3).
  // For batch_size == 1 on compute capability >= 7.0 a fused
  // "FastCollective" path performs the matrix products inside the kernels
  // and returns early; tiled_size (8 vs 16) is chosen by frame_size.
  static void compute(GRUMetaValue<T> value,
                      int frame_size,
                      int batch_size,
                      const lite::cuda::math::ActivationType& active_node,
                      const lite::cuda::math::ActivationType& active_gate,
                      bool origin_mode,
                      lite::cuda::math::Gemm<T, T>* blas,
                      CUDAContext* context) {
    dim3 threads, grids;
    // Launch geometry: 1-D over frame_size for a single sequence,
    // 2-D (frame x batch) 32x32 tiles otherwise.
    if (batch_size == 1) {
      if (lite::TargetWrapperCuda::GetComputeCapability() >= 70) {
        if (frame_size < 16) {
          // Small hidden size: 8-wide tiles.
          constexpr int tiled_size = 8;
          int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
          threads = dim3(tiled_size, 1);
          grids = dim3(frame_blocks, 1);
          hipLaunchKernelGGL(( lite::cuda::math::FastCollectiveGruGate<
              T,
              tiled_size>), dim3(grids), dim3(threads), 0, context->exec_stream(),
              value.gate_value,
              value.prev_out_value,
              value.gate_weight,
              value.reset_output_value,
              frame_size,
              active_gate);
          frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
          grids = dim3(frame_blocks, 1);
          hipLaunchKernelGGL(( lite::cuda::math::FastCollectiveGruOut<
              T,
              tiled_size>), dim3(grids), dim3(threads), 0, context->exec_stream(),
              value.state_weight,
              value.prev_out_value,
              value.output_value,
              value.gate_value,
              value.reset_output_value,
              frame_size,
              active_node,
              origin_mode);
        } else {
          // Larger hidden size: identical sequence with 16-wide tiles.
          constexpr int tiled_size = 16;
          int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
          threads = dim3(tiled_size, 1);
          grids = dim3(frame_blocks, 1);
          hipLaunchKernelGGL(( lite::cuda::math::FastCollectiveGruGate<
              T,
              tiled_size>), dim3(grids), dim3(threads), 0, context->exec_stream(),
              value.gate_value,
              value.prev_out_value,
              value.gate_weight,
              value.reset_output_value,
              frame_size,
              active_gate);
          frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
          grids = dim3(frame_blocks, 1);
          hipLaunchKernelGGL(( lite::cuda::math::FastCollectiveGruOut<
              T,
              tiled_size>), dim3(grids), dim3(threads), 0, context->exec_stream(),
              value.state_weight,
              value.prev_out_value,
              value.output_value,
              value.gate_value,
              value.reset_output_value,
              frame_size,
              active_node,
              origin_mode);
        }
        // The fused path already produced output_value; skip the generic path.
        return;
      } else {
        int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
        int frame_blocks = (frame_size + 1024 - 1) / 1024;
        threads = dim3(frame_per_block, 1);
        grids = dim3(frame_blocks, 1);
      }
    } else {
      threads = dim3(32, 32);
      grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
    }
    // Step 1: accumulate prev_out * gate_weight into the update/reset gates.
    // Skipped on the first time step, where there is no previous output.
    if (value.prev_out_value) {
      CHECK(blas->init(false,
                       false,
                       batch_size,
                       frame_size * 2,
                       frame_size,
                       frame_size,
                       frame_size * 2,
                       frame_size * 3,
                       context));
      blas->run(1.0f,
                1.0f,
                value.prev_out_value,
                value.gate_weight,
                value.gate_value,
                context);
    }
    // Step 2: apply the gate activation and form reset_output.
    // NOTE(review): the trailing argument is presumably an is_batch flag;
    // the fp16 specialization in this file passes `batch_size == 1` for the
    // same argument -- confirm which polarity is intended.
    hipLaunchKernelGGL(( lite::cuda::math::GruForwardResetOutput<
        T>), dim3(grids), dim3(threads), 0, context->exec_stream(),
        value.gate_value,
        value.reset_output_value,
        value.prev_out_value,
        frame_size,
        batch_size,
        active_gate,
        batch_size != 1);
    CUDA_POST_KERNEL_CHECK;
    // Step 3: accumulate reset_output * state_weight into the candidate
    // slice (third frame_size-wide chunk of gate_value).
    if (value.prev_out_value) {
      CHECK(blas->init(false,
                       false,
                       batch_size,
                       frame_size,
                       frame_size,
                       frame_size,
                       frame_size,
                       frame_size * 3,
                       context));
      blas->run(1.0f,
                1.0f,
                value.reset_output_value,
                value.state_weight,
                value.gate_value + frame_size * 2,
                context);
    }
    // Step 4: apply the node activation and write the new hidden state.
    hipLaunchKernelGGL(( lite::cuda::math::GruForwardFinalOutput<
        T>), dim3(grids), dim3(threads), 0, context->exec_stream(), value.gate_value,
        value.prev_out_value,
        value.output_value,
        frame_size,
        batch_size,
        active_node,
        origin_mode,
        batch_size != 1);
    CUDA_POST_KERNEL_CHECK;
  }
};
template struct GRUUnitFunctor<float>;
// fp16 specialization: same four-stage pipeline as the generic template
// (gate GEMM -> reset-output kernel -> candidate GEMM -> final-output
// kernel), but without the SM70+ fused "FastCollective" fast path.
template <>
struct GRUUnitFunctor<half> {
  static void compute(GRUMetaValue<half> value,
                      int frame_size,
                      int batch_size,
                      const lite::cuda::math::ActivationType& active_node,
                      const lite::cuda::math::ActivationType& active_gate,
                      bool origin_mode,
                      lite::cuda::math::Gemm<half, half>* blas,
                      CUDAContext* context) {
    dim3 threads, grids;
    // 1-D geometry for a single sequence, 32x32 2-D tiles otherwise.
    if (batch_size == 1) {
      int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
      int frame_blocks = (frame_size + 1024 - 1) / 1024;
      threads = dim3(frame_per_block, 1);
      grids = dim3(frame_blocks, 1);
    } else {
      threads = dim3(32, 32);
      grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
    }
    // gate_value[0 : 2*frame] += prev_out * gate_weight (first step has no
    // previous output, so the GEMM is skipped).
    if (value.prev_out_value) {
      CHECK(blas->init(false,
                       false,
                       batch_size,
                       frame_size * 2,
                       frame_size,
                       frame_size,
                       frame_size * 2,
                       frame_size * 3,
                       context));
      blas->run(1.0f,
                1.0f,
                value.prev_out_value,
                value.gate_weight,
                value.gate_value,
                context);
    }
    // NOTE(review): the float path passes `batch_size != 1` as this trailing
    // flag; `batch_size == 1` here looks inverted relative to it -- confirm
    // against the kernel's is_batch semantics before relying on fp16 with
    // batch_size > 1.
    hipLaunchKernelGGL(( lite::cuda::math::GruForwardResetOutput<
        half>), dim3(grids), dim3(threads), 0, context->exec_stream(),
        value.gate_value,
        value.reset_output_value,
        value.prev_out_value,
        frame_size,
        batch_size,
        active_gate,
        batch_size == 1);
    CUDA_POST_KERNEL_CHECK;
    // gate_value[2*frame :] += reset_output * state_weight.
    if (value.prev_out_value) {
      CHECK(blas->init(false,
                       false,
                       batch_size,
                       frame_size,
                       frame_size,
                       frame_size,
                       frame_size,
                       frame_size * 3,
                       context));
      blas->run(1.0f,
                1.0f,
                value.reset_output_value,
                value.state_weight,
                value.gate_value + frame_size * 2,
                context);
    }
    // Final activation writes the new hidden state into output_value.
    // NOTE(review): same flag-polarity question as above.
    hipLaunchKernelGGL(( lite::cuda::math::GruForwardFinalOutput<
        half>), dim3(grids), dim3(threads), 0, context->exec_stream(),
        value.gate_value,
        value.prev_out_value,
        value.output_value,
        frame_size,
        batch_size,
        active_node,
        origin_mode,
        batch_size == 1);
    CUDA_POST_KERNEL_CHECK;
  }
};
// Allocates the GEMM helper once; Run() reuses it for every time step.
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::PrepareForRun() {
  gemm_impl_.reset(new lite::cuda::math::Gemm<T, T>);
}
// Forward GRU over a LoD (variable-length) batch of sequences.
//
// Flow: optionally reorder the padded input into length-sorted "batch"
// form, add the bias in place, then iterate over time steps (emit offsets),
// running one fused GRU step per time step via GRUUnitFunctor, and finally
// scatter the result back into the original sequence order.
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::Run() {
  auto& context = this->ctx_->template As<CUDAContext>();
  auto stream = context.exec_stream();
  auto& param = this->template Param<param_t>();
  auto* input = param.input;
  // Input is logically const, but the RowwiseAdd below writes into it in
  // place, hence the const_cast.
  T* x_data =
      const_cast<lite::Tensor*>(input)->template mutable_data<T>(TARGET(kCUDA));
  lite::Tensor* h0{nullptr};
  if (param.h0) {
    h0 = const_cast<lite::Tensor*>(param.h0);
  }
  lite::Tensor* bias{nullptr};
  if (param.bias) {
    bias = const_cast<lite::Tensor*>(param.bias);
  }
  const lite::Tensor* weight = param.weight;
  T* weight_data = const_cast<T*>(weight->template data<T>());
  lite::Tensor* batch_gate = param.batch_gate;
  lite::Tensor* batch_reset_hidden_prev = param.batch_reset_hidden_prev;
  lite::Tensor* batch_hidden = param.batch_hidden;
  lite::Tensor* hidden = param.hidden;
  T* batch_reset_hidden_prev_data =
      batch_reset_hidden_prev->template mutable_data<T>(TARGET(kCUDA));
  T* out_data = hidden->template mutable_data<T>(TARGET(kCUDA));
  T* batch_gate_data = batch_gate->template mutable_data<T>(TARGET(kCUDA));
  T* batch_hidden_data = batch_hidden->template mutable_data<T>(TARGET(kCUDA));
  // NOTE(review): is_reverse is read but never used below -- reverse GRU
  // appears to be unsupported by this kernel; confirm.
  bool is_reverse = param.is_reverse;
  auto active_node = lite::cuda::math::GetActiveType(param.activation);
  auto active_gate = lite::cuda::math::GetActiveType(param.gate_activation);
  bool origin_mode = param.origin_mode;
  auto hidden_dims = hidden->dims();
  int frame_size = hidden_dims[1];
  // Use the innermost LoD level as the per-sequence offsets.
  LoD offset_vec_vec = input->lod();
  std::vector<int> offset(offset_vec_vec[offset_vec_vec.size() - 1].size());
  for (size_t i = 0; i < offset_vec_vec[offset_vec_vec.size() - 1].size();
       ++i) {
    offset[i] = static_cast<int>(offset_vec_vec[offset_vec_vec.size() - 1][i]);
  }
  // need_process is true when the sequences must be re-sorted by length;
  // emit offsets then delimit each time step across all sequences.
  bool need_process = seq_utils_.GetSortedMap(offset, stream);
  int emit_length = seq_utils_.GetEmitOffsetVec().size() - 1;
  auto emit_offset_vec = seq_utils_.GetEmitOffsetVec();
  if (need_process) {
    // Reorder input into sorted-by-length batch layout; each frame carries
    // 3 * frame_size gate values.  Compute then happens in batch buffers.
    seq_utils_.Seq2SortedSeq(
        input->template data<T>(), batch_gate_data, 3 * frame_size, stream);
    x_data = batch_gate_data;
    out_data = batch_hidden_data;
  }
  if (bias) {
    // TODO(wilber): validate when bias is not nullptr
    lite::cuda::math::RowwiseAdd<T> add_bias;
    add_bias(x_data,
             bias->template data<T>(),
             x_data,
             frame_size,
             batch_gate->numel(),
             stream);
  }
  GRUMetaValue<T> gru_value;
  // Weight tensor packs [gate_weight | state_weight]; the state weights
  // start after the 2 * frame_size * frame_size gate block.
  gru_value.gate_weight = weight_data;
  gru_value.state_weight = weight_data + 2 * frame_size * frame_size;
  if (h0) {
    // Since the batch computing for GRU reorders the input sequences
    // according to their length. The initialized cell state also needs
    // to reorder.
    // TODO(wilber): validate when h0 is not nullptr
    ordered_h0_.Resize(h0->dims());
    lite::cuda::math::CopyMatrixRowsFunctor<T> row_shuffle;
    row_shuffle(*h0, &ordered_h0_, batch_gate->lod()[2], true, stream);
    gru_value.prev_out_value = ordered_h0_.mutable_data<T>(TARGET(kCUDA));
  } else {
    gru_value.prev_out_value = nullptr;
  }
  // One GRUUnitFunctor call per time step; each step's output becomes the
  // next step's previous hidden state.
  for (size_t n = 0; n < emit_length; ++n) {
    int bstart = emit_offset_vec[n];
    int bend = emit_offset_vec[n + 1];
    int cur_batch_size = bend - bstart;
    gru_value.output_value = out_data + bstart * frame_size;
    gru_value.gate_value = x_data + bstart * frame_size * 3;
    gru_value.reset_output_value =
        batch_reset_hidden_prev_data + bstart * frame_size;
    GRUUnitFunctor<T>::compute(gru_value,
                               frame_size,
                               cur_batch_size,
                               active_node,
                               active_gate,
                               origin_mode,
                               gemm_impl_.get(),
                               &context);
    gru_value.prev_out_value = gru_value.output_value;
  }
  if (need_process) {
    // Scatter the sorted-batch result back into original sequence order.
    seq_utils_.SortedSeq2Seq(batch_hidden_data,
                             hidden->mutable_data<T>(TARGET(kCUDA)),
                             frame_size,
                             stream);
  }
  hidden->set_lod(input->lod());
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
using GRUFp32 =
paddle::lite::kernels::cuda::GRUCompute<float, PRECISION(kFloat)>;
using GRUFp16 = paddle::lite::kernels::cuda::GRUCompute<half, PRECISION(kFP16)>;
REGISTER_LITE_KERNEL(gru, kCUDA, kFloat, kNCHW, GRUFp32, def)
.BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Weight", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchGate", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchResetHiddenPrev", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchHidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Hidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
REGISTER_LITE_KERNEL(gru, kCUDA, kFP16, kNCHW, GRUFp16, def)
.BindInput("Input",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Weight",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchGate",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchResetHiddenPrev",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchHidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("Hidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.Finalize();
| 2887982e6e3f184a0f3c85e00f25c83d17351677.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/cuda/gru_compute.h"
#include <string>
#include <vector>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/backends/cuda/math/bias.h"
#include "lite/backends/cuda/math/gru_forward.h"
#include "lite/backends/cuda/math/sequence2batch.h"
#include "lite/backends/cuda/target_wrapper.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
// Raw device pointers for one GRU step; all buffers are owned by the caller.
// Shapes below are implied by the GEMM leading dimensions in GRUUnitFunctor.
template <typename T>
struct GRUMetaValue {
  T* gate_weight;         // update/reset gate weights, frame_size x (2*frame_size)
  T* state_weight;        // candidate-state weights, frame_size x frame_size
  T* gate_value;          // 3 * frame_size gate activations per sequence
  T* reset_output_value;  // reset gate applied to the previous hidden state
  T* output_value;        // hidden state produced by this step
  T* prev_out_value;      // previous hidden state; nullptr on the first step
};
template <typename T>
struct GRUUnitFunctor {
static void compute(GRUMetaValue<T> value,
int frame_size,
int batch_size,
const lite::cuda::math::ActivationType& active_node,
const lite::cuda::math::ActivationType& active_gate,
bool origin_mode,
lite::cuda::math::Gemm<T, T>* blas,
CUDAContext* context) {
dim3 threads, grids;
if (batch_size == 1) {
if (lite::TargetWrapperCuda::GetComputeCapability() >= 70) {
if (frame_size < 16) {
constexpr int tiled_size = 8;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruGate<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.gate_weight,
value.reset_output_value,
frame_size,
active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruOut<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.state_weight,
value.prev_out_value,
value.output_value,
value.gate_value,
value.reset_output_value,
frame_size,
active_node,
origin_mode);
} else {
constexpr int tiled_size = 16;
int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size;
threads = dim3(tiled_size, 1);
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruGate<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.gate_weight,
value.reset_output_value,
frame_size,
active_gate);
frame_blocks = (frame_size + tiled_size - 1) / tiled_size;
grids = dim3(frame_blocks, 1);
lite::cuda::math::FastCollectiveGruOut<
T,
tiled_size><<<grids, threads, 0, context->exec_stream()>>>(
value.state_weight,
value.prev_out_value,
value.output_value,
value.gate_value,
value.reset_output_value,
frame_size,
active_node,
origin_mode);
}
return;
} else {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grids = dim3(frame_blocks, 1);
}
} else {
threads = dim3(32, 32);
grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size * 2,
frame_size,
frame_size,
frame_size * 2,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.prev_out_value,
value.gate_weight,
value.gate_value,
context);
}
lite::cuda::math::GruForwardResetOutput<
T><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.reset_output_value,
value.prev_out_value,
frame_size,
batch_size,
active_gate,
batch_size != 1);
CUDA_POST_KERNEL_CHECK;
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size,
frame_size,
frame_size,
frame_size,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.reset_output_value,
value.state_weight,
value.gate_value + frame_size * 2,
context);
}
lite::cuda::math::GruForwardFinalOutput<
T><<<grids, threads, 0, context->exec_stream()>>>(value.gate_value,
value.prev_out_value,
value.output_value,
frame_size,
batch_size,
active_node,
origin_mode,
batch_size != 1);
CUDA_POST_KERNEL_CHECK;
}
};
template struct GRUUnitFunctor<float>;
template <>
struct GRUUnitFunctor<half> {
static void compute(GRUMetaValue<half> value,
int frame_size,
int batch_size,
const lite::cuda::math::ActivationType& active_node,
const lite::cuda::math::ActivationType& active_gate,
bool origin_mode,
lite::cuda::math::Gemm<half, half>* blas,
CUDAContext* context) {
dim3 threads, grids;
if (batch_size == 1) {
int frame_per_block = frame_size <= 1024 ? frame_size : 1024;
int frame_blocks = (frame_size + 1024 - 1) / 1024;
threads = dim3(frame_per_block, 1);
grids = dim3(frame_blocks, 1);
} else {
threads = dim3(32, 32);
grids = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32);
}
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size * 2,
frame_size,
frame_size,
frame_size * 2,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.prev_out_value,
value.gate_weight,
value.gate_value,
context);
}
lite::cuda::math::GruForwardResetOutput<
half><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.reset_output_value,
value.prev_out_value,
frame_size,
batch_size,
active_gate,
batch_size == 1);
CUDA_POST_KERNEL_CHECK;
if (value.prev_out_value) {
CHECK(blas->init(false,
false,
batch_size,
frame_size,
frame_size,
frame_size,
frame_size,
frame_size * 3,
context));
blas->run(1.0f,
1.0f,
value.reset_output_value,
value.state_weight,
value.gate_value + frame_size * 2,
context);
}
lite::cuda::math::GruForwardFinalOutput<
half><<<grids, threads, 0, context->exec_stream()>>>(
value.gate_value,
value.prev_out_value,
value.output_value,
frame_size,
batch_size,
active_node,
origin_mode,
batch_size == 1);
CUDA_POST_KERNEL_CHECK;
}
};
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::PrepareForRun() {
gemm_impl_.reset(new lite::cuda::math::Gemm<T, T>);
}
template <typename T, PrecisionType PType>
void GRUCompute<T, PType>::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->template Param<param_t>();
auto* input = param.input;
T* x_data =
const_cast<lite::Tensor*>(input)->template mutable_data<T>(TARGET(kCUDA));
lite::Tensor* h0{nullptr};
if (param.h0) {
h0 = const_cast<lite::Tensor*>(param.h0);
}
lite::Tensor* bias{nullptr};
if (param.bias) {
bias = const_cast<lite::Tensor*>(param.bias);
}
const lite::Tensor* weight = param.weight;
T* weight_data = const_cast<T*>(weight->template data<T>());
lite::Tensor* batch_gate = param.batch_gate;
lite::Tensor* batch_reset_hidden_prev = param.batch_reset_hidden_prev;
lite::Tensor* batch_hidden = param.batch_hidden;
lite::Tensor* hidden = param.hidden;
T* batch_reset_hidden_prev_data =
batch_reset_hidden_prev->template mutable_data<T>(TARGET(kCUDA));
T* out_data = hidden->template mutable_data<T>(TARGET(kCUDA));
T* batch_gate_data = batch_gate->template mutable_data<T>(TARGET(kCUDA));
T* batch_hidden_data = batch_hidden->template mutable_data<T>(TARGET(kCUDA));
bool is_reverse = param.is_reverse;
auto active_node = lite::cuda::math::GetActiveType(param.activation);
auto active_gate = lite::cuda::math::GetActiveType(param.gate_activation);
bool origin_mode = param.origin_mode;
auto hidden_dims = hidden->dims();
int frame_size = hidden_dims[1];
LoD offset_vec_vec = input->lod();
std::vector<int> offset(offset_vec_vec[offset_vec_vec.size() - 1].size());
for (size_t i = 0; i < offset_vec_vec[offset_vec_vec.size() - 1].size();
++i) {
offset[i] = static_cast<int>(offset_vec_vec[offset_vec_vec.size() - 1][i]);
}
bool need_process = seq_utils_.GetSortedMap(offset, stream);
int emit_length = seq_utils_.GetEmitOffsetVec().size() - 1;
auto emit_offset_vec = seq_utils_.GetEmitOffsetVec();
if (need_process) {
seq_utils_.Seq2SortedSeq(
input->template data<T>(), batch_gate_data, 3 * frame_size, stream);
x_data = batch_gate_data;
out_data = batch_hidden_data;
}
if (bias) {
// TODO(wilber): validate when bias is not nullptr
lite::cuda::math::RowwiseAdd<T> add_bias;
add_bias(x_data,
bias->template data<T>(),
x_data,
frame_size,
batch_gate->numel(),
stream);
}
GRUMetaValue<T> gru_value;
gru_value.gate_weight = weight_data;
gru_value.state_weight = weight_data + 2 * frame_size * frame_size;
if (h0) {
// Since the batch computing for GRU reorders the input sequences
// according to their length. The initialized cell state also needs
// to reorder.
// TODO(wilber): validate when h0 is not nullptr
ordered_h0_.Resize(h0->dims());
lite::cuda::math::CopyMatrixRowsFunctor<T> row_shuffle;
row_shuffle(*h0, &ordered_h0_, batch_gate->lod()[2], true, stream);
gru_value.prev_out_value = ordered_h0_.mutable_data<T>(TARGET(kCUDA));
} else {
gru_value.prev_out_value = nullptr;
}
for (size_t n = 0; n < emit_length; ++n) {
int bstart = emit_offset_vec[n];
int bend = emit_offset_vec[n + 1];
int cur_batch_size = bend - bstart;
gru_value.output_value = out_data + bstart * frame_size;
gru_value.gate_value = x_data + bstart * frame_size * 3;
gru_value.reset_output_value =
batch_reset_hidden_prev_data + bstart * frame_size;
GRUUnitFunctor<T>::compute(gru_value,
frame_size,
cur_batch_size,
active_node,
active_gate,
origin_mode,
gemm_impl_.get(),
&context);
gru_value.prev_out_value = gru_value.output_value;
}
if (need_process) {
seq_utils_.SortedSeq2Seq(batch_hidden_data,
hidden->mutable_data<T>(TARGET(kCUDA)),
frame_size,
stream);
}
hidden->set_lod(input->lod());
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
using GRUFp32 =
paddle::lite::kernels::cuda::GRUCompute<float, PRECISION(kFloat)>;
using GRUFp16 = paddle::lite::kernels::cuda::GRUCompute<half, PRECISION(kFP16)>;
REGISTER_LITE_KERNEL(gru, kCUDA, kFloat, kNCHW, GRUFp32, def)
.BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Weight", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchGate", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchResetHiddenPrev", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("BatchHidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Hidden", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
REGISTER_LITE_KERNEL(gru, kCUDA, kFP16, kNCHW, GRUFp16, def)
.BindInput("Input",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("H0", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Weight",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchGate",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchResetHiddenPrev",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("BatchHidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("Hidden",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.Finalize();
|
7943b3d571b8457f2159c726b1cc3f82ecc1ba87.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Proj 3-2 SKELETON
*/
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include "utils.h"
/* Kernel: mirrors every row of the width x width matrix ARR in place.
 * One thread per element; only threads in the left half of a row act,
 * each swapping its element with the mirrored one in the same row. */
__global__ void flip_horizontal_kernel(float *arr, int width) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int row = idx / width;
  int col = idx % width;
  if (col < width / 2 && idx < (width * width)) {
    int mirror = (row + 1) * width - (col + 1);
    float tmp = arr[idx];
    arr[idx] = arr[mirror];
    arr[mirror] = tmp;
  }
}
/* Does a horizontal flip of the array arr */
void flip_horizontal(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (over-allocates a block when evenly divisible; the kernel
  // bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  hipLaunchKernelGGL(( flip_horizontal_kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0, arr, width);
  // Launch is asynchronous; block until the kernel finishes before returning.
  hipDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* Kernel: transposes the width x width matrix ARR in place.
 * Only threads strictly above the main diagonal (row < col) swap with
 * their mirror element.  The original condition let BOTH (i,j) and (j,i)
 * perform the swap, which is a data race, and when both swaps complete
 * the matrix ends up un-transposed. */
__global__ void transpose_kernel(float *arr, int width) {
  int thisThreadIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int div_val = thisThreadIndex / width;
  int mod_val = thisThreadIndex % width;
  if (thisThreadIndex < (width * width) && div_val < mod_val) {
    float temp = arr[thisThreadIndex];
    arr[thisThreadIndex] = arr[mod_val * width + div_val];
    arr[mod_val * width + div_val] = temp;
  }
}
/* Transposes the square array ARR. */
void transpose(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (the kernel bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  hipLaunchKernelGGL(( transpose_kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0, arr, width);
  // Launch is asynchronous; block until the kernel finishes before returning.
  hipDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* Kernel: reverses the row order of the width x width matrix ARR in place
 * (a vertical flip; combined with a transpose this yields a 90-degree
 * rotation).  Each thread in the TOP half swaps its element with the
 * mirrored row.  The original condition (div_val > width / 2) skipped the
 * pair (width/2 - 1, width/2) whenever width is even, leaving those two
 * rows unswapped. */
__global__ void rotate_kernel(float *arr, int width) {
  int thisThreadIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int div_val = thisThreadIndex / width;
  int mod_val = thisThreadIndex % width;
  if (thisThreadIndex < (width * width) && div_val < width / 2) {
    int mirror = (width - div_val - 1) * width + mod_val;
    float temp = arr[thisThreadIndex];
    arr[thisThreadIndex] = arr[mirror];
    arr[mirror] = temp;
  }
}
/* Rotates the square array ARR by 90 degrees counterclockwise. */
// NOTE(review): this launches only rotate_kernel, which swaps rows; a full
// 90-degree rotation also needs a transpose -- presumably the caller
// combines transpose() with this, confirm.
void rotate_ccw_90(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (the kernel bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  hipLaunchKernelGGL(( rotate_kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0, arr, width);
  // Launch is asynchronous; block until the kernel finishes before returning.
  hipDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* The kernel for the calc_min_dist function. It takes in a TEMPLATE and an IMAGE
 * and calculates the per-element squared difference between the two, writing
 * one value per element into TOTAL (the host then reduces TOTAL to a sum).
 */
__global__ void calc_min_dist_kernel(float *total, float *temp, float *image, int width) {
  int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  int threadId = blockId * blockDim.x + threadIdx.x;
  if (threadId < (width * width)) {
    // Squaring via multiplication instead of pow(): pow takes doubles, so
    // the original silently promoted to double precision for a simple square.
    float diff = temp[threadId] - image[threadId];
    total[threadId] = diff * diff;
  }
}
/* One round of a pairwise tree reduction over ARR: each active thread adds
 * the element LEVEL slots to its right into its own slot.  After log2
 * rounds (level = 1, 2, 4, ...) the total ends up in arr[0].
 * The partner index is bounds-checked too: the original only guarded the
 * destination, so for a LEN that is not a power of two the last thread
 * read one element past the end of the buffer. */
__global__ void add_distance_kernel(float *arr, int len, int level) {
  int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  int threadId = blockId * blockDim.x + threadIdx.x;
  int base = threadId * level * 2;
  if (base < len && base + level < len) {
    arr[base] += arr[base + level];
  }
}
/* Returns the squared Euclidean distance between TEMPLATE and IMAGE. The size of IMAGE
 * is I_WIDTH * I_HEIGHT, while TEMPLATE is square with side length T_WIDTH.
 *
 * NOTE(review): as in the original, only the top-left t_width x t_width
 * window of the image is compared; the flip/rotate/translate sweep is
 * presumably driven elsewhere -- confirm against the caller.
 */
float calc_min_dist(float *image, int i_width, int i_height, float *temp, int t_width) {
  // float* image and float* temp are pointers to GPU addressible memory.
  int len = t_width * t_width;
  float min_dist = FLT_MAX;

  /* Round the scratch buffer up to the next power of two and zero-fill the
   * padding, so the tree reduction below terminates for ANY t_width.  The
   * original looped `while (level != len)`, which never terminates when len
   * is not a power of two. */
  int padded_len = 1;
  while (padded_len < len) {
    padded_len *= 2;
  }
  float *curr_dist = NULL;
  hipMalloc(&curr_dist, padded_len * sizeof(float));
  hipMemset(curr_dist, 0, padded_len * sizeof(float));

  /* Launch enough threads to cover EVERY element of the template.  The
   * original sized the grid from t_width rather than t_width * t_width,
   * leaving most of curr_dist unwritten for larger templates. */
  int threads_per_block = 512;
  int blocks_per_grid = (len + threads_per_block - 1) / threads_per_block;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  hipLaunchKernelGGL(( calc_min_dist_kernel), dim3(dim_blocks_per_grid), dim3(dim_threads_per_block), 0, 0, curr_dist, temp, image, t_width);
  hipDeviceSynchronize();
  CUT_CHECK_ERROR("");

  /* Pairwise tree reduction: after log2(padded_len) rounds curr_dist[0]
   * holds the sum of squared differences.  Because padded_len is a power
   * of two, every partner index stays in bounds. */
  for (int level = 1; level < padded_len; level *= 2) {
    int active_threads = padded_len / (level * 2);
    int blocks = (active_threads + threads_per_block - 1) / threads_per_block;
    dim3 reduce_grid(blocks, 1);
    hipLaunchKernelGGL(( add_distance_kernel), dim3(reduce_grid), dim3(dim_threads_per_block), 0, 0, curr_dist, padded_len, level);
    hipDeviceSynchronize();
    CUT_CHECK_ERROR("");
  }

  float curr_min = 0.0f;
  hipMemcpy(&curr_min, curr_dist, sizeof(float), hipMemcpyDeviceToHost);
  if (curr_min < min_dist) {
    min_dist = curr_min;
  }
  hipFree(curr_dist);
  return min_dist;
}
| 7943b3d571b8457f2159c726b1cc3f82ecc1ba87.cu | /*
* Proj 3-2 SKELETON
*/
#include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include "utils.h"
/* Kernel: mirrors every row of the width x width matrix ARR in place.
 * One thread per element; only threads in the left half of a row act,
 * each swapping its element with the mirrored one in the same row. */
__global__ void flip_horizontal_kernel(float *arr, int width) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int row = idx / width;
  int col = idx % width;
  if (col < width / 2 && idx < (width * width)) {
    int mirror = (row + 1) * width - (col + 1);
    float tmp = arr[idx];
    arr[idx] = arr[mirror];
    arr[mirror] = tmp;
  }
}
/* Does a horizontal flip of the array arr */
void flip_horizontal(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (the kernel bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  flip_horizontal_kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(arr, width);
  // cudaThreadSynchronize() is deprecated (removed in CUDA 12);
  // cudaDeviceSynchronize() is the direct replacement with the same
  // semantics here: block until the asynchronous launch completes.
  cudaDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* Kernel: transposes the width x width matrix ARR in place.
 * Only threads strictly above the main diagonal (row < col) swap with
 * their mirror element.  The original condition let BOTH (i,j) and (j,i)
 * perform the swap, which is a data race, and when both swaps complete
 * the matrix ends up un-transposed. */
__global__ void transpose_kernel(float *arr, int width) {
  int thisThreadIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int div_val = thisThreadIndex / width;
  int mod_val = thisThreadIndex % width;
  if (thisThreadIndex < (width * width) && div_val < mod_val) {
    float temp = arr[thisThreadIndex];
    arr[thisThreadIndex] = arr[mod_val * width + div_val];
    arr[mod_val * width + div_val] = temp;
  }
}
/* Transposes the square array ARR. */
void transpose(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (the kernel bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  transpose_kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(arr, width);
  // cudaThreadSynchronize() is deprecated (removed in CUDA 12);
  // cudaDeviceSynchronize() is the direct replacement.
  cudaDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* Kernel: reverses the row order of the width x width matrix ARR in place
 * (a vertical flip; combined with a transpose this yields a 90-degree
 * rotation).  Each thread in the TOP half swaps its element with the
 * mirrored row.  The original condition (div_val > width / 2) skipped the
 * pair (width/2 - 1, width/2) whenever width is even, leaving those two
 * rows unswapped. */
__global__ void rotate_kernel(float *arr, int width) {
  int thisThreadIndex = blockIdx.x * blockDim.x + threadIdx.x;
  int div_val = thisThreadIndex / width;
  int mod_val = thisThreadIndex % width;
  if (thisThreadIndex < (width * width) && div_val < width / 2) {
    int mirror = (width - div_val - 1) * width + mod_val;
    float temp = arr[thisThreadIndex];
    arr[thisThreadIndex] = arr[mirror];
    arr[mirror] = temp;
  }
}
/* Rotates the square array ARR by 90 degrees counterclockwise.
 * NOTE(review): this launches only rotate_kernel, which swaps rows; a full
 * 90-degree rotation also needs a transpose -- presumably the caller
 * combines transpose() with this, confirm. */
void rotate_ccw_90(float *arr, int width) {
  // One thread per element; floor-divide plus one extra block guarantees
  // coverage (the kernel bounds-checks the tail).
  int threads_per_block = 512;
  int blocks_per_grid = (width * width / threads_per_block) + 1;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  rotate_kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(arr, width);
  // cudaThreadSynchronize() is deprecated (removed in CUDA 12);
  // cudaDeviceSynchronize() is the direct replacement.
  cudaDeviceSynchronize();
  CUT_CHECK_ERROR("");
}
/* The kernel for the calc_min_dist function. It takes in a TEMPLATE and an IMAGE
 * and calculates the per-element squared difference between the two, writing
 * one value per element into TOTAL (the host then reduces TOTAL to a sum).
 */
__global__ void calc_min_dist_kernel(float *total, float *temp, float *image, int width) {
  int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  int threadId = blockId * blockDim.x + threadIdx.x;
  if (threadId < (width * width)) {
    // Squaring via multiplication instead of pow(): pow takes doubles, so
    // the original silently promoted to double precision for a simple square.
    float diff = temp[threadId] - image[threadId];
    total[threadId] = diff * diff;
  }
}
/* One round of a pairwise tree reduction over ARR: each active thread adds
 * the element LEVEL slots to its right into its own slot.  After log2
 * rounds (level = 1, 2, 4, ...) the total ends up in arr[0].
 * The partner index is bounds-checked too: the original only guarded the
 * destination, so for a LEN that is not a power of two the last thread
 * read one element past the end of the buffer. */
__global__ void add_distance_kernel(float *arr, int len, int level) {
  int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  int threadId = blockId * blockDim.x + threadIdx.x;
  int base = threadId * level * 2;
  if (base < len && base + level < len) {
    arr[base] += arr[base + level];
  }
}
/* Returns the squared Euclidean distance between TEMPLATE and IMAGE. The size of IMAGE
 * is I_WIDTH * I_HEIGHT, while TEMPLATE is square with side length T_WIDTH.
 *
 * NOTE(review): as in the original, only the top-left t_width x t_width
 * window of the image is compared; the flip/rotate/translate sweep is
 * presumably driven elsewhere -- confirm against the caller.
 */
float calc_min_dist(float *image, int i_width, int i_height, float *temp, int t_width) {
  // float* image and float* temp are pointers to GPU addressible memory.
  int len = t_width * t_width;
  float min_dist = FLT_MAX;

  /* Round the scratch buffer up to the next power of two and zero-fill the
   * padding, so the tree reduction below terminates for ANY t_width.  The
   * original looped `while (level != len)`, which never terminates when len
   * is not a power of two. */
  int padded_len = 1;
  while (padded_len < len) {
    padded_len *= 2;
  }
  float *curr_dist = NULL;
  cudaMalloc(&curr_dist, padded_len * sizeof(float));
  cudaMemset(curr_dist, 0, padded_len * sizeof(float));

  /* Launch enough threads to cover EVERY element of the template.  The
   * original sized the grid from t_width rather than t_width * t_width,
   * leaving most of curr_dist unwritten for larger templates. */
  int threads_per_block = 512;
  int blocks_per_grid = (len + threads_per_block - 1) / threads_per_block;
  dim3 dim_blocks_per_grid(blocks_per_grid, 1);
  dim3 dim_threads_per_block(threads_per_block, 1, 1);
  calc_min_dist_kernel<<<dim_blocks_per_grid, dim_threads_per_block>>>(curr_dist, temp, image, t_width);
  // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // direct replacement.
  cudaDeviceSynchronize();
  CUT_CHECK_ERROR("");

  /* Pairwise tree reduction: after log2(padded_len) rounds curr_dist[0]
   * holds the sum of squared differences.  Because padded_len is a power
   * of two, every partner index stays in bounds. */
  for (int level = 1; level < padded_len; level *= 2) {
    int active_threads = padded_len / (level * 2);
    int blocks = (active_threads + threads_per_block - 1) / threads_per_block;
    dim3 reduce_grid(blocks, 1);
    add_distance_kernel<<<reduce_grid, dim_threads_per_block>>>(curr_dist, padded_len, level);
    cudaDeviceSynchronize();
    CUT_CHECK_ERROR("");
  }

  float curr_min = 0.0f;
  cudaMemcpy(&curr_min, curr_dist, sizeof(float), cudaMemcpyDeviceToHost);
  if (curr_min < min_dist) {
    min_dist = curr_min;
  }
  cudaFree(curr_dist);
  return min_dist;
}
|
a03e06c53e43471d5053c1625712d9fa7416294e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "CudaPitch.h"
// to prevent IDE complains about unknown CUDA keywords
#ifdef __CDT_PARSER__
#define __global__
#define __device__
#define __host__
#define __shared__
#define __syncthreads();
#define CUDA_KERNEL_DIM(...)
#else
#define CUDA_KERNEL_DIM(...) <<< __VA_ARGS__ >>>
#endif
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
// Reports a failed HIP runtime call on stderr, tagged with the call site
// captured by the CudaSafeCall macro. Note: this only reports -- it does
// not abort, so execution continues after a failed call.
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
    if( err == hipSuccess )
        return;
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) );
}
// Kernel: adds 1 to every element of a pitched `width` x `height` matrix.
// Launched with a 1D grid of 1D blocks covering `width` columns; each
// thread walks its own column down all `height` rows. `pitch` is the row
// stride in BYTES, as returned by hipMallocPitch.
__global__ void cudaAddOne( ScalarT* const mem, size_t width, size_t height, size_t pitch )
{
    const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
    if( col >= width )
        return;
    // Convert the byte pitch to an element stride once, outside the loop.
    const size_t rowStride = pitch / sizeof(ScalarT);
    for( size_t row = 0; row < height; ++row )
    {
        mem[row * rowStride + col] += 1;
    }
}
/**
* Adds 1 to each element of the host matrix `mem` (width x height,
* row-major) by staging it through a pitched device allocation.
*
* \param mem    host buffer of width*height ScalarT values; updated in place
* \param width  number of columns (elements per row)
* \param height number of rows
*/
void addOne( ScalarT* const mem, size_t width, size_t height )
{
    ScalarT* dev_mem = NULL;
    size_t pitch;
    // Allocate an aligned 2D memory
    CudaSafeCall( hipMallocPitch(&dev_mem, &pitch, width * sizeof(ScalarT), height) );
    // Copy the input on the device, considering the pitched memory!
    CudaSafeCall(
        hipMemcpy2D(dev_mem, pitch, mem, width*sizeof(ScalarT), width*sizeof(ScalarT), height, hipMemcpyHostToDevice) );
    printf( "pitch (bytes): %zd\n", pitch );
    printf( "pitch (elements): %zd\n", pitch / sizeof(ScalarT) );
    size_t threadsPerBlock = 32;
    size_t blocksPerGrid = ( width + threadsPerBlock - 1 ) / threadsPerBlock;
    cudaAddOne CUDA_KERNEL_DIM(blocksPerGrid, threadsPerBlock) (dev_mem, width, height, pitch);
    // Kernel launches do not return an error code directly; surface a bad
    // launch configuration explicitly instead of letting it go unnoticed.
    CudaSafeCall( hipGetLastError() );
    // Copy the device memory back to host, considering the pitched memory!
    // (hipMemcpy2D is blocking, so it also synchronizes with the kernel.)
    CudaSafeCall(
        hipMemcpy2D( mem, width * sizeof(ScalarT), dev_mem, pitch, width * sizeof(ScalarT), height, hipMemcpyDeviceToHost ) );
    CudaSafeCall( hipFree((void*)dev_mem) );
}
/**
* Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the memory area pointed to by dst,
* where kind is one of hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, or hipMemcpyDeviceToDevice,
* and specifies the direction of the copy.
* dpitch and spitch are the widths in memory in bytes of the 2D arrays pointed to by dst and src, including any padding added to the end of each row.
* The memory areas may not overlap. width must not exceed either dpitch or spitch.
* Calling hipMemcpy2D() with dst and src pointers that do not match the direction of the copy results in an undefined behavior.
* hipMemcpy2D() returns an error if dpitch or spitch exceeds the maximum allowed.
*
* \param dst - Destination memory address
* \param dpitch - Pitch of destination memory
* \param src - Source memory address
* \param spitch - Pitch of source memory
* \param width - Width of matrix transfer (columns in bytes)
* \param height - Height of matrix transfer (rows)
* \param kind - Type of transfer
*
* \return hipSuccess on success, or a hipError_t error code describing the failure.
*
* hipError_t hipMemcpy2D( void * dst, size_t dpitch, const void * src, size_t spitch, size_t width, size_t height,
enum hipMemcpyKind kind );
*/
| a03e06c53e43471d5053c1625712d9fa7416294e.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include "CudaPitch.h"
// to prevent IDE complains about unknown CUDA keywords
#ifdef __CDT_PARSER__
#define __global__
#define __device__
#define __host__
#define __shared__
#define __syncthreads();
#define CUDA_KERNEL_DIM(...)
#else
#define CUDA_KERNEL_DIM(...) <<< __VA_ARGS__ >>>
#endif
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
// Reports a failed CUDA runtime call on stderr, tagged with the call site
// captured by the CudaSafeCall macro. Note: this only reports -- it does
// not abort, so execution continues after a failed call.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
    if( err == cudaSuccess )
        return;
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) );
}
// Kernel: adds 1 to every element of a pitched `width` x `height` matrix.
// Launched with a 1D grid of 1D blocks covering `width` columns; each
// thread walks its own column down all `height` rows. `pitch` is the row
// stride in BYTES, as returned by cudaMallocPitch.
__global__ void cudaAddOne( ScalarT* const mem, size_t width, size_t height, size_t pitch )
{
    const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
    if( col >= width )
        return;
    // Convert the byte pitch to an element stride once, outside the loop.
    const size_t rowStride = pitch / sizeof(ScalarT);
    for( size_t row = 0; row < height; ++row )
    {
        mem[row * rowStride + col] += 1;
    }
}
/**
* Adds 1 to each element of the host matrix `mem` (width x height,
* row-major) by staging it through a pitched device allocation.
*
* \param mem    host buffer of width*height ScalarT values; updated in place
* \param width  number of columns (elements per row)
* \param height number of rows
*/
void addOne( ScalarT* const mem, size_t width, size_t height )
{
    ScalarT* dev_mem = NULL;
    size_t pitch;
    // Allocate an aligned 2D memory
    CudaSafeCall( cudaMallocPitch(&dev_mem, &pitch, width * sizeof(ScalarT), height) );
    // Copy the input on the device, considering the pitched memory!
    CudaSafeCall(
        cudaMemcpy2D(dev_mem, pitch, mem, width*sizeof(ScalarT), width*sizeof(ScalarT), height, cudaMemcpyHostToDevice) );
    printf( "pitch (bytes): %zd\n", pitch );
    printf( "pitch (elements): %zd\n", pitch / sizeof(ScalarT) );
    size_t threadsPerBlock = 32;
    size_t blocksPerGrid = ( width + threadsPerBlock - 1 ) / threadsPerBlock;
    cudaAddOne CUDA_KERNEL_DIM(blocksPerGrid, threadsPerBlock) (dev_mem, width, height, pitch);
    // Kernel launches do not return an error code directly; surface a bad
    // launch configuration explicitly instead of letting it go unnoticed.
    CudaSafeCall( cudaGetLastError() );
    // Copy the device memory back to host, considering the pitched memory!
    // (cudaMemcpy2D is blocking, so it also synchronizes with the kernel.)
    CudaSafeCall(
        cudaMemcpy2D( mem, width * sizeof(ScalarT), dev_mem, pitch, width * sizeof(ScalarT), height, cudaMemcpyDeviceToHost ) );
    CudaSafeCall( cudaFree((void*)dev_mem) );
}
/**
* Copies a matrix (height rows of width bytes each) from the memory area pointed to by src to the memory area pointed to by dst,
* where kind is one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, or cudaMemcpyDeviceToDevice,
* and specifies the direction of the copy.
* dpitch and spitch are the widths in memory in bytes of the 2D arrays pointed to by dst and src, including any padding added to the end of each row.
* The memory areas may not overlap. width must not exceed either dpitch or spitch.
* Calling cudaMemcpy2D() with dst and src pointers that do not match the direction of the copy results in an undefined behavior.
* cudaMemcpy2D() returns an error if dpitch or spitch exceeds the maximum allowed.
*
* \param dst - Destination memory address
* \param dpitch - Pitch of destination memory
* \param src - Source memory address
* \param spitch - Pitch of source memory
* \param width - Width of matrix transfer (columns in bytes)
* \param height - Height of matrix transfer (rows)
* \param kind - Type of transfer
*
* \return cudaSuccess on success, or a cudaError_t error code describing the failure.
*
* cudaError_t cudaMemcpy2D( void * dst, size_t dpitch, const void * src, size_t spitch, size_t width, size_t height,
enum cudaMemcpyKind kind );
*/
|
7eaae15a7d7595285545d498efd1b9c7303e11d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <numeric>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Copies contents of `in` to `out`. Copies validity if present
* but does not compute null count.
*
* @param in column_view to copy from
* @param out mutable_column_view to copy to.
*/
template <size_type block_size, typename T, bool has_validity>
__launch_bounds__(block_size) __global__
void copy_in_place_kernel(column_device_view const in, mutable_column_device_view out)
{
// NOTE(review): assumes blockDim.x == block_size at launch -- tid and the
// grid stride are computed from the template constant, not blockDim.x.
const size_type tid = threadIdx.x + blockIdx.x * block_size;
const int warp_id = tid / cudf::detail::warp_size;
const size_type warps_per_grid = gridDim.x * block_size / cudf::detail::warp_size;
// begin/end indices for the column data
size_type begin = 0;
size_type end = in.size();
// warp indices. since 1 warp == 32 threads == sizeof(bit_mask_t) * 8,
// each warp will process one (32 bit) of the validity mask via
// __ballot_sync()
size_type warp_begin = cudf::word_index(begin);
size_type warp_end = cudf::word_index(end - 1);
// lane id within the current warp
const int lane_id = threadIdx.x % cudf::detail::warp_size;
// current warp.
size_type warp_cur = warp_begin + warp_id;
size_type index = tid;
// grid-stride loop: each warp owns one 32-bit validity word per iteration,
// each lane one element. No lane exits early, so the full warp mask below
// is safe -- out-of-range lanes simply contribute 'false' to the ballot.
while (warp_cur <= warp_end) {
bool in_range = (index >= begin && index < end);
bool valid = true;
if (has_validity) { valid = in_range && in.is_valid(index); }
if (in_range) { out.element<T>(index) = in.element<T>(index); }
// update validity
if (has_validity) {
// the final validity mask for this warp
int warp_mask = __ballot_sync(0xFFFF'FFFF, valid && in_range);
// only one guy in the warp needs to update the mask and count
if (lane_id == 0) { out.set_mask_word(warp_cur, warp_mask); }
}
// next grid
warp_cur += warps_per_grid;
index += block_size * gridDim.x;
}
}
/**
* @brief Copies contents of one string column to another. Copies validity if present
* but does not compute null count.
*
* The purpose of this kernel is to reduce the number of
* kernel calls for copying a string column from 2 to 1, since number of kernel calls is the
* dominant factor in large scale contiguous_split() calls. To do this, the kernel is
* invoked with using max(num_chars, num_offsets) threads and then doing separate
* bounds checking on offset, chars and validity indices.
*
* Outgoing offset values are shifted down to account for the new base address
* each column gets as a result of the contiguous_split() process.
*
* @param in num_strings number of strings (rows) in the column
* @param in offsets_in pointer to incoming offsets to be copied
* @param out offsets_out pointer to output offsets
* @param in validity_in_offset offset into validity buffer to add to element indices
* @param in validity_in pointer to incoming validity vector to be copied
* @param out validity_out pointer to output validity vector
* @param in offset_shift value to shift copied offsets down by
* @param in num_chars number of chars to copy
* @param in chars_in input chars to be copied
* @param out chars_out output chars to be copied.
*/
template <size_type block_size, bool has_validity>
__launch_bounds__(block_size) __global__
void copy_in_place_strings_kernel(size_type num_strings,
size_type const* __restrict__ offsets_in,
size_type* __restrict__ offsets_out,
size_type validity_in_offset,
bitmask_type const* __restrict__ validity_in,
bitmask_type* __restrict__ validity_out,
size_type offset_shift,
size_type num_chars,
char const* __restrict__ chars_in,
char* __restrict__ chars_out)
{
// NOTE(review): assumes blockDim.x == block_size at launch -- tid and the
// grid stride are derived from the template constant, not blockDim.x.
const size_type tid = threadIdx.x + blockIdx.x * block_size;
const int warp_id = tid / cudf::detail::warp_size;
const size_type warps_per_grid = gridDim.x * block_size / cudf::detail::warp_size;
// how many warps we'll be processing. with strings, the chars and offsets
// lengths may be different. so we'll just march the worst case.
// NOTE(review): '::max' looks like a hipify artifact of 'std::max' (see the
// original CUDA source) -- confirm it resolves to the device max() overload.
size_type warp_begin = cudf::word_index(0);
size_type warp_end = cudf::word_index(::max(num_chars, num_strings + 1) - 1);
// end indices for chars
size_type chars_end = num_chars;
// end indices for offsets
size_type offsets_end = num_strings + 1;
// end indices for validity and the last warp that actually should
// be updated
size_type validity_end = num_strings;
size_type validity_warp_end = cudf::word_index(num_strings - 1);
// lane id within the current warp
const int lane_id = threadIdx.x % cudf::detail::warp_size;
size_type warp_cur = warp_begin + warp_id;
size_type index = tid;
// grid-stride loop over the worst-case range; separate bounds checks gate
// the chars copy, the offsets copy, and the validity update.
while (warp_cur <= warp_end) {
if (index < chars_end) { chars_out[index] = chars_in[index]; }
if (index < offsets_end) {
// each output column starts at a new base pointer. so we have to
// shift every offset down by the point (in chars) at which it was split.
offsets_out[index] = offsets_in[index] - offset_shift;
}
// if we're still in range of validity at all
if (has_validity && warp_cur <= validity_warp_end) {
bool valid = (index < validity_end) && bit_is_set(validity_in, validity_in_offset + index);
// the final validity mask for this warp
int warp_mask = __ballot_sync(0xFFFF'FFFF, valid);
// only one guy in the warp needs to update the mask and count
if (lane_id == 0) { validity_out[warp_cur] = warp_mask; }
}
// next grid
warp_cur += warps_per_grid;
index += block_size * gridDim.x;
}
}
// align all column size allocations to this boundary so that all output column buffers
// start at that alignment.
static constexpr size_t split_align = 64;
/**
* @brief Information about the split for a given column. Bundled together
* into a struct because tuples were getting pretty unreadable.
*
* All *_buf_size fields are byte counts already rounded up to split_align.
*/
struct column_split_info {
size_t data_buf_size; // size of the data (including padding)
size_t validity_buf_size; // validity vector size (including padding)
size_t offsets_buf_size; // (strings only) size of offset column (including padding)
size_type num_chars; // (strings only) number of chars in the column
size_type chars_offset; // (strings only) offset from head of chars data
};
/**
* @brief Functor called by the `type_dispatcher` to incrementally compute total
* memory buffer size needed to allocate a contiguous copy of all columns within
* a source table.
*
* Side effect: fills in the data/validity buffer sizes of `split_info` for
* fixed-width columns (string columns are precomputed elsewhere).
*/
struct column_buffer_size_functor {
// Returns the total padded byte size needed for column `c`, recording the
// per-buffer breakdown in `split_info`.
template <typename T>
size_t operator()(column_view const& c, column_split_info& split_info)
{
split_info.data_buf_size = cudf::util::round_up_safe(c.size() * sizeof(T), split_align);
split_info.validity_buf_size =
(c.has_nulls() ? cudf::bitmask_allocation_size_bytes(c.size(), split_align) : 0);
return split_info.data_buf_size + split_info.validity_buf_size;
}
};
// Strings: the sizes were filled in by preprocess_string_column_info();
// this overload just totals them.
template <>
size_t column_buffer_size_functor::operator()<string_view>(column_view const& c,
column_split_info& split_info)
{
// this has already been precomputed in an earlier step. return the sum.
return split_info.data_buf_size + split_info.validity_buf_size + split_info.offsets_buf_size;
}
/**
* @brief Functor called by the `type_dispatcher` to copy a column into a contiguous
* buffer of output memory.
*
* Used for copying each column in a source table into one contiguous buffer of memory.
*
* Side effect: advances `dst` past this column's data+validity buffers so the
* next column starts immediately after, and appends the new view to `out_cols`.
*/
struct column_copy_functor {
template <typename T>
void operator()(column_view const& in,
column_split_info const& split_info,
char*& dst,
std::vector<column_view>& out_cols)
{
// outgoing pointers
char* data = dst;
bitmask_type* validity = split_info.validity_buf_size == 0
? nullptr
: reinterpret_cast<bitmask_type*>(dst + split_info.data_buf_size);
// increment working buffer
dst += (split_info.data_buf_size + split_info.validity_buf_size);
// no work to do
if (in.size() == 0) {
out_cols.push_back(column_view{in.type(), 0, nullptr});
return;
}
// custom copy kernel (which could probably just be an in-place copy() function in cudf).
cudf::size_type num_els = cudf::util::round_up_safe(in.size(), cudf::detail::warp_size);
constexpr int block_size = 256;
cudf::detail::grid_1d grid{num_els, block_size, 1};
// output copied column
mutable_column_view mcv{in.type(), in.size(), data, validity, in.null_count()};
// NOTE(review): kernels launch on the default stream (trailing 0), not a
// caller-supplied stream -- confirm this is intended.
if (in.has_nulls()) {
hipLaunchKernelGGL(( copy_in_place_kernel<block_size, T, true>), dim3(grid.num_blocks), dim3(block_size), 0, 0,
*column_device_view::create(in), *mutable_column_device_view::create(mcv));
} else {
hipLaunchKernelGGL(( copy_in_place_kernel<block_size, T, false>), dim3(grid.num_blocks), dim3(block_size), 0, 0,
*column_device_view::create(in), *mutable_column_device_view::create(mcv));
}
out_cols.push_back(mcv);
}
};
// String specialization: copies chars, offsets and validity in one fused
// kernel call, then assembles a parent view over the two child columns.
// Side effect: advances `dst` past data+validity+offsets for this column.
template <>
void column_copy_functor::operator()<string_view>(column_view const& in,
column_split_info const& split_info,
char*& dst,
std::vector<column_view>& out_cols)
{
// outgoing pointers
char* chars_buf = dst;
bitmask_type* validity_buf = split_info.validity_buf_size == 0
? nullptr
: reinterpret_cast<bitmask_type*>(dst + split_info.data_buf_size);
size_type* offsets_buf =
reinterpret_cast<size_type*>(dst + split_info.data_buf_size + split_info.validity_buf_size);
// increment working buffer
dst += (split_info.data_buf_size + split_info.validity_buf_size + split_info.offsets_buf_size);
// offsets column.
strings_column_view strings_c(in);
column_view in_offsets = strings_c.offsets();
// note, incoming columns are sliced, so their size is fundamentally different from their child
// offset columns, which are unsliced.
size_type num_offsets = in.size() + 1;
// NOTE(review): '::max' looks like a hipify artifact of 'std::max' -- confirm
// it resolves on the host for these argument types.
cudf::size_type num_threads =
cudf::util::round_up_safe(::max(split_info.num_chars, num_offsets), cudf::detail::warp_size);
column_view in_chars = strings_c.chars();
// a column with no strings will still have a single offset.
CUDF_EXPECTS(num_offsets > 0, "Invalid offsets child column");
// 1 combined kernel call that copies chars, offsets and validity in one pass. see notes on why
// this exists in the kernel brief.
constexpr int block_size = 256;
cudf::detail::grid_1d grid{num_threads, block_size, 1};
if (in.has_nulls()) {
hipLaunchKernelGGL(( copy_in_place_strings_kernel<block_size, true>), dim3(grid.num_blocks), dim3(block_size), 0, 0,
in.size(), // num_rows
in_offsets.head<size_type>() + in.offset(), // offsets_in
offsets_buf, // offsets_out
in.offset(), // validity_in_offset
in.null_mask(), // validity_in
validity_buf, // validity_out
split_info.chars_offset, // offset_shift
split_info.num_chars, // num_chars
in_chars.head<char>() + split_info.chars_offset, // chars_in
chars_buf);
} else {
hipLaunchKernelGGL(( copy_in_place_strings_kernel<block_size, false>), dim3(grid.num_blocks), dim3(block_size), 0, 0,
in.size(), // num_rows
in_offsets.head<size_type>() + in.offset(), // offsets_in
offsets_buf, // offsets_out
0, // validity_in_offset
nullptr, // validity_in
nullptr, // validity_out
split_info.chars_offset, // offset_shift
split_info.num_chars, // num_chars
in_chars.head<char>() + split_info.chars_offset, // chars_in
chars_buf);
}
// output child columns
column_view out_offsets{in_offsets.type(), num_offsets, offsets_buf};
column_view out_chars{in_chars.type(), static_cast<size_type>(split_info.num_chars), chars_buf};
// result
out_cols.push_back(column_view(
in.type(), in.size(), nullptr, validity_buf, in.null_count(), 0, {out_offsets, out_chars}));
}
/**
* @brief Information about a string column in a table view.
*
* Used internally by preprocess_string_column_info as part of a device-accessible
* vector for computing final string information in a single kernel call.
*/
struct column_preprocess_info {
size_type index; // position of the string column within the table
size_type offset; // slice offset of the (possibly sliced) column
size_type size; // number of rows in the sliced column
bool has_nulls;
cudf::column_device_view offsets; // device view of the unsliced offsets child
};
/**
* @brief Preprocess information about all strings columns in a table view.
*
* In order to minimize how often we touch the gpu, we need to preprocess various pieces of
* information about the string columns in a table as a batch process. This function builds a list
* of the offset columns for all input string columns and computes this information with a single
* thrust call. In addition, the vector returned is allocated for -all- columns in the table so
* further processing of non-string columns can happen afterwards.
*
* The key things this function avoids
* - avoiding reaching into gpu memory on the cpu to retrieve offsets to compute string sizes.
* - creating column_device_views on the base string_column_view itself as that causes gpu memory
* allocation.
*/
thrust::host_vector<column_split_info> preprocess_string_column_info(
cudf::table_view const& t,
rmm::device_vector<column_split_info>& device_split_info,
hipStream_t stream)
{
// NOTE(review): `device_split_info` is never referenced in this function;
// it appears to be pre-allocated scratch passed through by the caller --
// confirm whether the parameter can be removed.
// build a list of all the offset columns and their indices for all input string columns and put
// them on the gpu
thrust::host_vector<column_preprocess_info> offset_columns;
offset_columns.reserve(t.num_columns()); // worst case
// collect only string columns
size_type column_index = 0;
std::for_each(t.begin(), t.end(), [&offset_columns, &column_index](cudf::column_view const& c) {
if (c.type().id() == STRING) {
cudf::column_device_view cdv((strings_column_view(c)).offsets(), 0, 0);
offset_columns.push_back(
column_preprocess_info{column_index, c.offset(), c.size(), c.has_nulls(), cdv});
}
column_index++;
});
rmm::device_vector<column_preprocess_info> device_offset_columns = offset_columns;
// compute column split information
rmm::device_vector<thrust::pair<size_type, size_type>> device_offsets(t.num_columns());
auto* offsets_p = device_offsets.data().get();
// single device pass: read the first and one-past-last offset of every
// string column to derive its char extent
thrust::for_each(rmm::exec_policy(stream)->on(stream),
device_offset_columns.begin(),
device_offset_columns.end(),
[offsets_p] __device__(column_preprocess_info const& cpi) {
offsets_p[cpi.index] =
thrust::make_pair(cpi.offsets.head<int32_t>()[cpi.offset],
cpi.offsets.head<int32_t>()[cpi.offset + cpi.size]);
});
// copy the computed offset pairs back to the host
thrust::host_vector<thrust::pair<size_type, size_type>> host_offsets(device_offsets);
thrust::host_vector<column_split_info> split_info(t.num_columns());
// finalize per-column buffer sizes on the host (bytes, padded to split_align)
std::for_each(offset_columns.begin(),
offset_columns.end(),
[&split_info, &host_offsets](column_preprocess_info const& cpi) {
int32_t offset_start = host_offsets[cpi.index].first;
int32_t offset_end = host_offsets[cpi.index].second;
auto num_chars = offset_end - offset_start;
split_info[cpi.index].data_buf_size =
cudf::util::round_up_safe(static_cast<size_t>(num_chars), split_align);
split_info[cpi.index].validity_buf_size =
cpi.has_nulls ? cudf::bitmask_allocation_size_bytes(cpi.size, split_align) : 0;
split_info[cpi.index].offsets_buf_size =
cudf::util::round_up_safe((cpi.size + 1) * sizeof(size_type), split_align);
split_info[cpi.index].num_chars = num_chars;
split_info[cpi.index].chars_offset = offset_start;
});
return split_info;
}
/**
* @brief Creates a contiguous_split_result object which contains a deep-copy of the input
* table_view into a single contiguous block of memory.
*
* The table_view contained within the contiguous_split_result will pass an expect_tables_equal()
* call with the input table. The memory referenced by the table_view and its internal column_views
* is entirely contained in single block of memory.
*/
contiguous_split_result alloc_and_copy(cudf::table_view const& t,
rmm::device_vector<column_split_info>& device_split_info,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// preprocess column split information for string columns.
thrust::host_vector<column_split_info> split_info =
preprocess_string_column_info(t, device_split_info, stream);
// compute the rest of the column sizes (non-string columns, and total buffer size)
size_t total_size = 0;
size_type column_index = 0;
std::for_each(
t.begin(), t.end(), [&total_size, &column_index, &split_info](cudf::column_view const& c) {
total_size +=
cudf::type_dispatcher(c.type(), column_buffer_size_functor{}, c, split_info[column_index]);
column_index++;
});
// allocate one buffer for the whole table
auto device_buf = std::make_unique<rmm::device_buffer>(total_size, stream, mr);
char* buf = static_cast<char*>(device_buf->data());
// copy (this would be cleaner with a std::transform, but there's an nvcc compiler issue in the
// way)
// columns are packed into the buffer in table order; each copy functor
// advances `buf` past the buffers it consumed.
std::vector<column_view> out_cols;
out_cols.reserve(t.num_columns());
column_index = 0;
std::for_each(
t.begin(), t.end(), [&out_cols, &buf, &column_index, &split_info](cudf::column_view const& c) {
cudf::type_dispatcher(
c.type(), column_copy_functor{}, c, split_info[column_index], buf, out_cols);
column_index++;
});
return contiguous_split_result{cudf::table_view{out_cols}, std::move(device_buf)};
}
}; // anonymous namespace
/**
* @brief Implementation: splits `input` at `splits`, then deep-copies each
* resulting sub-table into its own single contiguous device allocation.
*
* @param input table to split
* @param splits split points, forwarded to cudf::split()
* @param mr memory resource used to allocate each output buffer
* @param stream stream on which the preprocessing work is issued
*/
std::vector<contiguous_split_result> contiguous_split(cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto subtables = cudf::split(input, splits);
// optimization : for large numbers of splits this allocation can dominate total time
// spent if done inside alloc_and_copy(). so we'll allocate it once
// and reuse it.
//
// benchmark: 1 GB data, 10 columns, 256 splits.
// no optimization: 106 ms (8 GB/s)
// optimization: 20 ms (48 GB/s)
rmm::device_vector<column_split_info> device_split_info(input.num_columns());
std::vector<contiguous_split_result> result;
std::transform(subtables.begin(),
subtables.end(),
std::back_inserter(result),
[mr, stream, &device_split_info](table_view const& t) {
return alloc_and_copy(t, device_split_info, mr, stream);
});
return result;
}
}; // namespace detail
// Public entry point: forwards to the detail implementation on the default
// stream (the trailing (hipStream_t)0).
std::vector<contiguous_split_result> contiguous_split(cudf::table_view const& input,
std::vector<size_type> const& splits,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return cudf::detail::contiguous_split(input, splits, mr, (hipStream_t)0);
}
}; // namespace cudf
| 7eaae15a7d7595285545d498efd1b9c7303e11d0.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/utilities/bit.hpp>
#include <numeric>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Copies contents of `in` to `out`. Copies validity if present
* but does not compute null count.
*
* @param in column_view to copy from
* @param out mutable_column_view to copy to.
*/
template <size_type block_size, typename T, bool has_validity>
__launch_bounds__(block_size) __global__
void copy_in_place_kernel(column_device_view const in, mutable_column_device_view out)
{
// NOTE(review): assumes blockDim.x == block_size at launch -- tid and the
// grid stride are computed from the template constant, not blockDim.x.
const size_type tid = threadIdx.x + blockIdx.x * block_size;
const int warp_id = tid / cudf::detail::warp_size;
const size_type warps_per_grid = gridDim.x * block_size / cudf::detail::warp_size;
// begin/end indices for the column data
size_type begin = 0;
size_type end = in.size();
// warp indices. since 1 warp == 32 threads == sizeof(bit_mask_t) * 8,
// each warp will process one (32 bit) of the validity mask via
// __ballot_sync()
size_type warp_begin = cudf::word_index(begin);
size_type warp_end = cudf::word_index(end - 1);
// lane id within the current warp
const int lane_id = threadIdx.x % cudf::detail::warp_size;
// current warp.
size_type warp_cur = warp_begin + warp_id;
size_type index = tid;
// grid-stride loop: each warp owns one 32-bit validity word per iteration,
// each lane one element. No lane exits early, so the full warp mask below
// is safe -- out-of-range lanes simply contribute 'false' to the ballot.
while (warp_cur <= warp_end) {
bool in_range = (index >= begin && index < end);
bool valid = true;
if (has_validity) { valid = in_range && in.is_valid(index); }
if (in_range) { out.element<T>(index) = in.element<T>(index); }
// update validity
if (has_validity) {
// the final validity mask for this warp
int warp_mask = __ballot_sync(0xFFFF'FFFF, valid && in_range);
// only one guy in the warp needs to update the mask and count
if (lane_id == 0) { out.set_mask_word(warp_cur, warp_mask); }
}
// next grid
warp_cur += warps_per_grid;
index += block_size * gridDim.x;
}
}
/**
* @brief Copies contents of one string column to another. Copies validity if present
* but does not compute null count.
*
* The purpose of this kernel is to reduce the number of
* kernel calls for copying a string column from 2 to 1, since number of kernel calls is the
* dominant factor in large scale contiguous_split() calls. To do this, the kernel is
* invoked with using max(num_chars, num_offsets) threads and then doing separate
* bounds checking on offset, chars and validity indices.
*
* Outgoing offset values are shifted down to account for the new base address
* each column gets as a result of the contiguous_split() process.
*
* @param in num_strings number of strings (rows) in the column
* @param in offsets_in pointer to incoming offsets to be copied
* @param out offsets_out pointer to output offsets
* @param in validity_in_offset offset into validity buffer to add to element indices
* @param in validity_in pointer to incoming validity vector to be copied
* @param out validity_out pointer to output validity vector
* @param in offset_shift value to shift copied offsets down by
* @param in num_chars number of chars to copy
* @param in chars_in input chars to be copied
* @param out chars_out output chars to be copied.
*/
template <size_type block_size, bool has_validity>
__launch_bounds__(block_size) __global__
void copy_in_place_strings_kernel(size_type num_strings,
size_type const* __restrict__ offsets_in,
size_type* __restrict__ offsets_out,
size_type validity_in_offset,
bitmask_type const* __restrict__ validity_in,
bitmask_type* __restrict__ validity_out,
size_type offset_shift,
size_type num_chars,
char const* __restrict__ chars_in,
char* __restrict__ chars_out)
{
// NOTE(review): assumes blockDim.x == block_size at launch -- tid and the
// grid stride are derived from the template constant, not blockDim.x.
const size_type tid = threadIdx.x + blockIdx.x * block_size;
const int warp_id = tid / cudf::detail::warp_size;
const size_type warps_per_grid = gridDim.x * block_size / cudf::detail::warp_size;
// how many warps we'll be processing. with strings, the chars and offsets
// lengths may be different. so we'll just march the worst case.
size_type warp_begin = cudf::word_index(0);
size_type warp_end = cudf::word_index(std::max(num_chars, num_strings + 1) - 1);
// end indices for chars
size_type chars_end = num_chars;
// end indices for offsets
size_type offsets_end = num_strings + 1;
// end indices for validity and the last warp that actually should
// be updated
size_type validity_end = num_strings;
size_type validity_warp_end = cudf::word_index(num_strings - 1);
// lane id within the current warp
const int lane_id = threadIdx.x % cudf::detail::warp_size;
size_type warp_cur = warp_begin + warp_id;
size_type index = tid;
// grid-stride loop over the worst-case range; separate bounds checks gate
// the chars copy, the offsets copy, and the validity update.
while (warp_cur <= warp_end) {
if (index < chars_end) { chars_out[index] = chars_in[index]; }
if (index < offsets_end) {
// each output column starts at a new base pointer. so we have to
// shift every offset down by the point (in chars) at which it was split.
offsets_out[index] = offsets_in[index] - offset_shift;
}
// if we're still in range of validity at all
if (has_validity && warp_cur <= validity_warp_end) {
bool valid = (index < validity_end) && bit_is_set(validity_in, validity_in_offset + index);
// the final validity mask for this warp
int warp_mask = __ballot_sync(0xFFFF'FFFF, valid);
// only one guy in the warp needs to update the mask and count
if (lane_id == 0) { validity_out[warp_cur] = warp_mask; }
}
// next grid
warp_cur += warps_per_grid;
index += block_size * gridDim.x;
}
}
// align all column size allocations to this boundary so that all output column buffers
// start at that alignment.
static constexpr size_t split_align = 64;
/**
* @brief Information about the split for a given column. Bundled together
* into a struct because tuples were getting pretty unreadable.
*
* All *_buf_size fields are byte counts already rounded up to split_align.
*/
struct column_split_info {
size_t data_buf_size; // size of the data (including padding)
size_t validity_buf_size; // validity vector size (including padding)
size_t offsets_buf_size; // (strings only) size of offset column (including padding)
size_type num_chars; // (strings only) number of chars in the column
size_type chars_offset; // (strings only) offset from head of chars data
};
/**
* @brief Functor called by the `type_dispatcher` to incrementally compute total
* memory buffer size needed to allocate a contiguous copy of all columns within
* a source table.
*
* Side effect: fills in the data/validity buffer sizes of `split_info` for
* fixed-width columns (string columns are precomputed elsewhere).
*/
struct column_buffer_size_functor {
// Returns the total padded byte size needed for column `c`, recording the
// per-buffer breakdown in `split_info`.
template <typename T>
size_t operator()(column_view const& c, column_split_info& split_info)
{
split_info.data_buf_size = cudf::util::round_up_safe(c.size() * sizeof(T), split_align);
split_info.validity_buf_size =
(c.has_nulls() ? cudf::bitmask_allocation_size_bytes(c.size(), split_align) : 0);
return split_info.data_buf_size + split_info.validity_buf_size;
}
};
// Strings specialization: the three buffer sizes (chars, validity, offsets)
// were precomputed by preprocess_string_column_info; just return their sum.
template <>
size_t column_buffer_size_functor::operator()<string_view>(column_view const& c,
                                                           column_split_info& split_info)
{
  // this has already been precomputed in an earlier step. return the sum.
  return split_info.data_buf_size + split_info.validity_buf_size + split_info.offsets_buf_size;
}
/**
 * @brief Functor called by the `type_dispatcher` to copy a column into a contiguous
 * buffer of output memory.
 *
 * Used for copying each column in a source table into one contiguous buffer of memory.
 */
struct column_copy_functor {
  template <typename T>
  void operator()(column_view const& in,
                  column_split_info const& split_info,
                  char*& dst,
                  std::vector<column_view>& out_cols)
  {
    // outgoing pointers: data first, then the (optional) validity mask,
    // both carved out of the shared output buffer
    char* data = dst;
    bitmask_type* validity = split_info.validity_buf_size == 0
                               ? nullptr
                               : reinterpret_cast<bitmask_type*>(dst + split_info.data_buf_size);
    // increment working buffer so the next column starts after this one
    dst += (split_info.data_buf_size + split_info.validity_buf_size);
    // no work to do
    if (in.size() == 0) {
      out_cols.push_back(column_view{in.type(), 0, nullptr});
      return;
    }
    // custom copy kernel (which could probably just be an in-place copy() function in cudf).
    // round rows up to a whole number of warps so validity words can be
    // written one per warp inside the kernel.
    cudf::size_type num_els = cudf::util::round_up_safe(in.size(), cudf::detail::warp_size);
    constexpr int block_size = 256;
    cudf::detail::grid_1d grid{num_els, block_size, 1};
    // output copied column
    mutable_column_view mcv{in.type(), in.size(), data, validity, in.null_count()};
    // NOTE(review): both launches use the default stream (0) — confirm this is
    // intended given a stream is available upstream in alloc_and_copy.
    if (in.has_nulls()) {
      copy_in_place_kernel<block_size, T, true><<<grid.num_blocks, block_size, 0, 0>>>(
        *column_device_view::create(in), *mutable_column_device_view::create(mcv));
    } else {
      copy_in_place_kernel<block_size, T, false><<<grid.num_blocks, block_size, 0, 0>>>(
        *column_device_view::create(in), *mutable_column_device_view::create(mcv));
    }
    out_cols.push_back(mcv);
  }
};
// Strings specialization: carves chars, validity and offsets out of the shared
// output buffer and copies all three in a single combined kernel launch.
template <>
void column_copy_functor::operator()<string_view>(column_view const& in,
                                                  column_split_info const& split_info,
                                                  char*& dst,
                                                  std::vector<column_view>& out_cols)
{
  // outgoing pointers: [chars][validity][offsets] within the shared buffer
  char* chars_buf = dst;
  bitmask_type* validity_buf = split_info.validity_buf_size == 0
                                 ? nullptr
                                 : reinterpret_cast<bitmask_type*>(dst + split_info.data_buf_size);
  size_type* offsets_buf =
    reinterpret_cast<size_type*>(dst + split_info.data_buf_size + split_info.validity_buf_size);
  // increment working buffer
  dst += (split_info.data_buf_size + split_info.validity_buf_size + split_info.offsets_buf_size);
  // offsets column.
  strings_column_view strings_c(in);
  column_view in_offsets = strings_c.offsets();
  // note, incoming columns are sliced, so their size is fundamentally different from their child
  // offset columns, which are unsliced.
  size_type num_offsets = in.size() + 1;
  // enough threads to cover whichever is larger: the chars run or the offsets
  cudf::size_type num_threads =
    cudf::util::round_up_safe(std::max(split_info.num_chars, num_offsets), cudf::detail::warp_size);
  column_view in_chars = strings_c.chars();
  // a column with no strings will still have a single offset.
  CUDF_EXPECTS(num_offsets > 0, "Invalid offsets child column");
  // 1 combined kernel call that copies chars, offsets and validity in one pass. see notes on why
  // this exists in the kernel brief.
  constexpr int block_size = 256;
  cudf::detail::grid_1d grid{num_threads, block_size, 1};
  if (in.has_nulls()) {
    copy_in_place_strings_kernel<block_size, true><<<grid.num_blocks, block_size, 0, 0>>>(
      in.size(),                                        // num_rows
      in_offsets.head<size_type>() + in.offset(),       // offsets_in
      offsets_buf,                                      // offsets_out
      in.offset(),                                      // validity_in_offset
      in.null_mask(),                                   // validity_in
      validity_buf,                                     // validity_out
      split_info.chars_offset,                          // offset_shift
      split_info.num_chars,                             // num_chars
      in_chars.head<char>() + split_info.chars_offset,  // chars_in
      chars_buf);
  } else {
    copy_in_place_strings_kernel<block_size, false><<<grid.num_blocks, block_size, 0, 0>>>(
      in.size(),                                        // num_rows
      in_offsets.head<size_type>() + in.offset(),       // offsets_in
      offsets_buf,                                      // offsets_out
      0,                                                // validity_in_offset
      nullptr,                                          // validity_in
      nullptr,                                          // validity_out
      split_info.chars_offset,                          // offset_shift
      split_info.num_chars,                             // num_chars
      in_chars.head<char>() + split_info.chars_offset,  // chars_in
      chars_buf);
  }
  // output child columns
  column_view out_offsets{in_offsets.type(), num_offsets, offsets_buf};
  column_view out_chars{in_chars.type(), static_cast<size_type>(split_info.num_chars), chars_buf};
  // result: the parent string column view wrapping the two children just built
  out_cols.push_back(column_view(
    in.type(), in.size(), nullptr, validity_buf, in.null_count(), 0, {out_offsets, out_chars}));
}
/**
 * @brief Information about a string column in a table view.
 *
 * Used internally by preprocess_string_column_info as part of a device-accessible
 * vector for computing final string information in a single kernel call.
 */
struct column_preprocess_info {
  size_type index;    // position of this column within the table
  size_type offset;   // row offset of the (sliced) column view
  size_type size;     // number of rows in the (sliced) column view
  bool has_nulls;
  cudf::column_device_view offsets;  // device view of the (unsliced) offsets child
};
/**
 * @brief Preprocess information about all strings columns in a table view.
 *
 * In order to minimize how often we touch the gpu, we need to preprocess various pieces of
 * information about the string columns in a table as a batch process. This function builds a list
 * of the offset columns for all input string columns and computes this information with a single
 * thrust call. In addition, the vector returned is allocated for -all- columns in the table so
 * further processing of non-string columns can happen afterwards.
 *
 * The key things this function avoids
 * - avoiding reaching into gpu memory on the cpu to retrieve offsets to compute string sizes.
 * - creating column_device_views on the base string_column_view itself as that causes gpu memory
 *   allocation.
 *
 * NOTE(review): the device_split_info parameter is accepted but not used by this
 * implementation (local device vectors are allocated instead) — confirm whether
 * it was meant to be the reused scratch buffer from contiguous_split.
 */
thrust::host_vector<column_split_info> preprocess_string_column_info(
  cudf::table_view const& t,
  rmm::device_vector<column_split_info>& device_split_info,
  cudaStream_t stream)
{
  // build a list of all the offset columns and their indices for all input string columns and put
  // them on the gpu
  thrust::host_vector<column_preprocess_info> offset_columns;
  offset_columns.reserve(t.num_columns());  // worst case
  // collect only string columns
  size_type column_index = 0;
  std::for_each(t.begin(), t.end(), [&offset_columns, &column_index](cudf::column_view const& c) {
    if (c.type().id() == STRING) {
      // build a device view of just the offsets child — avoids the gpu
      // allocation a view of the whole strings column would trigger
      cudf::column_device_view cdv((strings_column_view(c)).offsets(), 0, 0);
      offset_columns.push_back(
        column_preprocess_info{column_index, c.offset(), c.size(), c.has_nulls(), cdv});
    }
    column_index++;
  });
  rmm::device_vector<column_preprocess_info> device_offset_columns = offset_columns;
  // compute column split information: one (start, end) char-offset pair per
  // table column; non-string slots are simply never written
  rmm::device_vector<thrust::pair<size_type, size_type>> device_offsets(t.num_columns());
  auto* offsets_p = device_offsets.data().get();
  thrust::for_each(rmm::exec_policy(stream)->on(stream),
                   device_offset_columns.begin(),
                   device_offset_columns.end(),
                   [offsets_p] __device__(column_preprocess_info const& cpi) {
                     offsets_p[cpi.index] =
                       thrust::make_pair(cpi.offsets.head<int32_t>()[cpi.offset],
                                         cpi.offsets.head<int32_t>()[cpi.offset + cpi.size]);
                   });
  thrust::host_vector<thrust::pair<size_type, size_type>> host_offsets(device_offsets);
  thrust::host_vector<column_split_info> split_info(t.num_columns());
  // finalize the per-column buffer sizes on the host (all padded to split_align)
  std::for_each(offset_columns.begin(),
                offset_columns.end(),
                [&split_info, &host_offsets](column_preprocess_info const& cpi) {
                  int32_t offset_start = host_offsets[cpi.index].first;
                  int32_t offset_end = host_offsets[cpi.index].second;
                  auto num_chars = offset_end - offset_start;
                  split_info[cpi.index].data_buf_size =
                    cudf::util::round_up_safe(static_cast<size_t>(num_chars), split_align);
                  split_info[cpi.index].validity_buf_size =
                    cpi.has_nulls ? cudf::bitmask_allocation_size_bytes(cpi.size, split_align) : 0;
                  split_info[cpi.index].offsets_buf_size =
                    cudf::util::round_up_safe((cpi.size + 1) * sizeof(size_type), split_align);
                  split_info[cpi.index].num_chars = num_chars;
                  split_info[cpi.index].chars_offset = offset_start;
                });
  return split_info;
}
/**
 * @brief Creates a contiguous_split_result object which contains a deep-copy of the input
 * table_view into a single contiguous block of memory.
 *
 * The table_view contained within the contiguous_split_result will pass an expect_tables_equal()
 * call with the input table. The memory referenced by the table_view and its internal column_views
 * is entirely contained in single block of memory.
 */
contiguous_split_result alloc_and_copy(cudf::table_view const& t,
                                       rmm::device_vector<column_split_info>& device_split_info,
                                       rmm::mr::device_memory_resource* mr,
                                       cudaStream_t stream)
{
  // preprocess column split information for string columns.
  // NOTE(review): device_split_info is forwarded but currently unused by the
  // callee — see note on preprocess_string_column_info.
  thrust::host_vector<column_split_info> split_info =
    preprocess_string_column_info(t, device_split_info, stream);
  // compute the rest of the column sizes (non-string columns, and total buffer size)
  size_t total_size = 0;
  size_type column_index = 0;
  std::for_each(
    t.begin(), t.end(), [&total_size, &column_index, &split_info](cudf::column_view const& c) {
      total_size +=
        cudf::type_dispatcher(c.type(), column_buffer_size_functor{}, c, split_info[column_index]);
      column_index++;
    });
  // allocate: one device buffer backs every column's data/validity/offsets
  auto device_buf = std::make_unique<rmm::device_buffer>(total_size, stream, mr);
  char* buf = static_cast<char*>(device_buf->data());
  // copy (this would be cleaner with a std::transform, but there's an nvcc compiler issue in the
  // way)
  std::vector<column_view> out_cols;
  out_cols.reserve(t.num_columns());
  column_index = 0;
  std::for_each(
    t.begin(), t.end(), [&out_cols, &buf, &column_index, &split_info](cudf::column_view const& c) {
      cudf::type_dispatcher(
        c.type(), column_copy_functor{}, c, split_info[column_index], buf, out_cols);
      column_index++;
    });
  // the result owns both the output views and the backing device buffer
  return contiguous_split_result{cudf::table_view{out_cols}, std::move(device_buf)};
}
}; // anonymous namespace
// Splits `input` at the given row boundaries and deep-copies each resulting
// sub-table into its own single contiguous device allocation.
std::vector<contiguous_split_result> contiguous_split(cudf::table_view const& input,
                                                      std::vector<size_type> const& splits,
                                                      rmm::mr::device_memory_resource* mr,
                                                      cudaStream_t stream)
{
  auto subtables = cudf::split(input, splits);
  // optimization : for large numbers of splits this allocation can dominate total time
  //   spent if done inside alloc_and_copy().  so we'll allocate it once
  //   and reuse it.
  //
  //   benchmark:  1 GB data, 10 columns, 256 splits.
  //   no optimization:    106 ms (8 GB/s)
  //   optimization:        20 ms (48 GB/s)
  rmm::device_vector<column_split_info> device_split_info(input.num_columns());
  std::vector<contiguous_split_result> result;
  std::transform(subtables.begin(),
                 subtables.end(),
                 std::back_inserter(result),
                 [mr, stream, &device_split_info](table_view const& t) {
                   return alloc_and_copy(t, device_split_info, mr, stream);
                 });
  return result;
}
}; // namespace detail
// Public entry point: forwards to the detail implementation on the default
// CUDA stream, recording an NVTX range for profiling.
std::vector<contiguous_split_result> contiguous_split(cudf::table_view const& input,
                                                      std::vector<size_type> const& splits,
                                                      rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  cudaStream_t const default_stream = static_cast<cudaStream_t>(0);
  return cudf::detail::contiguous_split(input, splits, mr, default_stream);
}
}; // namespace cudf
|
ed20180611de2d27c2af4d0582e002e7c886a2c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
// A one-element int array "sandwiched" between two sentinel ints. Intended so
// that accessing middle[offset] with offset != 0 lands just before/after the
// array — the out-of-bounds probe this tool exists to exercise.
struct IntSandwich {
  int beginning;
  int middle[1];
  int end;
};
// Reads (built with -DR) or writes (built with -DW) middle[offset]; with
// neither macro defined the body is empty. Launched with a single thread.
// NOTE: despite its name, hostMem is a device pointer.
__global__ void access_offset_kernel(struct IntSandwich *hostMem, int offset) {
#ifdef R
  volatile int i = hostMem->middle[offset];  // volatile keeps the load from being optimized away
#elif W
  hostMem->middle[offset] = 42;
#endif
}
// Parses "-o <offset>", allocates one IntSandwich on the device and launches
// the probe kernel with that offset. Returns 0 regardless of kernel outcome
// (errors are reported on stderr), preserving the original exit behavior.
int main(int argc, char** argv) {
  // exactly "-o <offset>" is expected, hence argc == 3
  if (argc != 3) {
    fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
    abort();
  }
  int offset = 0;
  int c;
  while ((c = getopt(argc, argv, "o:")) != -1) {
    switch(c) {
      case 'o':
        offset = atoi(optarg);
        break;
      default:
        fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
        abort();
    }
  }
  // NOTE: despite its name, hostMem is DEVICE memory.
  struct IntSandwich *hostMem = NULL;
  // check the allocation instead of silently launching on a bad pointer
  hipError_t err = hipMalloc((void**)&hostMem, sizeof(struct IntSandwich));
  if (err != hipSuccess) {
    fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
    abort();
  }
  hipLaunchKernelGGL(( access_offset_kernel), dim3(1),dim3(1), 0, 0, hostMem, offset);
  // surface launch/execution errors (the whole point of this tool is probing
  // out-of-bounds offsets) rather than discarding them
  err = hipGetLastError();
  if (err == hipSuccess) { err = hipDeviceSynchronize(); }
  if (err != hipSuccess) {
    fprintf(stderr, "kernel error: %s\n", hipGetErrorString(err));
  }
  hipFree(hostMem);
  hipDeviceReset();
  return 0;
}
| ed20180611de2d27c2af4d0582e002e7c886a2c3.cu | #include <stdio.h>
#include <unistd.h>
// A one-element int array "sandwiched" between two sentinel ints. Intended so
// that accessing middle[offset] with offset != 0 lands just before/after the
// array — the out-of-bounds probe this tool exists to exercise.
struct IntSandwich {
  int beginning;
  int middle[1];
  int end;
};
// Reads (built with -DR) or writes (built with -DW) middle[offset]; with
// neither macro defined the body is empty. Launched with a single thread.
// NOTE: despite its name, hostMem is a device pointer.
__global__ void access_offset_kernel(struct IntSandwich *hostMem, int offset) {
#ifdef R
  volatile int i = hostMem->middle[offset];  // volatile keeps the load from being optimized away
#elif W
  hostMem->middle[offset] = 42;
#endif
}
// Parses "-o <offset>", allocates one IntSandwich on the device and launches
// the probe kernel with that offset. Returns 0 regardless of kernel outcome
// (errors are reported on stderr), preserving the original exit behavior.
int main(int argc, char** argv) {
  // exactly "-o <offset>" is expected, hence argc == 3
  if (argc != 3) {
    fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
    abort();
  }
  int offset = 0;
  int c;
  while ((c = getopt(argc, argv, "o:")) != -1) {
    switch(c) {
      case 'o':
        offset = atoi(optarg);
        break;
      default:
        fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
        abort();
    }
  }
  // NOTE: despite its name, hostMem is DEVICE memory.
  struct IntSandwich *hostMem = NULL;
  // check the allocation instead of silently launching on a bad pointer
  cudaError_t err = cudaMalloc((void**)&hostMem, sizeof(struct IntSandwich));
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
    abort();
  }
  access_offset_kernel<<<1,1>>>(hostMem, offset);
  // surface launch/execution errors (the whole point of this tool is probing
  // out-of-bounds offsets) rather than discarding them
  err = cudaGetLastError();
  if (err == cudaSuccess) { err = cudaDeviceSynchronize(); }
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel error: %s\n", cudaGetErrorString(err));
  }
  cudaFree(hostMem);
  cudaDeviceReset();
  return 0;
}
|
c9702232915edf627be403a78fa3d5002056fe76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "process_image.h"
// Placeholder: color-to-grayscale conversion is not implemented yet; prints
// "TODO" as a stub marker.
void convert_color_to_gray_image()
{
	std::cout << "TODO" << std::endl;
}
| c9702232915edf627be403a78fa3d5002056fe76.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "process_image.h"
// Placeholder: color-to-grayscale conversion is not implemented yet; prints
// "TODO" as a stub marker.
void convert_color_to_gray_image()
{
	std::cout << "TODO" << std::endl;
}
|
5d5f48aebafa12a4b9026770ddf68832379803f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "header.h"
float hTime=0.0;
float dTime=0.0;
// Fills dQ with synthetic test data: element i gets idx = i and vid = i + 100.
// One thread per element; extra threads fall through the guard.
__global__ void kernelInitializeDataEmbedding(Embedding *dQ, int sizedQ){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= sizedQ) return;
	dQ[tid].idx = tid;
	dQ[tid].vid = tid + 100;
}
// Allocates device memory for dQ (sizedQ elements). On the very first call
// (first == 0) the buffer is only memset to -1 and `first` is incremented;
// on later calls the contents are initialized via kernelInitializeDataEmbedding.
// Fixes vs. previous version: the hipMalloc status is checked directly
// (it used to be clobbered by a following hipGetLastError()), and the dim3
// locals are declared before any `goto Error` so no initialization is jumped
// over (ill-formed C++).
inline hipError_t createEmbeddingElement(Embedding *&dQ, int sizedQ, int &first){
	hipError_t cudaStatus;
	// declare everything up front so the error gotos below are legal
	size_t nBytes = sizedQ * sizeof(Embedding);
	dim3 block(blocksize);
	dim3 grid((sizedQ + block.x - 1) / block.x);
	cudaStatus = hipMalloc((void**)&dQ, nBytes);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc dQ of createEmbeddingElement failed");
		goto Error;
	}
	if (first == 0)
	{
		// lazy-init path: just zero-fill (bytewise -1) and mark initialized
		cudaStatus = hipMemset(dQ, -1, nBytes);
		++first;
		return cudaStatus;
	}
	// populate the buffer with test data on the device
	hipLaunchKernelGGL(( kernelInitializeDataEmbedding), dim3(grid), dim3(block), 0, 0, dQ, sizedQ);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize of createEmbeddingElement failed");
		goto Error;
	}
Error:
	return cudaStatus;
}
// Debug kernel: one thread per element, prints the element's (idx, vid) pair
// together with the addresses involved.
__global__ void kernelPrint(Embedding *dQ, int sizedQ){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= sizedQ) return;
	printf("\n Thread %d: %p (idx:%d,vid:%d) (%p,%p)", tid, dQ, dQ[tid].idx, dQ[tid].vid, &(dQ[tid].idx), &(dQ[tid].vid));
}
// Host wrapper: prints every element of the device array dQ via kernelPrint.
// Fix: the error message used to blame createEmbeddingElement (copy-paste).
inline hipError_t print(Embedding *dQ, int sizedQ){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizedQ + block.x - 1) / block.x);
	hipLaunchKernelGGL(( kernelPrint), dim3(grid), dim3(block), 0, 0, dQ, sizedQ);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize of print(Embedding*) failed");
		goto Error;
	}
Error:
	return cudaStatus;
}
// Debug kernel: one thread per Q column. Each thread dumps its column's
// pointer, prevQ link and every (idx, vid) element.
__global__ void kernelPrint(Embedding **pdQ, int *d_arrSizedQ, int *d_arrPrevQ, int sizepdQ){
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= sizepdQ) return;
	printf("\n Thread %d: Value of pdQ:%p", tid, pdQ + tid);
	Embedding *column = pdQ[tid];
	const int prev = d_arrPrevQ[tid];
	const int count = d_arrSizedQ[tid];
	for (int j = 0; j < count; ++j)
	{
		printf("\n i=%d %p PrevQ:%d (idx:%d, vid:%d) ", tid, column, prev, column[j].idx, column[j].vid);
	}
}
// Host wrapper: prints every Q column referenced by pdQ, given each column's
// size and prevQ link.
// NOTE(review): the second parameter is named h_arrSizedQ but it is passed
// straight into a kernel, so it must actually be a device pointer — confirm
// with callers and consider renaming.
inline hipError_t print(Embedding **pdQ,int *h_arrSizedQ,int *d_arrPrevQ,int sizepdQ){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizepdQ+block.x-1)/block.x);
	printf("\n\n Array pdQ:\n");
	hipLaunchKernelGGL(( kernelPrint), dim3(grid),dim3(block), 0, 0, pdQ,h_arrSizedQ,d_arrPrevQ,sizepdQ);
	printf("\n");
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize of kernelPrint failed",cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
// Single-thread helper: stores the device pointer dQ into *pdQ so a
// host-held pointer value can be written into device memory.
__global__ void kernelgetPointer(Embedding **pdQ,Embedding *dQ){
	*pdQ=dQ;
}
// Copies sizepdQ column pointers from pdQ into d_temp (one thread per slot).
__global__ void kernelCopyEmbedding(Embedding **pdQ, int sizepdQ, Embedding **d_temp){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= sizepdQ) return;
	d_temp[tid] = pdQ[tid];
}
// Debug kernel: prints the pointer stored in each slot of d_temp.
__global__ void kernelPrintDoubleEmbedding(Embedding **d_temp, int newsize){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= newsize) return;
	printf("\n Thread %d: %p", tid, d_temp[tid]);
}
// Single-thread helper: writes dQ into the last slot of d_temp.
__global__ void kernelCopyLastEmbedding(Embedding **d_temp,Embedding *dQ,int newsize){
	d_temp[newsize-1]=dQ;
}
// Appends the device pointer dQ to the device-resident pointer array pdQ,
// growing the array by one (allocate temp one slot larger, copy, swap back).
// Updates sizepdQ to the new element count.
// Fixes vs. previous version: the temporary grow buffer is freed (it leaked
// on every call), locals are declared before the first `goto Error` (jumping
// over initializations is ill-formed C++), and a duplicated
// hipDeviceSynchronize() was removed.
// NOTE(review): the copy kernels are launched with a single block of
// `currentsize` threads, which breaks past the per-block thread limit —
// confirm expected array sizes.
inline hipError_t getPointer(Embedding **&pdQ, int &sizepdQ, Embedding *dQ){
	hipError_t cudaStatus = hipSuccess;
	Embedding **d_temp = nullptr;
	int currentsize = sizepdQ;
	int newsize = ++sizepdQ;
	size_t nBytes = newsize * sizeof(Embedding*);
	if (currentsize == 0){
		// first pointer: allocate a one-element array and store dQ into it
		cudaStatus = hipMalloc((void**)&pdQ, nBytes);
		if (cudaStatus != hipSuccess){
			fprintf(stderr, "\n hipMalloc pdQ failed");
			goto Error;
		}
		hipLaunchKernelGGL(( kernelgetPointer), dim3(1), dim3(1), 0, 0, pdQ, dQ);
		hipDeviceSynchronize();
		goto Error;
	}
	// grow: copy the existing pointers into a temp array one slot larger
	cudaStatus = hipMalloc((void**)&d_temp, nBytes);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc d_temp in getPointer failed");
		goto Error;
	}
	hipMemset(d_temp, 0, nBytes);
	hipLaunchKernelGGL(( kernelCopyEmbedding), dim3(1), dim3(currentsize), 0, 0, pdQ, currentsize, d_temp);
	hipDeviceSynchronize();
	// append the new pointer at the end of the temp array
	hipLaunchKernelGGL(( kernelCopyLastEmbedding), dim3(1), dim3(1), 0, 0, d_temp, dQ, newsize);
	hipDeviceSynchronize();
	// debug dump of the grown array
	hipLaunchKernelGGL(( kernelPrintDoubleEmbedding), dim3(1), dim3(sizepdQ), 0, 0, d_temp, sizepdQ);
	hipDeviceSynchronize();
	// reallocate the main array with the larger size and copy the temp back
	hipFree(pdQ);
	cudaStatus = hipMalloc((void**)&pdQ, nBytes);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc pdQ in getPointer failed");
		goto Error;
	}
	hipMemset(pdQ, -1, nBytes);
	hipLaunchKernelGGL(( kernelCopyEmbedding), dim3(1), dim3(sizepdQ), 0, 0, d_temp, sizepdQ, pdQ);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize kernelgetPointer of getPointer failed");
	}
Error:
	if (d_temp != nullptr) { hipFree(d_temp); }  // fix: previously leaked
	return cudaStatus;
}
// Device-to-device int copy: one thread per element of the source range.
__global__ void kernelCopyInt(int *d_arrSizedQ, int *d_tempArrSizedQ, int currentSize){
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= currentSize) return;
	d_tempArrSizedQ[tid] = d_arrSizedQ[tid];
}
// Single-thread helper: writes *temp into the last slot of d_tempArrSizedQ.
__global__ void kernelCopyLastInt(int *temp,int *d_tempArrSizedQ,int newsize){
	d_tempArrSizedQ[newsize-1]=*temp;
}
// Copies `size` ints from d_FromIntArray to d_ToIntArray on the device.
// Fix: grid size now uses standard ceil-division; the previous
// (size + block.x) / block.x launched one extra (fully idle) block whenever
// size was an exact multiple of the block size.
inline hipError_t copyDeviceToDeviceInt(int *d_FromIntArray, int *d_ToIntArray, int size){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((size + block.x - 1) / block.x);
	hipLaunchKernelGGL(( kernelCopyInt), dim3(grid), dim3(block), 0, 0, d_FromIntArray, d_ToIntArray, size);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize in copyDeviceToDeviceInt failed");
		goto Error;
	}
Error:
	return cudaStatus;
}
// Appends the scalar `sizedQ` to the device-resident int array d_arrSizedQ,
// growing it by one element (same grow-via-temp pattern as getPointer).
// Updates sized_arrSizedQ to the new element count.
// Fixes vs. previous version: the temporary buffers (d_tempArrSizedQ, temp)
// are freed (both leaked on every call) and all locals are declared before
// the first `goto Error` so no initialization is jumped over.
inline hipError_t getSizedQ(int *&d_arrSizedQ, int &sized_arrSizedQ, int sizedQ){
	hipError_t cudaStatus = hipSuccess;
	int *d_tempArrSizedQ = nullptr;
	int *temp = nullptr;
	int currentSize = sized_arrSizedQ;
	int newsize = ++sized_arrSizedQ;
	size_t nBytes = newsize * sizeof(int);
	if (currentSize == 0){
		// first element: allocate a one-int array and copy the value in
		cudaStatus = hipMalloc((void**)&d_arrSizedQ, nBytes);
		if (cudaStatus != hipSuccess){
			fprintf(stderr, "\n hipMalloc d_arrSizedQ in getSizedQ failed");
			goto Error;
		}
		hipMemcpy(d_arrSizedQ, &sizedQ, sizeof(int), hipMemcpyHostToDevice);
		goto Error;
	}
	// grow: temp array one slot larger, copy the old contents across
	cudaStatus = hipMalloc((void**)&d_tempArrSizedQ, nBytes);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc d_tempArrSizedQ in getSizedQ failed");
		goto Error;
	}
	hipMemset(d_tempArrSizedQ, -1, nBytes);
	cudaStatus = copyDeviceToDeviceInt(d_arrSizedQ, d_tempArrSizedQ, currentSize);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n copyDeviceToDeviceInt in getSizedQ failed");
		goto Error;
	}
	// stage the new value in device memory, then append it to the temp array
	cudaStatus = hipMalloc((void**)&temp, sizeof(int));
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc temp in getSizedQ failed");
		goto Error;
	}
	hipMemcpy(temp, &sizedQ, sizeof(int), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( kernelCopyLastInt), dim3(1), dim3(1), 0, 0, temp, d_tempArrSizedQ, newsize);
	hipDeviceSynchronize();
	// reallocate the main array with the larger size and copy the temp back
	hipFree(d_arrSizedQ);
	cudaStatus = hipMalloc((void**)&d_arrSizedQ, nBytes);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc d_arrSizedQ (regrow) in getSizedQ failed");
		goto Error;
	}
	hipMemset(d_arrSizedQ, 0, nBytes);
	copyDeviceToDeviceInt(d_tempArrSizedQ, d_arrSizedQ, newsize);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize in getSizedQ failed");
	}
Error:
	if (temp != nullptr) { hipFree(temp); }                        // fix: previously leaked
	if (d_tempArrSizedQ != nullptr) { hipFree(d_tempArrSizedQ); }  // fix: previously leaked
	return cudaStatus;
}
// Debug kernel: prints each element of a device int array, one thread each.
__global__ void kernelPrintInt(int *dArray, int sizedArray){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= sizedArray) return;
	printf("\n Thread %d: dArray:%d", tid, dArray[tid]);
}
// Host wrapper: prints a device int array via kernelPrintInt.
// Fix: the error message used to say "getPointer failed" (copy-paste).
inline hipError_t print(int *dArray, int sizedArray){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizedArray + block.x - 1) / block.x);
	hipLaunchKernelGGL(( kernelPrintInt), dim3(grid), dim3(block), 0, 0, dArray, sizedArray);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize of print(int*) failed");
		goto Error;
	}
Error:
	return cudaStatus;
}
// Creates and registers one new embedding column Q: fills dQ with data, then
// records its device pointer (pdQ), its size (d_arrSizedQ) and its prevQ link
// (d_arrPrevQ) in the caller's bookkeeping arrays.
// NOTE(review): dQ is taken by value, so the pointer allocated inside
// createEmbeddingElement is never visible to the caller — it is only reachable
// through pdQ afterwards. Confirm this is intended.
hipError_t makeColumnQ(Embedding *dQ,int sizedQ,Embedding **&pdQ,int &sizepdQ,int *&d_arrSizedQ,int &sized_arrSizedQ,int *&d_arrPrevQ,int &sized_arrPrevQ,int iPrevQ,int &first){
	hipError_t cudaStatus;
	// allocate dQ on the device and populate its elements
	cudaStatus=createEmbeddingElement(dQ,sizedQ,first);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n cudaStatuscreateEmbeddingElement in makeColumnQ failed",stderr);
		goto Error;
	}
	//// Debug: dump dQ contents
	//cudaStatus=print(dQ,sizedQ);
	//if(cudaStatus!=hipSuccess){
	//	fprintf(stderr,"\n print of kernel.cu failed",stderr);
	//	goto Error;
	//}
	// store dQ's device pointer into pdQ
	cudaStatus = getPointer(pdQ,sizepdQ,dQ);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n getPointer in makeColumnQ failed",stderr);
		goto Error;
	}
	// record dQ's size in d_arrSizedQ
	cudaStatus = getSizedQ(d_arrSizedQ,sized_arrSizedQ,sizedQ);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n getSizedQ in makeColumnQ failed",stderr);
		goto Error;
	}
	// record the prevQ link (index of the previous column, -1 for roots)
	cudaStatus = getSizedQ(d_arrPrevQ,sized_arrPrevQ,iPrevQ);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n getSizedQ of kernel.cu failed",stderr);
		goto Error;
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in makeColumnQ failed");
		goto Error;
	}
Error:
	return cudaStatus;
}
// Prints one full embedding: starts at row `firstEmbedding` of column
// `lastColumnQ`, then repeatedly follows the stored idx into the column named
// by prevQ until prevQ == -1. Launched with a single thread.
__global__ void kernelPrintEmbedding(Embedding **pdQ,int *d_arrSizedQ,int *d_arrPrevQ,int sizepdQ,int firstEmbedding,int lastColumnQ){
	Embedding *dQ = pdQ[lastColumnQ];
	int vid = dQ[firstEmbedding].vid;
	int idx = dQ[firstEmbedding].idx;
	int prevQ = d_arrPrevQ[lastColumnQ];
	printf("\n Q%d: (idx:%d, vid:%d) prevQ:%d",lastColumnQ,idx,vid,prevQ);
	while (true)
	{
		// hop to the previous column; idx indexes into it
		dQ=pdQ[prevQ];
		vid = dQ[idx].vid;
		idx = dQ[idx].idx;
		printf("\n Q%d: (idx:%d, vid:%d)",prevQ,idx,vid);
		prevQ = d_arrPrevQ[prevQ];
		if(prevQ==-1){
			printf("\nEnd of Embedding\n");
			return;
		}
	}
}
// Host wrapper: prints embedding number `firstEmbedding`. The index of the
// last Q column (`lastColumnQ`) is needed to walk the chain back to the root.
inline hipError_t printEmbedding(Embedding **pdQ,int *d_arrSizedQ,int *d_arrPrevQ,int sizepdQ,int firstEmbedding,int lastColumnQ){
	hipError_t cudaStatus;
	hipLaunchKernelGGL(( kernelPrintEmbedding), dim3(1),dim3(1), 0, 0, pdQ,d_arrSizedQ,d_arrPrevQ,sizepdQ,firstEmbedding,lastColumnQ);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in printEmbedding failed",cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
// Scatters each valid extension matching the edge label (li, lij, lj) into the
// two embedding columns dQ1/dQ2 at the compacted position produced by the
// exclusive scan (d_scanResult). dQ1 rows are roots (idx = -1, vid = source
// vertex vgi); dQ2 rows link back to dQ1 (idx = compacted row, vid = vgj).
__global__ void kernelSetValueFordQ(Extension *d_ValidExtension, int noElem_d_ValidExtension, Embedding *dQ1, Embedding *dQ2, int *d_scanResult, int li, int lij, int lj){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < noElem_d_ValidExtension){
		// Bug fix: the original condition tested `d_ValidExtension[i].lj` for
		// truthiness instead of comparing it with lj, so any extension with a
		// non-zero destination label matched regardless of the requested lj.
		if (d_ValidExtension[i].li == li && d_ValidExtension[i].lij == lij && d_ValidExtension[i].lj == lj){
			dQ1[d_scanResult[i]].idx = -1;
			dQ1[d_scanResult[i]].vid = d_ValidExtension[i].vgi;
			dQ2[d_scanResult[i]].idx = d_scanResult[i];
			dQ2[d_scanResult[i]].vid = d_ValidExtension[i].vgj;
		}
	}
}
// Builds the two root embedding columns (Q1, Q2) for the edge label
// (li, lij, lj): marks matching extensions, exclusive-scans the marks to get
// compacted positions, allocates Q1/Q2 of the compacted size, scatters the
// (vgi, vgj) vertex ids into them, and registers both columns (pointer, size,
// prevQ) in the caller's bookkeeping arrays.
// Q1/Q2 intentionally outlive this call — their pointers are stored in
// dArrPointerEmbedding — so they are not freed here.
// NOTE(review): d_M and d_scanResult are never freed (leak per call), and
// `same` is passed to kernelMatchLastElement BY VALUE so the device cannot
// update this host variable — the `same==true` branch looks unreachable.
// createEmbeddingRoot1 sidesteps this via getSizeBaseOnScanResult; confirm
// and consider retiring this version.
inline hipError_t createEmbeddingRoot(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,int *&dArrPrevQ,int &noElem_dArrPrevQ,Extension *d_ValidExtension,int noElem_d_ValidExtension,int li,int lij,int lj){
	hipError_t cudaStatus;
	Embedding *Q1=nullptr;//embedding dQ.
	Embedding *Q2=nullptr;
	int sizedQ=0;
	// 1. Allocate mark array d_M (same length as d_ValidExtension), zeroed.
	int* d_M;
	cudaStatus=hipMalloc((int**)&d_M,noElem_d_ValidExtension*sizeof(int));
	if (cudaStatus!=hipSuccess){
		fprintf(stderr,"\ncudaMalloc M failed");
		//exit(1);
		goto Error;
	}
	else
	{
		hipMemset(d_M,0,noElem_d_ValidExtension*sizeof(int));
	}
	// 2. One thread per extension: set d_M[i]=1 where the extension matches
	//    the edge label (li, lij, lj).
	//printf("\nMang d_ValidExtension");
	//printfExtension(d_ValidExtension,noElem_d_ValidExtension);
	//hipDeviceSynchronize();
	dim3 block(blocksize);
	dim3 grid((noElem_d_ValidExtension+block.x-1)/block.x);
	hipLaunchKernelGGL(( kernelMarkExtension), dim3(grid),dim3(block), 0, 0, d_ValidExtension,noElem_d_ValidExtension,d_M,li,lij,lj);
	hipDeviceSynchronize();
	/*printf("\n\nMang d_ValidExtension");
	printfExtension(d_ValidExtension,noElem_d_ValidExtension);
	hipDeviceSynchronize();
	printf("\nMang d-M:");
	printInt(d_M,noElem_d_ValidExtension);*/
	// 3. Exclusive scan of d_M into d_scanResult (compacted output positions).
	int* d_scanResult;
	cudaStatus=hipMalloc((int**)&d_scanResult,noElem_d_ValidExtension*sizeof(int));
	if (cudaStatus!=hipSuccess){
		fprintf(stderr,"\ncudaMalloc M failed");
		//exit(1);
		goto Error;
	}
	else
	{
		hipMemset(d_scanResult,0,noElem_d_ValidExtension*sizeof(int));
	}
	cudaStatus=scanV(d_M,noElem_d_ValidExtension,d_scanResult);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\nscanV() d_M createForwardEmbedding failed");
		//exit(1);
		goto Error;
	}
	// 4. Size Q1/Q2: last scan value, plus one if the last extension itself
	//    matches (exclusive scan misses it). Each Q element is {int idx, int vid}.
	bool same = false;
	hipLaunchKernelGGL(( kernelMatchLastElement), dim3(1),dim3(1), 0, 0, d_ValidExtension,noElem_d_ValidExtension,li,lij,lj,same);
	hipDeviceSynchronize();
	int noElem_d_Q=0;
	cudaStatus=getLastElement(d_scanResult,noElem_d_ValidExtension,noElem_d_Q);
	if (same==true){
		noElem_d_Q++;
	}
	sizedQ=noElem_d_Q;
	printf("\nnoElem_d_Q1:%d",noElem_d_Q);
	// Allocate Q1/Q2 now that the compacted size is known (bytewise -1 fill).
	cudaStatus = hipMalloc((void**)&Q1,sizedQ*sizeof(Embedding));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dQ1 in createEmbeddingRoot() failed",cudaStatus);
		goto Error;
	}
	else
	{
		hipMemset(Q1,-1,sizedQ*sizeof(Embedding));
	}
	cudaStatus = hipMalloc((void**)&Q2,sizedQ*sizeof(Embedding));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dQ1 in createEmbeddingRoot() failed",cudaStatus);
		goto Error;
	}
	else
	{
		hipMemset(Q2,-1,sizedQ*sizeof(Embedding));
	}
	// Scatter matching extensions into Q1/Q2 at their compacted positions.
	hipLaunchKernelGGL(( kernelSetValueFordQ), dim3(grid),dim3(block), 0, 0, d_ValidExtension,noElem_d_ValidExtension,Q1,Q2,d_scanResult,li,lij,lj);
	hipDeviceSynchronize();
	// Register both columns: pointers, sizes and prevQ links (-1 then 0).
	getPointer(dArrPointerEmbedding,noElem_dArrPointerEmbedding,Q1);
	getPointer(dArrPointerEmbedding,noElem_dArrPointerEmbedding,Q2);
	int iPrevQ=-1;
	for (int j = 0; j < 2; j++)
	{
		// record the column size in dArrSizedQ
		cudaStatus = getSizedQ(dArrSizedQ,noElem_dArrSizedQ,sizedQ);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n getSizedQ in makeColumnQ failed",stderr);
			goto Error;
		}
		// record the prevQ link in dArrPrevQ
		cudaStatus = getSizedQ(dArrPrevQ,noElem_dArrPrevQ,iPrevQ);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n getSizedQ of kernel.cu failed",stderr);
			goto Error;
		}
		iPrevQ++;
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in createEmbeddingRoot() failed",cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//Hm to Embedding Q ban u
inline hipError_t createEmbeddingRoot1(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,Extension *d_ValidExtension,int noElem_d_ValidExtension,int li,int lij,int lj){
hipError_t cudaStatus;
//V y l ln u tin to Embedding, chng ta to 2 ct Q c kch thc bng nhau v bng s lng Embedding tm thy trong d_ValidExtension ca nhn cnh (li,lij,lj)
//To Q1 v Q2 trn b nh device, sau chp a ch ca n vo bin mng dArrPointerEmbedding. Do , chng ta khng hu b nh ca Q1 v Q2 sau khi gi hm createEmbeddingRoot.
Embedding *Q1=nullptr;//embedding dQ.
Embedding *Q2=nullptr;
int sizedQ=0;
//To bo nhiu mng dQ, mi mng c s lng phn t l bao nhiu v ni dung mng l g?
//To 2 mng dQ
/*1.To mng M c kch thc bng vi d_ValidExtension v khi to gi tr cho cc phn t trong M bng 0.*/
int* d_M;
cudaStatus=hipMalloc((int**)&d_M,noElem_d_ValidExtension*sizeof(int));
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\ncudaMalloc M failed");
//exit(1);
goto Error;
}
else
{
hipMemset(d_M,0,noElem_d_ValidExtension*sizeof(int));
}
/*//2. To noElem_d_ValidExtension threads. Mi thread s kim tra phn t tng ng trong mng d_ValidExtension xem c bng cnh (li,lij,lj)
Nu bng th bc v tr ti M ln gi tr l 1*/
//printf("\nMang d_ValidExtension");
//printfExtension(d_ValidExtension,noElem_d_ValidExtension);
//hipDeviceSynchronize();
dim3 block(blocksize);
dim3 grid((noElem_d_ValidExtension+block.x-1)/block.x);
hipLaunchKernelGGL(( kernelMarkExtension), dim3(grid),dim3(block), 0, 0, d_ValidExtension,noElem_d_ValidExtension,d_M,li,lij,lj);
hipDeviceSynchronize();
/*printf("\n\nMang d_ValidExtension");
printfExtension(d_ValidExtension,noElem_d_ValidExtension);
hipDeviceSynchronize();
printf("\nMang d-M:");
printInt(d_M,noElem_d_ValidExtension);*/
/* 3. Exclusive Scan d_M
Kt qu scan lu vo mng d_scanResult
*/
int* d_scanResult;
cudaStatus=hipMalloc((int**)&d_scanResult,noElem_d_ValidExtension*sizeof(int));
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\ncudaMalloc M failed");
//exit(1);
goto Error;
}
else
{
hipMemset(d_scanResult,0,noElem_d_ValidExtension*sizeof(int));
}
cudaStatus=scanV(d_M,noElem_d_ValidExtension,d_scanResult);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\nscanV() d_M createForwardEmbedding failed");
//exit(1);
goto Error;
}
/*
4. To mng Q1 v Q2 c kch thc l (scanM[LastIndex]) nu phn t cui cng ca d_ValidExtension khng phi l (li,lij,lj).
Ngc li th Q c kch thc l (scanM[LastIndex]+1).
Mi phn t ca Q c cu trc l {int idx, int vid}
*/
//bool same = false;
//kernelMatchLastElement<<<1,1>>>(d_ValidExtension,noElem_d_ValidExtension,li,lij,lj,same);
//hipDeviceSynchronize();
int noElem_d_Q=0;
//cudaStatus=getLastElement(d_scanResult,noElem_d_ValidExtension,noElem_d_Q);
//
//if (same==true){
// noElem_d_Q++;
//}
cudaStatus = getSizeBaseOnScanResult(d_M,d_scanResult,noElem_d_ValidExtension,noElem_d_Q);
if(cudaStatus !=hipSuccess){
fprintf(stderr,"\n getSizeBaseOnScanResult noELem_d_Q in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
sizedQ=noElem_d_Q;
printf("\nnoElem_d_Q1:%d",noElem_d_Q);
//To Embedding dQ1, khi bit kch thc ca chng
cudaStatus = hipMalloc((void**)&Q1,sizedQ*sizeof(Embedding));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dQ1 in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
else
{
hipMemset(Q1,-1,sizedQ*sizeof(Embedding));
}
cudaStatus = hipMalloc((void**)&Q2,sizedQ*sizeof(Embedding));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dQ1 in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
else
{
hipMemset(Q2,-1,sizedQ*sizeof(Embedding));
}
hipLaunchKernelGGL(( kernelSetValueFordQ), dim3(grid),dim3(block), 0, 0, d_ValidExtension,noElem_d_ValidExtension,Q1,Q2,d_scanResult,li,lij,lj);
hipDeviceSynchronize();
cudaStatus = hipGetLastError();
if(cudaStatus !=hipSuccess){
fprintf(stderr,"\n kernelSetValueFordQ in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
cudaStatus = getPointer(dArrPointerEmbedding,noElem_dArrPointerEmbedding,Q1);
if(cudaStatus !=hipSuccess){
fprintf(stderr,"\n getPointer() into dArrPointerEmbedding in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
cudaStatus = getPointer(dArrPointerEmbedding,noElem_dArrPointerEmbedding,Q2);
if(cudaStatus !=hipSuccess){
fprintf(stderr,"\n getPointer() into dArrPointerEmbedding in createEmbeddingRoot1() failed",cudaStatus);
goto Error;
}
for (int j = 0; j < 2; j++)
{
//Ly kch thc ca dQ lu vo mng d_arrSizedQ
cudaStatus = getSizedQ(dArrSizedQ,noElem_dArrSizedQ,sizedQ);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n getSizedQ in makeColumnQ failed",stderr);
goto Error;
}
}
Error:
return cudaStatus;
}
//Kernel khi to gi tr cho right most path trn device */
/* Kernel: initialize the right-most-path array on the device so that
 * element k holds the value k (an identity sequence). */
__global__ void kernelInitializeValueForRMPath(int *dRMPath,int noElem_dRMPath){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dRMPath) return;
	dRMPath[tid] = tid;
}
/* To mt right most path trn device */
/* Create a right-most path on the device.
 * Allocates dRMPath with an initial size of 2 and fills it with the identity
 * sequence via kernelInitializeValueForRMPath.
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t createRMPath(int *&dRMPath,int &noElem_dRMPath){
	hipError_t cudaStatus;
	//The right most path initially holds two vertices
	noElem_dRMPath = 2;
	cudaStatus = hipMalloc((void**)&dRMPath, noElem_dRMPath*sizeof(int));
	if (cudaStatus != hipSuccess){
		//bug fix: cudaStatus was passed to fprintf without a conversion specifier
		fprintf(stderr, "\n hipMalloc dRMPath failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	hipLaunchKernelGGL(( kernelInitializeValueForRMPath), dim3(1),dim3(2), 0, 0, dRMPath, noElem_dRMPath);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n kernelInitializeValueForRMPath in createRMPath() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
/* Kernel: dump each element of the right-most-path array (debug only). */
__global__ void kernelPrintRMPath(int *dRMPath,int noElem_dRMPath){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dRMPath) return;
	printf("\n dRMPath[%d]: %d", tid, dRMPath[tid]);
}
//Hm hin th ni dung dRMPath trn device
/* Print the contents of dRMPath (a device array) through a debug kernel.
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t printRMPath(int *dRMPath,int noElem_dRMPath){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElem_dRMPath + block.x - 1)/block.x);
	hipLaunchKernelGGL(( kernelPrintRMPath), dim3(grid),dim3(block), 0, 0, dRMPath, noElem_dRMPath);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		//bug fix: original message blamed createRMPath() and lacked a conversion specifier
		fprintf(stderr, "\n kernelPrintRMPath in printRMPath() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//kernel tm s lng embedding hin ti
/* Kernel: copy the last entry of dArrSizedQ (the size of the most recent Q
 * column, i.e. the current embedding count) into dNumberOfEmbedding[0]. */
__global__ void kernelGetNumberOfEmbedding(int *dArrSizedQ,int noElem_dArrSizedQ,int *dNumberOfEmbedding){
	*dNumberOfEmbedding = dArrSizedQ[noElem_dArrSizedQ - 1];
}
//Hm tm s lng embedding hin ti
/* Find the current number of embeddings (the last element of dArrSizedQ).
 * The value is read on the device by a one-thread kernel and copied back
 * into noElem_dArrPointerdHO. The temporary device scalar is always freed.
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t findNumberOfEmbedding(int *dArrSizedQ,int noElem_dArrSizedQ,int &noElem_dArrPointerdHO){
	hipError_t cudaStatus;
	noElem_dArrPointerdHO = 0;
	int *dNumberOfEmbedding = nullptr;
	cudaStatus = hipMalloc((void**)&dNumberOfEmbedding, sizeof(int));
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc dNumberOfEmbedding in findNumberOfEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	else
	{
		hipMemset(dNumberOfEmbedding, 0, sizeof(int));
	}
	hipLaunchKernelGGL(( kernelGetNumberOfEmbedding), dim3(1),dim3(1), 0, 0, dArrSizedQ, noElem_dArrSizedQ, dNumberOfEmbedding);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		//bug fix: original message blamed createGraphHistory()
		fprintf(stderr, "\n kernelGetNumberOfEmbedding in findNumberOfEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	//bug fix: capture the hipMemcpy return value instead of testing a stale status;
	//a blocking device-to-host copy needs no extra synchronize afterwards
	cudaStatus = hipMemcpy(&noElem_dArrPointerdHO, dNumberOfEmbedding, sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMemcpy in findNumberOfEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	hipFree(dNumberOfEmbedding);
	return cudaStatus;
}
/* Allocate one dHO element: a zero-initialized device array of maxOfVer ints
 * (one vertex-mark slot per possible vertex of a graph).
 * Returns the hipMalloc status. */
inline hipError_t createElementdHO(int *&dHO,int maxOfVer){
	hipError_t cudaStatus;
	cudaStatus = hipMalloc((void**)&dHO, maxOfVer*sizeof(int));
	if (cudaStatus != hipSuccess){
		//bug fix: cudaStatus was passed to fprintf without a conversion specifier
		fprintf(stderr, "\n hipMalloc() for dHO in createElementdHO() failed (error %d)", (int)cudaStatus);
	}
	else
	{
		hipMemset(dHO, 0, maxOfVer*sizeof(int));
	}
	return cudaStatus;
}
//kernel ly pointer tr n b nh mng device ri gn cho dArrPointerdHO
/* Kernel: store the device pointer dHO into slot `pos` of the device-resident
 * pointer table dArrPointerdHO. */
__global__ void kernelAssignPointer(int **dArrPointerdHO,int pos,int *dHO){
	dArrPointerdHO[pos] = dHO;
}
//Hm ly pointer tr n b nh mng device ri gn cho dArrPointerdHO
/* Host wrapper: write the device pointer dHO into dArrPointerdHO[pos]
 * (a device-side pointer table) via a single-thread kernel. */
inline hipError_t assignPointer(int **&dArrPointerdHO,int pos,int *dHO){
	hipError_t status;
	hipLaunchKernelGGL(( kernelAssignPointer), dim3(1),dim3(1), 0, 0, dArrPointerdHO, pos, dHO);
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in assignPointer() failed",status);
		goto Error;
	}
Error:
	return status;
}
//Kernel in mng double pointer Int trn device
/* Kernel: debug-print the device pointer table dArrPointerdHO.
 * Thread i dumps the maxOfVer ints of row i. */
__global__ void kernelPrintDoublePointerInt(int **dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i < noElem_dArrPointerdHO){
		//cast avoids the signed/unsigned comparison of the original loop condition
		for (int j = 0; j < (int)maxOfVer; j++)
		{
			printf("\n Thread %d: j:%d V[%d]:%d", i, j, j, dArrPointerdHO[i][j]);
		}
	}
}
/* Hm in mng double pointer int (dArrPointerdHO) khi bit s lng phn t mng (noElem_dArrPointerdHO) v
* Kch thc ca mi phn t mng */
/* Print the device pointer table dArrPointerdHO (noElem_dArrPointerdHO rows,
 * maxOfVer ints each) through a debug kernel. */
inline hipError_t printDoublePointerInt(int **dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
	hipError_t status;
	dim3 blockDims(blocksize);
	dim3 gridDims((noElem_dArrPointerdHO + blockDims.x - 1)/blockDims.x);
	hipLaunchKernelGGL(( kernelPrintDoublePointerInt), dim3(gridDims),dim3(blockDims), 0, 0, dArrPointerdHO,noElem_dArrPointerdHO,maxOfVer);
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in printDoublePointerInt() failed",status);
		goto Error;
	}
Error:
	return status;
}
//Hm to mng double pointer Int trn device (dArrPointerdHO) khi bit trc s lng phn t cn to v kch thc ca mi mng.
/* Build the device pointer table dArrPointerdHO: one entry per embedding
 * (noElem_dArrPointerdHO entries), each pointing at a zero-filled device
 * array of maxOfVer ints allocated via createElementdHO().
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t createdArrPointerdHO(int **&dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
	hipError_t cudaStatus;
	cudaStatus = hipMalloc((void**)&dArrPointerdHO, noElem_dArrPointerdHO*sizeof(int*));
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc() for dArrPointerdHO in createdArrPointerdHO() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	for (int i = 0; i < noElem_dArrPointerdHO; i++)
	{
		int *dHO = nullptr;
		cudaStatus = createElementdHO(dHO, maxOfVer);
		if (cudaStatus != hipSuccess){
			fprintf(stderr, "\n createElementdHO() in createdArrPointerdHO() failed (error %d)", (int)cudaStatus);
			goto Error;
		}
		//bug fix: the assignPointer() return value was previously ignored
		cudaStatus = assignPointer(dArrPointerdHO, i, dHO);
		if (cudaStatus != hipSuccess){
			fprintf(stderr, "\n assignPointer() in createdArrPointerdHO() failed (error %d)", (int)cudaStatus);
			goto Error;
		}
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipDeviceSynchronize() in createdArrPointerdHO() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//Hm to phn t dHLN trn device
/* Allocate one dHLN element: a zero-initialized device array of noElem_dHLN
 * ints (one edge-mark slot per edge of the owning graph). */
inline hipError_t createElementdHLN(int *&dHLN,int noElem_dHLN){
	hipError_t status = hipMalloc((void**)&dHLN, noElem_dHLN*sizeof(int));
	if (status != hipSuccess){
		fprintf(stderr,"\n hipMalloc() dHLN in createElementdHLN() failed",status);
		goto Error;
	}
	hipMemset(dHLN, 0, noElem_dHLN*sizeof(int));
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in createElementdHLN() failed",status);
		goto Error;
	}
Error:
	return status;
}
//Hm to mng double pointer int dHLN
/* Build the device pointer table dArrPointerdHLN: one edge-mark array per
 * embedding. Row i is sized by the edge count of the graph that embedding i
 * belongs to (hNumberEdgeInEachGraph looked up through hArrGraphId). */
inline hipError_t createdArrPointerdHLN(int **&dArrPointerdHLN,int noElem_dArrPointerdHO,int *hNumberEdgeInEachGraph,int *hArrGraphId){
	hipError_t status;
	//One table slot per embedding — the same count as dArrPointerdHO
	status = hipMalloc((void**)&dArrPointerdHLN, noElem_dArrPointerdHO*sizeof(int*));
	if (status != hipSuccess){
		fprintf(stderr,"\n hipMalloc() dArrPointerdHLN in createdArrPointerdHLN() failed",status);
		goto Error;
	}
	for (int row = 0; row < noElem_dArrPointerdHO; ++row)
	{
		int graphId = hArrGraphId[row];
		int *dHLN = nullptr;
		status = createElementdHLN(dHLN, hNumberEdgeInEachGraph[graphId]);
		if (status != hipSuccess){
			fprintf(stderr,"\n createElementdHLN() in createdArrPointerdHLN() failed",status);
			goto Error;
		}
		status = assignPointer(dArrPointerdHLN, row, dHLN);
		if (status != hipSuccess){
			fprintf(stderr,"\n assignPointer() in createdArrPointerdHLN() failed",status);
			goto Error;
		}
	}
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in createdArrPointerdHLN() failed",status);
		goto Error;
	}
Error:
	return status;
}
//kernel tm graphid ca tt c cc embedding v lu kt qu vo mng
/* Kernel: derive the graph id of every embedding. Thread i reads the vid of
 * embedding i from the last Q column; integer division by maxOfVer (the
 * per-graph vertex stride) yields the graph id, written to dArrGraphId[i]. */
__global__ void kernelFindGraphIdOfAllEmbedding(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrGraphId,unsigned int maxOfVer,int noElemOfEmbedding,int *dArrSizedQ){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElemOfEmbedding) return;
	Embedding *lastQ = dArrPointerEmbedding[noElem_dArrPointerEmbedding-1];
	dArrGraphId[tid] = lastQ[tid].vid / maxOfVer;
}
//Hm tm graphid ca tt c cc embedding v lu kt qu vo mng
/* Compute the graph id of every embedding, leaving the result both on the
 * device (dArrGraphId, hipMalloc'd here) and on the host (hArrGraphId,
 * malloc'd here; exits on host allocation failure like the original).
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t findGraphIdOfAllEmbedding(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *&hArrGraphId,unsigned int maxOfVer,int *&dArrGraphId,int noElemOfEmbedding,int *dArrSizedQ){
	hipError_t cudaStatus;
	hArrGraphId = (int*)malloc(noElemOfEmbedding*sizeof(int));
	if (hArrGraphId == NULL){
		printf("\nMalloc hArrGraphId in findGraphIdOfAllEmbedding() failed\n");
		exit(1);
	}
	cudaStatus = hipMalloc((void**)&dArrGraphId, noElemOfEmbedding*sizeof(int));
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMalloc() dArrGraphId in findGraphIdOfAllEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	//braced scope keeps the goto above from jumping over the dim3 constructions
	{
		dim3 block(blocksize);
		dim3 grid((noElemOfEmbedding + block.x - 1)/block.x);
		hipLaunchKernelGGL(( kernelFindGraphIdOfAllEmbedding), dim3(grid),dim3(block), 0, 0, dArrPointerEmbedding, noElem_dArrPointerEmbedding, dArrGraphId, maxOfVer, noElemOfEmbedding, dArrSizedQ);
	}
	hipDeviceSynchronize();
	//bug fix: the original tested cudaStatus without ever capturing the launch status
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n kernelFindGraphIdOfAllEmbedding in findGraphIdOfAllEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	//bug fix: check the copy's own return value; a blocking D2H copy needs no extra sync
	cudaStatus = hipMemcpy(hArrGraphId, dArrGraphId, noElemOfEmbedding*sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMemcpy hArrGraphId in findGraphIdOfAllEmbedding() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//kernel in mng double pointer int dArrPointerdHLN
/* Kernel: debug-print dArrPointerdHLN. Thread i prints the edge marks of
 * embedding i; the row length is the edge count of the embedding's graph. */
__global__ void kernelPrintdArrPointerdHLN(int **dArrPointerdHLN,int noElem_dArrPointerdHO,int *dNumberEdgeInEachGraph,int *dArrGraphId){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrPointerdHO) return;
	int edgeCount = dNumberEdgeInEachGraph[dArrGraphId[tid]];
	for (int j = 0; j < edgeCount; j++)
	{
		printf("\n Thread %d: j:%d dArrPointerdHLN[%d][%d]:%d",tid,j,tid,j,dArrPointerdHLN[tid][j]);
	}
}
//Hm in mng double pointer int dArrPointerdHLN
/* Debug-print the dArrPointerdHLN table via kernelPrintdArrPointerdHLN.
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t printdArrPointerdHLN(int **dArrPointerdHLN,int noElem_dArrPointerdHO,int *dNumberEdgeInEachGraph,int *dArrGraphId){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElem_dArrPointerdHO+block.x - 1)/block.x);
	hipLaunchKernelGGL(( kernelPrintdArrPointerdHLN), dim3(grid),dim3(block), 0, 0, dArrPointerdHLN,noElem_dArrPointerdHO,dNumberEdgeInEachGraph,dArrGraphId);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		//bug fix: original message blamed kernelFindGraphIdOfAllEmbedding/findGraphIdOfAllEmbedding
		fprintf(stderr, "\n kernelPrintdArrPointerdHLN in printdArrPointerdHLN() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//kernel to mng dArrNumberEdgeOfEachdHLN da vo graphId thu thp c theo th t ca tng embedding lu trong mng dArrGraphId
/* Kernel: fill dArrNumberEdgeOfEachdHLN — for each embedding, look up the
 * edge count of its graph (through dArrGraphId) and store it per embedding. */
__global__ void kernelCreatedArrNumberEdgeOfEachdHLN(int *dArrNumberEdgeOfEachdHLN,int noElemOfEmbedding,int *dArrGraphId,int *dNumberEdgeInEachGraph){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElemOfEmbedding) return;
	int graphId = dArrGraphId[tid];
	dArrNumberEdgeOfEachdHLN[tid] = dNumberEdgeInEachGraph[graphId];
}
//Hm to mng dArrNumberEdgeOfEachdHLN da vo graphId thu thp c theo th t ca tng embedding lu trong mng dArrGraphId
/* Allocate and populate dArrNumberEdgeOfEachdHLN: one edge count per
 * embedding, derived from each embedding's graph id. */
inline hipError_t createdArrNumberEdgeOfEachdHLN(int *&dArrNumberEdgeOfEachdHLN,int noElemOfEmbedding,int *dArrGraphId,int *dNumberEdgeInEachGraph){
	hipError_t status;
	//One slot per embedding
	status = hipMalloc((void**)&dArrNumberEdgeOfEachdHLN, noElemOfEmbedding*sizeof(int));
	if (status != hipSuccess){
		fprintf(stderr,"\n hipMalloc() dArrNumberEdgeOfEachdHLN in createdArrPointerdHLN() failed",status);
		goto Error;
	}
	{
		dim3 blockDims(blocksize);
		dim3 gridDims((noElemOfEmbedding + blockDims.x - 1)/blockDims.x);
		hipLaunchKernelGGL(( kernelCreatedArrNumberEdgeOfEachdHLN), dim3(gridDims),dim3(blockDims), 0, 0, dArrNumberEdgeOfEachdHLN,noElemOfEmbedding,dArrGraphId,dNumberEdgeInEachGraph);
	}
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() kernelCreatedArrNumberEdgeOfEachdHLN in createdArrNumberEdgeOfEachdHLN() failed",status);
		goto Error;
	}
Error:
	return status;
}
//kernel in ni dung ca mng dArrPointerdHLN khi bit s lng cnh ca mi phn t tng ng ca embedding c lu trong mng dArrNumberEdgeOfEachdHLN
/* Kernel: debug-print dArrPointerdHLN using the per-embedding edge counts
 * precomputed in dArrNumberEdgeOfEachdHLN. */
__global__ void kernelprintDoublePointerInt(int **dArrPointerdHLN,int noElemOfEmbedding,int *dArrNumberEdgeOfEachdHLN){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElemOfEmbedding) return;
	int rowLen = dArrNumberEdgeOfEachdHLN[tid];
	for (int j = 0; j < rowLen; j++)
	{
		printf("\n Thread %d: j:%d dArrPointerdHLN[%d][%d]:%d",tid,j,tid,j,dArrPointerdHLN[tid][j]);
	}
}
//Overloading function printDoublePointerInt() in ni dung mng dArrPointerdHLN da vo s lng embedding v s lng cnh trong mi phn t
/* Overload of printDoublePointerInt(): dump dArrPointerdHLN where each row's
 * length comes from dArrNumberEdgeOfEachdHLN instead of a fixed width. */
inline hipError_t printDoublePointerInt(int **dArrPointerdHLN,int noElemOfEmbedding,int *dArrNumberEdgeOfEachdHLN){
	hipError_t status;
	dim3 blockDims(blocksize);
	dim3 gridDims((noElemOfEmbedding + blockDims.x - 1)/blockDims.x);
	hipLaunchKernelGGL(( kernelprintDoublePointerInt), dim3(gridDims),dim3(blockDims), 0, 0, dArrPointerdHLN,noElemOfEmbedding,dArrNumberEdgeOfEachdHLN);
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in printDoublePointerInt() failed",status);
		goto Error;
	}
Error:
	return status;
}
//Kernel: one thread per embedding. Starting at the last Q column, the thread
//walks its embedding backwards (via the idx links and dArrPrevQ) and marks,
//in that embedding's own history arrays, every vertex it uses
//(dArrPointerdHO[i][v]=2) and every edge it uses (dArrPointerdHLN[i][e]=2,
//in both directions, since the graph is treated as undirected).
__global__ void kernelAssignValueForGraphHistory(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrPrevQ,int noElemOfEmbedding,int *d_O,int *d_N,unsigned int maxOfVer,int **dArrPointerdHO,int **dArrPointerdHLN,int *dArrNumberEdgeOfEachdHLN){
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Each embedding updates its own graphHistory (two arrays: dArrPointerdHO — the vertices of the embedding thread i is processing — and dArrPointerdHLN — the edges joining those vertices).
if(i<noElemOfEmbedding){
int vid = dArrPointerEmbedding[noElem_dArrPointerEmbedding-1][i].vid; //from the last Q column, take the vid of embedding i
int indexOfFirstVertexInGraph = vid-(vid%maxOfVer); //the first global id vertex in graph
int toVid = vid;//"to" vertex of the edge currently belonging to the embedding
int idxOfVertex= (vid%maxOfVer); //position of the vertex to update inside dArrPointerdHO[i][idxOfVertex]
dArrPointerdHO[i][idxOfVertex]=2; //mark the right-most-path vertex of the embedding in its dArrPointerdHO
int prevQ= dArrPrevQ[noElem_dArrPointerEmbedding-1];
int newi=dArrPointerEmbedding[noElem_dArrPointerEmbedding-1][i].idx; //take the idx back-link as the new row index
while (true)
{
vid = dArrPointerEmbedding[prevQ][newi].vid; //fetch the predecessor element through prevQ and newi
int fromVid=vid; //"from" vertex of the edge belonging to the embedding
int idxEdge = d_O[vid]-d_O[indexOfFirstVertexInGraph]; //edge slot: offset of vid minus the offset of the graph's first vertex
int indexOfdN=d_O[fromVid];
while (d_N[indexOfdN]!=toVid){ //scan fromVid's adjacency list in d_N until toVid is found
idxEdge=idxEdge+1;
indexOfdN++;
}
int fromVidR=toVid; //same edge seen in the reverse direction (toVid -> fromVid)
int toVidR=fromVid;
int indexOfEdgeR=d_O[fromVidR]-d_O[indexOfFirstVertexInGraph];
indexOfdN=d_O[fromVidR];
while(d_N[indexOfdN]!=toVidR){
indexOfEdgeR++;
indexOfdN++;
}
//If this is not the first vertex, idxEdge was advanced by the adjacency scan above
//(the advance equals the position of toVid within fromVid's neighbour list)
idxOfVertex = (vid%maxOfVer); //mark the vertex as belonging to the embedding
dArrPointerdHO[i][idxOfVertex]=2;
dArrPointerdHLN[i][idxEdge]=2;//mark the edge as belonging to the embedding; the graph is undirected, so edge AB equals edge BA and the reverse slot must be marked too
dArrPointerdHLN[i][indexOfEdgeR]=2;
if(dArrPrevQ[prevQ]==-1) return; //first Q column reached: the whole embedding has been traversed
newi=dArrPointerEmbedding[prevQ][newi].idx; //otherwise follow the idx link into the previous column
prevQ=dArrPrevQ[prevQ]; //step to the previous Q column
toVid=fromVid; //the current "from" vertex becomes the next edge's "to" vertex
}
}
}
//Xy dng graphHistory cho tt c cc embedding
//Build the graphHistory for all embeddings.
//Outputs: dArrPointerdHO (per-embedding vertex-mark arrays), dArrPointerdHLN
//(per-embedding edge-mark arrays), dArrNumberEdgeOfEachdHLN (per-embedding
//edge counts); noElem_dArrPointerdHO receives the number of embeddings.
//Returns hipSuccess, or the first HIP error encountered.
inline hipError_t createGraphHistory(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int *dArrPrevQ,int noElem_dArrPointerEmbedding,int noElem_dArrSizedQ,int noElem_dArrPrevQ,int *d_O,int *d_LO,int numberOfElementd_O,int *d_N,int *d_LN,int numberOfElementd_N,unsigned int maxOfVer,int **&dArrPointerdHO,int &noElem_dArrPointerdHO,int **&dArrPointerdHLN,int *&dArrNumberEdgeOfEachdHLN,int *hNumberEdgeInEachGraph,int noElem_hNumberEdgeInEachGraph,int *dNumberEdgeInEachGraph){
hipError_t cudaStatus;
//the number of embeddings equals the value of noElem_dArrPointerdHO
cudaStatus = findNumberOfEmbedding(dArrSizedQ,noElem_dArrSizedQ,noElem_dArrPointerdHO);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n findNumberOfEmbedding() in createGraphHistory() failed",cudaStatus);
goto Error;
}
int noElemOfEmbedding=noElem_dArrPointerdHO;
//Print the embedding count just found
//printf("\nNumber Of Embedding: %d",noElem_dArrPointerdHO);
/* Build the graphHistory
* 1. Create the dArrPointerdHO array
* 2. Create the dArrPointerdHLN array
* 3. Create dArrNumberEdgeOfEachdHLN: describes the edge count of each entry of dArrPointerdHLN
* Steps 2 and 3 are independent of each other, so they could be processed in parallel.
* 4. Fill in the contents of the three arrays above.
*/
//1. Create per-embedding arrays of maxOfVer elements on the device and store their pointers in dArrPointerdHO
cudaStatus = createdArrPointerdHO(dArrPointerdHO,noElem_dArrPointerdHO,maxOfVer);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n createdArrPointerdHO() in createGraphHistory() failed",cudaStatus);
goto Error;
}
//Print the freshly created array (debug)
/*printf("\n ********** dArrPointerdHO *****************\n");
printDoublePointerInt(dArrPointerdHO,noElem_dArrPointerdHO,maxOfVer);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n printDoublePointerInt() in createGraphHistory() failed",cudaStatus);
goto Error;
}*/
//2. Create dArrPointerHLN
/* Find the edge count of each embedding:
* knowing an embedding's global vertex id gives its graphId;
* knowing the graphId gives the embedding's edge count.
* First compute the edge count of every graph in the database and keep it in an array <-- already done
* Then walk the vids of the embeddings' last column Q to learn which graph each embedding belongs to
*/
//Compute each embedding's graphId and store it in an array
int *dArrGraphId=nullptr; //used for printing the contents of dArrPointerdHLN
int *hArrGraphId=nullptr; //host copy: look up hNumberEdgeInEachGraph by graphId to size each dArrPointerdHLN row
cudaStatus = findGraphIdOfAllEmbedding(dArrPointerEmbedding,noElem_dArrPointerEmbedding,hArrGraphId,maxOfVer,dArrGraphId,noElemOfEmbedding,dArrSizedQ);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n findGraphIdOfAllEmbedding() in createGraphHistory() failed",cudaStatus);
goto Error;
}
cudaStatus =createdArrPointerdHLN(dArrPointerdHLN,noElem_dArrPointerdHO,hNumberEdgeInEachGraph,hArrGraphId);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n createdArrPointerdHLN() in createGraphHistory() failed",cudaStatus);
goto Error;
}
//Print the dArrPointerdHLN array (debug)
/*printf("\n***************** dArrPointerdHLN ***************\n");
cudaStatus = printdArrPointerdHLN(dArrPointerdHLN,noElem_dArrPointerdHO,dNumberEdgeInEachGraph,dArrGraphId);
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n printdArrPointerdHLN() in createGraphHistory() failed",cudaStatus);
goto Error;
}*/
//3. Create the dArrNumberEdgeOfEachdHLN array
cudaStatus = createdArrNumberEdgeOfEachdHLN(dArrNumberEdgeOfEachdHLN,noElemOfEmbedding,dArrGraphId,dNumberEdgeInEachGraph);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n createdArrNumberEdgeOfEachdHLN() in createGraphHistory() failed",cudaStatus);
goto Error;
}
//printf("\n**************dArrNumberEdgeOfEachdHLN**************\n");
//printInt(dArrNumberEdgeOfEachdHLN,noElemOfEmbedding);
//printf("\n**************dArrNumberEdgeOfEachdHLN**************\n");
//printDoublePointerInt(dArrPointerdHLN,noElemOfEmbedding,dArrNumberEdgeOfEachdHLN);
//4.1 Fill in the graphHistory contents
/* Mark the vertices and edges of each embedding using dArrPointerEmbedding (holding idx and vid) and dArrPointerdHLN (holding edges)
* Each thread is responsible for updating the data of one embedding
* NOTE(review): a single block of noElemOfEmbedding threads is launched; this
* exceeds the per-block thread limit for large embedding counts — confirm.
* NOTE(review): hArrGraphId (malloc'd by findGraphIdOfAllEmbedding) is not
* freed here — verify ownership with the caller.
*/
hipLaunchKernelGGL(( kernelAssignValueForGraphHistory), dim3(1),dim3(noElemOfEmbedding), 0, 0, dArrPointerEmbedding,noElem_dArrPointerEmbedding,dArrPrevQ,noElemOfEmbedding,d_O,d_N,maxOfVer,dArrPointerdHO,dArrPointerdHLN,dArrNumberEdgeOfEachdHLN);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipDeviceSynchronize() in createGraphHistory() failed",cudaStatus);
goto Error;
}
Error:
return cudaStatus;
}
//kernel tnh s cnh trong mi th trong CSDL v lu vo bin mng tng ng.
/* Kernel: count the edges of every graph in the database. Graph g owns the
 * d_O slots [g*maxOfVer, (g+1)*maxOfVer); its edge total is the offset at the
 * start of the next graph (or the length of d_N, for the last graph) minus
 * its own first offset. One thread per graph. */
__global__ void kernelGetNumberOfEdgeInGraph(int *d_O,int numberOfElementd_N,unsigned int numberOfGraph,unsigned int maxOfVer,int *dNumberEdgeInEachGraph){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid < numberOfGraph){
		int firstOffset = d_O[tid*maxOfVer];
		int edgeTotal;
		if (tid == (numberOfGraph-1)){
			//last graph: its adjacency entries run to the end of d_N
			edgeTotal = numberOfElementd_N - firstOffset;
		}
		else
		{
			edgeTotal = d_O[(tid+1)*maxOfVer] - firstOffset;
		}
		dNumberEdgeInEachGraph[tid] = edgeTotal;
	}
}
//Hm tnh s cnh ca tt c cc th trong CSDL, kt qu lu vo mt mng tng ng
/* Count the edges of every graph in the database. The result is left on the
 * device (dNumberEdgeInEachGraph, hipMalloc'd here) and mirrored on the host
 * (hNumberEdgeInEachGraph, malloc'd here; exits on host allocation failure).
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t getNumberOfEdgeInGraph(int *d_O,int numberOfElementd_N,unsigned int maxOfVer,int *&hNumberEdgeInEachGraph,int *&dNumberEdgeInEachGraph,unsigned int numberOfGraph){
	hipError_t cudaStatus;
	cudaStatus = hipMalloc((void**)&dNumberEdgeInEachGraph, numberOfGraph*sizeof(int));
	if (cudaStatus != hipSuccess){
		//bug fix: original message blamed createGraphHistory()
		fprintf(stderr, "\n hipMalloc() dNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	{
		dim3 block(blocksize);
		dim3 grid((numberOfGraph + block.x-1)/block.x);
		hipLaunchKernelGGL(( kernelGetNumberOfEdgeInGraph), dim3(grid),dim3(block), 0, 0, d_O, numberOfElementd_N, numberOfGraph, maxOfVer, dNumberEdgeInEachGraph);
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n kernelGetNumberOfEdgeInGraph in getNumberOfEdgeInGraph() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
	hNumberEdgeInEachGraph = (int*)malloc(numberOfGraph*sizeof(int));
	if (hNumberEdgeInEachGraph == NULL){
		printf("\n Malloc hNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed" );
		exit(1);
	}
	//bug fix: check the hipMemcpy return value instead of testing a stale status
	cudaStatus = hipMemcpy(hNumberEdgeInEachGraph, dNumberEdgeInEachGraph, numberOfGraph*sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n hipMemcpy hNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//kernel in tt c column Q ca embedding
/* Kernel: print every column Q of the embedding structure. Thread i dumps
 * all (idx,vid) pairs of column i; the column length is dArrSizedQ[i]. */
__global__ void kernelprintAllEmbeddingColumn(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrPointerEmbedding){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrPointerEmbedding) return;
	Embedding *column = dArrPointerEmbedding[tid];
	int length = dArrSizedQ[tid];
	for (int j = 0; j < length; j++)
	{
		printf("\n Thread %d: j:%d (idx:%d vid:%d)",tid,j,column[j].idx,column[j].vid);
	}
}
//Hm in tt c cc column Q ca embedding
/* Host wrapper: dump every embedding column via kernelprintAllEmbeddingColumn. */
inline hipError_t printAllEmbeddingColumn(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrPointerEmbedding){
	hipError_t status;
	dim3 blockDims(blocksize);
	dim3 gridDims((noElem_dArrPointerEmbedding + blockDims.x - 1)/blockDims.x);
	printf("\n****************** All Columm in Embedding dArrPointerEmbedding *************\n");
	hipLaunchKernelGGL(( kernelprintAllEmbeddingColumn), dim3(gridDims),dim3(blockDims), 0, 0, dArrPointerEmbedding,dArrSizedQ,noElem_dArrPointerEmbedding);
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in printAllEmbeddingColumn() failed",status);
		goto Error;
	}
Error:
	return status;
}
//kernel in mt embedding khi bit v tr Row ca n trong last column Q.
//Kernel: print one embedding, starting at row posRow of column posColumn
//(the last column Q) and following the idx back-links through the earlier
//columns until a -1 terminator is met. Debug use only, launched <<<1,1>>>.
__global__ void kernelprintEmbeddingFromPos(Embedding **dArrPointerEmbedding,int posColumn,int posRow){
Embedding *Q =dArrPointerEmbedding[posColumn];
printf("\n Q[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);
while (true)
{
posRow = Q[posRow].idx; //follow the back-link into the previous column
posColumn=posColumn-1;
Q=dArrPointerEmbedding[posColumn];
printf("\n Q[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);
posRow=Q[posRow].idx; //NOTE(review): idx is read again at the top of the loop, advancing two links per iteration after the -1 check — confirm this double advance is intended
if(posRow==-1) return; //-1 marks the first column: the chain ends here
}
}
//Hm in mt embedding khi bit v tr Row ca n trong last column Q.
/* Host wrapper: print one embedding chain starting from (posColumn,posRow)
 * of the last column Q, delegating to kernelprintEmbeddingFromPos. */
inline hipError_t printEmbeddingFromPos(Embedding **dArrPointerEmbedding,int posColumn,int posRow){
	hipError_t status;
	printf("\n ****Embeding from posColumn: %d posRow:%d **************\n",posColumn,posRow);
	hipLaunchKernelGGL(( kernelprintEmbeddingFromPos), dim3(1),dim3(1), 0, 0, dArrPointerEmbedding,posColumn,posRow);
	hipDeviceSynchronize();
	status = hipGetLastError();
	if (status != hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in printEmbeddingFromPos() failed",status);
		goto Error;
	}
Error:
	return status;
}
//kernel tm bc ca cc vid trn ct Q v lu kt qu vo mng dArrDegreeOfVid
//Kernel: compute the degree of each vertex referenced by column idxQ of the
//embeddings, writing the result (as float) to dArrDegreeOfVid[i].
//d_O appears to be an offset array over all graphs (one slot per global
//vertex id, -1 marking unused trailing slots — TODO confirm); the degree of
//vid is the distance to the next valid offset, or to the end of d_N for the
//final vertex of the database.
__global__ void kernelCalDegreeOfVid(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N,unsigned int maxOfVer,float *dArrDegreeOfVid){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<noElem_Embedding){
int vid = dArrPointerEmbedding[idxQ][i].vid;
float degreeOfV =0;
int nextVid;
int graphid;
int lastGraphId=(numberOfElementd_O-1)/maxOfVer;
if (vid==numberOfElementd_O-1){ //if this is the very last vertex slot in d_O
degreeOfV=numberOfElementd_N-d_O[vid]; //its degree is the total adjacency length minus d_O[vid]
}
else
{
nextVid = vid+1; //inspect the following slot
graphid=vid/maxOfVer;
if(d_O[nextVid]==-1 && graphid==lastGraphId){ //trailing unused slot in the last graph: run to the end of d_N
degreeOfV=numberOfElementd_N-d_O[vid];
}
else if(d_O[nextVid]==-1 && graphid!=lastGraphId){ //trailing unused slot elsewhere: jump to the next graph's first vertex
nextVid=(graphid+1)*maxOfVer;
degreeOfV=d_O[nextVid]-d_O[vid];
}
else
{
degreeOfV=d_O[nextVid]-d_O[vid];
}
}
dArrDegreeOfVid[i]=degreeOfV;
}
}
//Hm tm bc ca cc nh trn column Q v lu kt qu vo mng dArrDegreeOfVid
/* Compute the degree of every vertex referenced by column idxQ of the
 * embeddings; results are stored (as floats) in the freshly allocated and
 * zeroed device array dArrDegreeOfVid.
 * Returns hipSuccess, or the first HIP error encountered. */
inline hipError_t findDegreeOfVer(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N, unsigned int maxOfVer,float *&dArrDegreeOfVid){
	hipError_t cudaStatus;
	cudaStatus = hipMalloc((void**)&dArrDegreeOfVid, noElem_Embedding*sizeof(float));
	if (cudaStatus != hipSuccess){
		//bug fix: original message blamed findMaxDegreeOfVer()
		fprintf(stderr, "\ncudaMalloc dArrDegreeOfVid in findDegreeOfVer() failed");
		goto Error;
	}
	else
	{
		hipMemset(dArrDegreeOfVid, 0, noElem_Embedding*sizeof(float));
	}
	{
		dim3 block(blocksize);
		dim3 grid((noElem_Embedding + block.x - 1)/block.x);
		hipLaunchKernelGGL(( kernelCalDegreeOfVid), dim3(grid),dim3(block), 0, 0, dArrPointerEmbedding, idxQ, d_O, numberOfElementd_O, noElem_Embedding, numberOfElementd_N, maxOfVer, dArrDegreeOfVid);
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\n kernelCalDegreeOfVid in findDegreeOfVer() failed (error %d)", (int)cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//Hm tm bc ln nht ca cc nh vid trong ct Q v lu kt qu vo bin maxDegreeOfVer v float *dArrDegreeOfVid
/* Find the maximum vertex degree over column idxQ of the embeddings.
 * The per-vertex degrees are left in dArrDegreeOfVid (deliberately kept alive
 * for the subsequent forward-extension step); the maximum is returned through
 * maxDegreeOfVer.
 * Bug fixes vs. the original:
 *  - h_max/d_max/d_mutex are nullptr-initialized, so the shared cleanup path
 *    never frees an indeterminate pointer when an early `goto Error` fires
 *    (previously `free(h_max)` ran on an uninitialized pointer);
 *  - the error message no longer begins with the "\f" form-feed escape that
 *    swallowed the leading 'f' of "findDegreeOfVer";
 *  - d_mutex is now released (it was leaked). */
inline hipError_t findMaxDegreeOfVer(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N,unsigned int maxOfVer,int &maxDegreeOfVer,float *&dArrDegreeOfVid){
	hipError_t cudaStatus;
	float *h_max = nullptr;
	float *d_max = nullptr;
	int *d_mutex = nullptr;
	//Collect the degree of each vid in column Q into dArrDegreeOfVid (one entry per embedding)
	cudaStatus = findDegreeOfVer(dArrPointerEmbedding,idxQ,d_O, numberOfElementd_O,noElem_Embedding,numberOfElementd_N, maxOfVer,dArrDegreeOfVid);
	if (cudaStatus != hipSuccess){
		fprintf(stderr, "\nfindDegreeOfVer() in findMaxDegreeOfVer() failed");
		goto Error;
	}
	printf("\n*******dArrDegreeOfVid*************\n");
	printFloat(dArrDegreeOfVid,noElem_Embedding);
	//Reduce to the maximum; the result lands in maxDegreeOfVer
	h_max = (float*)malloc(sizeof(float));
	if (h_max == NULL){
		printf("\nMalloc h_max failed");
		exit(1);
	}
	cudaStatus = hipMalloc((void**)&d_max, sizeof(float));
	if (cudaStatus != hipSuccess){
		fprintf(stderr,"\ncudaMalloc d_max failed");
		goto Error;
	}
	else
	{
		hipMemset(d_max, 0, sizeof(float));
	}
	cudaStatus = hipMalloc((void**)&d_mutex, sizeof(int));
	if (cudaStatus != hipSuccess){
		fprintf(stderr,"\ncudaMalloc d_mutex failed");
		goto Error;
	}
	else
	{
		hipMemset(d_mutex, 0, sizeof(int));
	}
	{
		dim3 gridSize = 256;
		dim3 blockSize = 256;
		hipLaunchKernelGGL(( find_maximum_kernel), dim3(gridSize), dim3(blockSize), 0, 0, dArrDegreeOfVid, d_max, d_mutex, noElem_Embedding);
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess){
		fprintf(stderr,"\ncudaDeviceSynchronize find_maximum_kernel in findMaxDegreeOfVer() failed");
		goto Error;
	}
	//copy the reduced maximum back to the host
	cudaStatus = hipMemcpy(h_max, d_max, sizeof(float), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess){
		fprintf(stderr,"\nhipMemcpy d_max in findMaxDegreeOfVer() failed");
		goto Error;
	}
	maxDegreeOfVer = (int)(*h_max); //largest degree among the vertices of this Q column
	printf("\nMax degree of vid in Q column is: %d",maxDegreeOfVer);
Error:
	free(h_max);      //free(NULL) is a no-op, so the early-error path is safe
	hipFree(d_max);
	hipFree(d_mutex); //bug fix: d_mutex was previously leaked
	//dArrDegreeOfVid is intentionally NOT freed: the degrees are reused when finding the next forward extensions
	return cudaStatus;
}
//kernel tm cc m rng hp l v ghi nhn vo mng dArrV v dArrExtension tng ng.
//Kernel: find the valid forward extensions of every embedding row in column
//idxQ and record them into dArrV (valid flag) and dArrExtension (DFS-code
//fields, concrete vertex ids and owning row position).
//One thread per embedding row. For row i with vertex `vid`, the thread walks
//all of vid's neighbours through the CSR arrays (d_O offsets, d_N adjacency,
//d_LO vertex labels, d_LN edge labels). A neighbour `toVid` yields a valid
//forward extension when its label is >= minLabel and it does not already
//occur in the embedding (checked by following the idx row-pointer chain back
//through the embedding columns). Output slot: i*maxDegreeOfVer + j.
//NOTE(review): the device printf calls look like leftover debug output.
__global__ void kernelFindValidForwardExtension(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int noElem_Embedding,int *d_O,int *d_LO,int *d_N,int *d_LN,float *dArrDegreeOfVid,int maxDegreeOfVer,struct_V *dArrV,EXT *dArrExtension,int idxQ,int minLabel,int maxid){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<noElem_Embedding){
int posColumn =noElem_dArrPointerEmbedding-1; //duplicate check starts at the last column
int posRow=i;
int col = posColumn; //remembered so every neighbour restarts the walk here
int row = posRow;
Embedding *Q=dArrPointerEmbedding[idxQ];
int vid = Q[i].vid;
int degreeVid=__float2int_rn(dArrDegreeOfVid[i]); //degrees stored as float; round to int
//Iterate over vid's neighbours; the trip count is vid's degree.
int indexToVidIndN=d_O[vid]; //start of vid's adjacency list in d_N
int labelFromVid = d_LO[vid];
int toVid;
int labelToVid;
bool b=true; //stays true while toVid is not found inside the embedding
for (int j = 0; j < degreeVid; j++,indexToVidIndN++) //visit all neighbours of vid; a vertex absent from the embedding gives a forward edge since this is the last column
{
toVid=d_N[indexToVidIndN]; //candidate destination vertex
labelToVid = d_LO[toVid]; //its vertex label
posColumn=col;
posRow=row;
Q=dArrPointerEmbedding[posColumn];
printf("\nThread %d, j: %d has ToVidLabel:%d",i,j,labelToVid);
//1. Reject destinations whose label is below minLabel (DFS-code pruning).
if(labelToVid<minLabel) continue;
//2. Reject toVid if it already occurs in the embedding: compare against the
//   current row, then walk the idx chain from the last column to the first.
if(toVid==Q[posRow].vid) continue;
while (true)
{
posRow = Q[posRow].idx; //follow the row pointer one column back
posColumn=posColumn-1;
Q=dArrPointerEmbedding[posColumn];
if(toVid==Q[posRow].vid) {
b=false; break; //duplicate found -> not a forward extension
}
posRow=Q[posRow].idx; //NOTE(review): advances again before the loop re-advances at the top — verify this double step is intended
if(posRow==-1) break; //head of the chain reached: toVid is new
}
if (b==false){b=true; continue;}
int indexOfd_arr_V=i*maxDegreeOfVer+j; //one stripe of maxDegreeOfVer slots per row
int indexOfd_LN=indexToVidIndN; //edge label shares the adjacency index
dArrV[indexOfd_arr_V].valid=1;
printf("\ndArrV[%d].valid:%d",indexOfd_arr_V,dArrV[indexOfd_arr_V].valid);
//Fill the matching extension record.
dArrExtension[indexOfd_arr_V].vgi=vid;
dArrExtension[indexOfd_arr_V].vgj=toVid;
dArrExtension[indexOfd_arr_V].lij=d_LN[indexOfd_LN];
printf("\n");
printf("d_LN[%d]:%d ",indexOfd_LN,d_LN[indexOfd_LN]);
dArrExtension[indexOfd_arr_V].li=labelFromVid;
dArrExtension[indexOfd_arr_V].lj=labelToVid;
dArrExtension[indexOfd_arr_V].vi=idxQ; //DFS-code source id = right-most-path column index
dArrExtension[indexOfd_arr_V].vj=maxid+1; //a forward edge introduces a new vertex id
dArrExtension[indexOfd_arr_V].posColumn=col;
dArrExtension[indexOfd_arr_V].posRow=row;
}
}
}
//kernel in mng struct_V *dArrV trn device
//Debug kernel: one thread per element, dumping the dArrV flags together with
//the matching extension record from dArrExtension.
__global__ void kernelprintdArrV(struct_V *dArrV,int noElem_dArrV,EXT *dArrExtension){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dArrV) return;
	const EXT &e = dArrExtension[tid];
	printf("\n dArrV[%d].backward:%d ,dArrV[%d].valid:%d Extension:(vgi:%d,vgj:%d) (vi:%d vj:%d li:%d lij:%d lj:%d)",tid,dArrV[tid].backward,tid,dArrV[tid].valid,e.vgi,e.vgj,e.vi,e.vj,e.li,e.lij,e.lj);
}
//Hm in mng struct_V *dArrV
//Host wrapper: launches kernelprintdArrV over noElem_dArrV elements and
//reports any launch/execution error.
inline hipError_t printdArrV(struct_V *dArrV,int noElem_dArrV,EXT *dArrExtension){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_dArrV + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelprintdArrV), dim3(blocks),dim3(threads), 0, 0, dArrV,noElem_dArrV,dArrExtension);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n hipDeviceSynchronize() in printdArrV() failed", cudaStatus);
	}
	return cudaStatus;
}
//kernel trch phn t valid t mng dArrV v lu vo mng dArrValid
//Copies the .valid flag of every struct_V element into a plain int array
//(one thread per element) so it can be prefix-scanned afterwards.
__global__ void kernelExtractValidFromdArrV(struct_V *dArrV,int noElem_dArrV,int *dArrValid){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dArrV) return;
	dArrValid[tid] = dArrV[tid].valid;
}
//kernel trch cc m rng hp l t mng dArrExtension sang mng dExt
//Scatter kernel: every record flagged valid is copied from dArrExtension to
//dExt at the position given by the exclusive-scan result (stream compaction).
__global__ void kernelExtractValidExtensionTodExt(EXT *dArrExtension,int *dArrValid,int *dArrValidScanResult,int noElem_dArrV,EXT *dExt,int noElem_dExt){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrV) return;
	if (dArrValid[tid] != 1) return;
	const EXT &src = dArrExtension[tid];
	EXT &dst = dExt[dArrValidScanResult[tid]];   // compacted output slot
	dst.vi = src.vi;
	dst.vj = src.vj;
	dst.li = src.li;
	dst.lij = src.lij;
	dst.lj = src.lj;
	dst.vgi = src.vgi;
	dst.vgj = src.vgj;
	dst.posColumn = src.posColumn;
	dst.posRow = src.posRow;
}
//Kernel in ni dung mng EXT *dExt
//Debug kernel: one thread per EXT record, printing all of its fields.
__global__ void kernelPrintdExt(EXT *dExt,int noElem_dExt){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dExt) return;
	const EXT &e = dExt[tid];
	printf("\n Thread %d (vi:%d vj:%d li:%d lij:%d lj:%d) (vgi:%d vgj:%d) (posColumn:%d posRow:%d)",tid,e.vi,e.vj,e.li,e.lij,e.lj,e.vgi,e.vgj,e.posColumn,e.posRow);
}
//Hm in dExt
//Host wrapper: dumps the dExt array via kernelPrintdExt and reports any
//launch/execution error.
inline hipError_t printdExt(EXT *dExt,int noElem_dExt){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_dExt + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelPrintdExt), dim3(blocks),dim3(threads), 0, 0, dExt,noElem_dExt);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n hipDeviceSynchronize() kernelPrintdExt in printdExt() failed", cudaStatus);
	}
	return cudaStatus;
}
//Hm trch cc m rng hp l t mng dArrExtension sang mng dExt
//Compacts the valid extension records out of dArrExtension into a newly
//allocated device array dExt; noElem_dExt receives the compact length.
//Steps: (1) copy the dArrV valid flags into an int array, (2) scan it to get
//the output positions, (3) derive the compact size, (4) scatter the flagged
//records into dExt. The printf/printInt calls dump every intermediate array
//for debugging. Returns hipSuccess or the first failing HIP status.
inline hipError_t extractValidExtensionTodExt(EXT *dArrExtension,struct_V *dArrV,int noElem_dArrV,EXT *&dExt,int &noElem_dExt){
hipError_t cudaStatus;
//1. Extract the validity flags into dArrValid.
int *dArrValid = nullptr;
cudaStatus = hipMalloc((void**)&dArrValid, noElem_dArrV*sizeof(int));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dArrValid in extractValidExtensionTodExt() failed", cudaStatus);
goto Error;
}
dim3 block(blocksize);
dim3 grid((noElem_dArrV + block.x -1 )/block.x);
hipLaunchKernelGGL(( kernelExtractValidFromdArrV), dim3(grid),dim3(block), 0, 0, dArrV,noElem_dArrV,dArrValid);
hipDeviceSynchronize();
cudaStatus = hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipDeviceSynchronize() kernelExtractValidFromdArrV in extractValidExtensionTodExt() failed", cudaStatus);
goto Error;
}
//Dump dArrValid (debug).
printf("\n********dArrValid******\n");
printInt(dArrValid,noElem_dArrV);
//2. Scan dArrValid to obtain each record's output position.
int *dArrValidScanResult = nullptr;
cudaStatus = hipMalloc((void**)&dArrValidScanResult,sizeof(int)*noElem_dArrV);
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\n CudaMalloc dArrValidScanResult in extractValidExtensionToExt() failed");
goto Error;
}
else
{
hipMemset(dArrValidScanResult,0,sizeof(int)*noElem_dArrV);
}
cudaStatus = scanV(dArrValid,noElem_dArrV,dArrValidScanResult);
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\n scanV dArrValid in extractValidExtensionToExt() failed");
goto Error;
}
//Dump the scan result (debug).
printf("\n********dArrValidScanResult******\n");
printInt(dArrValidScanResult,noElem_dArrV);
//3. Derive the size of EXT *dExt from the flags + scan result.
noElem_dExt=0;
cudaStatus=getSizeBaseOnScanResult(dArrValid,dArrValidScanResult,noElem_dArrV,noElem_dExt);
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\n getSizeBaseOnScanResult in extractValidExtensionToExt() failed");
goto Error;
}
//Dump noElem_dExt (debug).
printf("\n******** noElem_dExt ******\n");
printf("\n noElem_dExt:%d",noElem_dExt);
//4. Allocate dExt with noElem_dExt slots and scatter the valid records into
//   it, driven by dArrValid / the scan positions.
cudaStatus = hipMalloc((void**)&dExt,sizeof(EXT)*noElem_dExt);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dExt in extractValidExtensionTodExt() failed", cudaStatus);
goto Error;
}
else
{
hipMemset(dExt,0,sizeof(EXT)*noElem_dExt);
}
dim3 blockb(blocksize);
dim3 gridb((noElem_dArrV+blockb.x -1)/blockb.x);
hipLaunchKernelGGL(( kernelExtractValidExtensionTodExt), dim3(gridb),dim3(blockb), 0, 0, dArrExtension,dArrValid,dArrValidScanResult,noElem_dArrV,dExt,noElem_dExt);
hipDeviceSynchronize();
cudaStatus = hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipDeviceSynchronize() kernelExtractValidExtensionTodExt in extractValidExtensionTodExt() failed", cudaStatus);
goto Error;
}
//Dump dExt (debug).
printf("\n********** dExt **********\n");
cudaStatus =printdExt(dExt,noElem_dExt);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n printdExt() in extractValidExtensionTodExt() failed", cudaStatus);
goto Error;
}
Error:
hipFree(dArrValid);
hipFree(dArrValidScanResult);
return cudaStatus;
}
//Hm Tm tt c cc m rng hp l forward t cc nh trn ct Q v lu vo mng dExt v noElem_dExt
//Finds every valid forward extension from the vertices of column idxQ and
//returns them in a newly allocated device array dExt (length noElem_dExt).
//Pipeline:
//  1. findMaxDegreeOfVer            -> max vertex degree in column Q
//                                      (also fills dArrDegreeOfVid)
//  2. allocate dArrV                -> validity flags, one stripe of
//                                      maxDegreeOfVer slots per embedding row
//  3. kernelFindValidForwardExtension -> fill dArrV / dArrExtension
//  4. extractValidExtensionTodExt     -> compact the valid records into dExt
//Fix over the previous version: dArrV is initialized to nullptr, so the
//cleanup path no longer passes an indeterminate pointer to hipFree when an
//early step fails; all pointers are declared before the first goto so no
//jump crosses an initialization.
inline hipError_t forwardExtensionQ(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrSizedQ,int noElem_Embedding,int idxQ,EXT *&dExt,int &noElem_dExt,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N,unsigned int maxOfVer,int minLabel,int maxid){
	hipError_t cudaStatus;
	int maxDegreeOfVer=0;
	float *dArrDegreeOfVid=nullptr; //per-row degree of each vid in column Q
	struct_V *dArrV=nullptr;        //validity flags (was uninitialized before the fix)
	EXT *dArrExtension=nullptr;     //candidate extension records
	int noElem_dArrV=0;
	//1. Largest degree among the vids of column Q (also fills dArrDegreeOfVid).
	cudaStatus = findMaxDegreeOfVer(dArrPointerEmbedding,idxQ,d_O,numberOfElementd_O,noElem_Embedding, numberOfElementd_N,maxOfVer,maxDegreeOfVer,dArrDegreeOfVid);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n findMaxDegreeOfVer() in forwardExtensionQ() failed",cudaStatus);
		goto Error;
	}
	//2. dArrV holds one slot per (embedding row, neighbour index):
	//   valid==1 marks a usable forward extension; backward==1 would mark a
	//   backward extension (not produced here).
	noElem_dArrV=maxDegreeOfVer*noElem_Embedding;
	cudaStatus=hipMalloc((void**)&dArrV,noElem_dArrV*sizeof(struct_V));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dArrV in failed");
		goto Error;
	}
	hipMemset(dArrV,0,noElem_dArrV*sizeof(struct_V));
	//3. dArrExtension mirrors dArrV: for each valid slot it records the
	//   DFS-code fields (vi,vj,li,lij,lj), the concrete vertex ids (vgi,vgj)
	//   and the owning row position. It is released before returning.
	cudaStatus = hipMalloc((void**)&dArrExtension,noElem_dArrV*sizeof(EXT));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dArrExtension forwardExtensionQ() failed",cudaStatus);
		goto Error;
	}
	hipMemset(dArrExtension,0,noElem_dArrV*sizeof(EXT));
	printf("\nnoElem_dArrV:%d",noElem_dArrV );
	{
		//4. One thread per embedding row scans that row's neighbours.
		dim3 block(blocksize);
		dim3 grid((noElem_Embedding+block.x - 1)/block.x);
		hipLaunchKernelGGL(( kernelFindValidForwardExtension), dim3(grid),dim3(block), 0, 0, dArrPointerEmbedding,noElem_dArrPointerEmbedding,noElem_Embedding,d_O,d_LO,d_N,d_LN,dArrDegreeOfVid,maxDegreeOfVer,dArrV,dArrExtension,idxQ,minLabel,maxid);
	}
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() kernelFindValidForwardExtension in forwardExtensionQ() failed",cudaStatus);
		goto Error;
	}
	//5. Compact the flagged records into dExt / noElem_dExt.
	cudaStatus =extractValidExtensionTodExt(dArrExtension,dArrV,noElem_dArrV,dExt,noElem_dExt);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n extractValidExtensionTodExt() in forwardExtensionQ() failed");
		goto Error;
	}
Error:
	//hipFree(nullptr) is a no-op, so the early-error paths are safe now.
	hipFree(dArrExtension);
	hipFree(dArrV);
	//NOTE(review): dArrDegreeOfVid is intentionally not freed here — a comment
	//in findMaxDegreeOfVer says the degrees are reused for the next extension
	//step; confirm ownership to avoid a leak.
	return cudaStatus;
}
//kernel ly chp a ch ca dExt lu vo dArrPointerExt
//Single-thread kernel: stores the device pointer dExt into slot `pos` of the
//device-side pointer table (device pointers must be written on the device).
__global__ void kernelGetPointerExt(EXT **dArrPointerExt,EXT *dExt,int pos){
	*(dArrPointerExt + pos) = dExt;
}
//Tm tt c cc m rng hp l forward v lu vo mng dArrPointerExt
//Collects all valid forward extensions for every column on the right-most
//path (dRMPath). For path column i the resulting device EXT array is stored
//in dArrPointerExt[i] and its length in dArrNoElemPointerExt[i].
//Fixes over the previous version:
//  - host buffers hRMPath and hArrNoElemPointerExt are now freed on every
//    path (they previously leaked on each call);
//  - a failed host malloc of hArrNoElemPointerExt no longer returns a stale
//    hipSuccess status.
//NOTE(review): noElem_dArrPointerExt is never assigned here — callers appear
//to use noElem_dRMPath as the table length; confirm.
inline hipError_t forwardExtension(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrSizedQ,int *dRMPath,int noElem_dRMPath,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N,unsigned int maxOfVer,EXT **&dArrPointerExt,int &noElem_dArrPointerExt,int minLabel,int maxid,int *&dArrNoElemPointerExt){
	hipError_t cudaStatus;
	int *hRMPath = NULL;              //host copy of the right-most path
	int *hArrNoElemPointerExt = NULL; //host-side per-column extension counts
	int noElem_Embedding = 0;
	//1. Total number of embeddings.
	cudaStatus = findNumberOfEmbedding(dArrSizedQ,noElem_dArrSizedQ,noElem_Embedding);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n findNumberOfEmbedding() in forwardExtension() failed",cudaStatus);
		goto Error;
	}
	//2. Bring the right-most path to the host so its columns can be iterated.
	hRMPath =(int*)malloc(sizeof(int)*noElem_dRMPath);
	if (hRMPath==NULL){
		printf("\n malloc hRMPath in forwardExtension() failed");
		exit(1);
	}
	cudaStatus = hipMemcpy(hRMPath,dRMPath,sizeof(int)*noElem_dRMPath,hipMemcpyDeviceToHost);
	if (cudaStatus !=hipSuccess){
		fprintf(stderr,"\n hipMemcpy dRMPath --> hRMPath failed",cudaStatus);
		goto Error;
	}
	printf("\n ***************** hRMPath **************\n");
	for (int i = 0; i < noElem_dRMPath; i++)
	{
		printf("\n hRMPath[%d]:%d",i,hRMPath[i]);
	}
	//3. One EXT* slot and one count per right-most-path column.
	cudaStatus = hipMalloc((void**)&dArrPointerExt,noElem_dRMPath*sizeof(EXT*));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dArrPointerExt in forwardExtension() failed",cudaStatus);
		goto Error;
	}
	hipMemset(dArrPointerExt,0,noElem_dRMPath*sizeof(EXT*));
	hArrNoElemPointerExt = (int*)malloc(sizeof(int)*noElem_dRMPath);
	if(hArrNoElemPointerExt==NULL){
		printf("\nMalloc hArrNoElemPointerExt in kernel.cu failed");
		cudaStatus = hipErrorMemoryAllocation; //was: fell through with stale hipSuccess
		goto Error;
	}
	memset(hArrNoElemPointerExt,0,sizeof(int)*noElem_dRMPath);
	cudaStatus = hipMalloc((void**)&dArrNoElemPointerExt,sizeof(int)*noElem_dRMPath);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dArrNoElemPointerExt in forwardExtension() failed",cudaStatus);
		goto Error;
	}
	hipMemset(dArrNoElemPointerExt,0,sizeof(int)*noElem_dRMPath);
	//4. Walk the path from the last column to the first; each iteration fills
	//   dExt with that column's valid forward extensions.
	for (int i = noElem_dRMPath-1; i>=0 ; i--)
	{
		int idxQ=hRMPath[i];
		printf("\n*********idxQ:%d***************\n",idxQ);
		EXT *dExt=nullptr;
		int noElem_dExt=0;
		cudaStatus = forwardExtensionQ(dArrPointerEmbedding,noElem_dArrPointerEmbedding,dArrSizedQ,noElem_dArrSizedQ,noElem_Embedding,idxQ,dExt,noElem_dExt,d_O,d_LO,d_N,d_LN, numberOfElementd_O, numberOfElementd_N, maxOfVer,minLabel,maxid);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n forwardExtensionQ() in forwardExtension() failed",cudaStatus);
			goto Error;
		}
		//Record dExt in the device pointer table (device-side store required).
		hArrNoElemPointerExt[i]=noElem_dExt;
		hipLaunchKernelGGL(( kernelGetPointerExt), dim3(1),dim3(1), 0, 0, dArrPointerExt,dExt,i);
		hipDeviceSynchronize();
		cudaStatus=hipGetLastError();
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipDeviceSynchronize() kernelGetPointerExt in forwardExtension() failed",cudaStatus);
			goto Error;
		}
	}
	//5. Publish the per-column counts on the device.
	cudaStatus = hipMemcpy(dArrNoElemPointerExt,hArrNoElemPointerExt,sizeof(int)*noElem_dRMPath,hipMemcpyHostToDevice);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy() hArrNoElemPointerExt sang dArrNoElemPointerExt in forwardExtension() failed",cudaStatus);
		goto Error;
	}
Error:
	free(hRMPath);              //free(NULL) is a no-op
	free(hArrNoElemPointerExt);
	return cudaStatus;
}
//kernel in mng dArrPointerExt
//Debug kernel: thread i dumps every EXT record of column i.
//Fix: the inner loop previously reused the name `i`, shadowing the thread
//index, so the per-record "Thread %d" line printed the element index instead
//of the thread id; the inner variable is now `j` and the redundant second
//read of dArrNoElemPointerExt[i] is gone.
__global__ void kernelprintdArrPointerExt(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int noElem_dArrPointerExt){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if(i<noElem_dArrPointerExt){
		int noElem_dExt=dArrNoElemPointerExt[i];
		printf("\nThread %d: noElem_dExt:%d",i,noElem_dExt);
		if(noElem_dExt>0){
			EXT* dExt= dArrPointerExt[i];
			printf("\n dExt_value:%p dExt_address:%p ",dExt,&dExt);
			for (int j = 0; j < noElem_dExt; j++)
			{
				printf("\n Thread %d (vi:%d vj:%d li:%d lij:%d lj:%d) (vgi:%d vgj:%d) (posColumn:%d posRow:%d)",i,dExt[j].vi,dExt[j].vj,dExt[j].li,dExt[j].lij,dExt[j].lj,dExt[j].vgi,dExt[j].vgj,dExt[j].posColumn,dExt[j].posRow);
			}
		}
	}
}
//Hm in mng dArrPointerExt
//Host wrapper: dumps every column of dArrPointerExt via
//kernelprintdArrPointerExt and reports any launch/execution error.
inline hipError_t printdArrPointerExt(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int noElem_dArrPointerExt){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_dArrPointerExt + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelprintdArrPointerExt), dim3(blocks),dim3(threads), 0, 0, dArrPointerExt,dArrNoElemPointerExt,noElem_dArrPointerExt);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\ncudaDeviceSynchronize() in printArrPointerdExt() failed");
	}
	return cudaStatus;
}
//Hm gii phng b nh Ext** dArrPointerExt v dArr
//Releases every per-column EXT array referenced by dArrPointerExt, then the
//device pointer table itself and the companion count array.
//Fixes over the previous version: the host shadow buffer is now freed, the
//error messages name this function (the hipMemcpy message referred to
//printArrPointerdExt and started with a broken "\h" escape), and the
//comma-operator before exit(1) is gone.
inline hipError_t cudaFreeArrPointerExt(EXT **&dArrPointerExt,int *&dArrNoElemPointerExt,int noElem_dArrPointerExt){
	hipError_t cudaStatus;
	EXT **hArrPointerExt = (EXT**)malloc(sizeof(EXT*)*noElem_dArrPointerExt);
	if(hArrPointerExt==NULL){
		printf("\n malloc hArrPointerExt in cudaFreeArrPointerExt failed");
		exit(1);
	}
	//The device pointer table must be mirrored on the host before its entries
	//can be passed to hipFree.
	cudaStatus = hipMemcpy(hArrPointerExt,dArrPointerExt,noElem_dArrPointerExt*sizeof(EXT*),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy() in cudaFreeArrPointerExt() failed");
		goto Error;
	}
	for (int i = 0; i < noElem_dArrPointerExt; i++)
	{
		if (hArrPointerExt[i]!=NULL){
			hipFree(hArrPointerExt[i]);
		}
	}
	hipFree(dArrPointerExt);
	hipFree(dArrNoElemPointerExt);
Error:
	free(hArrPointerExt); //was leaked before
	return cudaStatus;
}
//Hm gii phng b nh Embedding *dArrPointerEmbedding v dArrSizeQ
//Releases every per-column Embedding array referenced by
//dArrPointerEmbedding, then the device pointer table and the companion
//dArrSizedQ array.
//Fixes over the previous version: the host shadow buffer is now freed, the
//local name no longer misleadingly says "Ext", and the hipMemcpy error
//message loses its broken "\h" escape.
inline hipError_t cudaFreeArrPointerEmbedding(Embedding **&dArrPointerEmbedding,int *&dArrSizedQ,int noElem_dArrPointerEmbedding){
	hipError_t cudaStatus;
	Embedding **hArrPointerEmbedding = (Embedding**)malloc(sizeof(Embedding*)*noElem_dArrPointerEmbedding);
	if(hArrPointerEmbedding==NULL){
		printf("\n malloc hArrPointerEmbedding in cudaFreeArrPointerEmbedding() failed");
		exit(1);
	}
	//Mirror the device pointer table on the host so the individual arrays can
	//be passed to hipFree.
	cudaStatus = hipMemcpy(hArrPointerEmbedding,dArrPointerEmbedding,noElem_dArrPointerEmbedding*sizeof(Embedding*),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy() in cudaFreeArrPointerEmbedding() failed");
		goto Error;
	}
	for (int i = 0; i < noElem_dArrPointerEmbedding; i++)
	{
		if (hArrPointerEmbedding[i]!=NULL){
			hipFree(hArrPointerEmbedding[i]);
		}
	}
	hipFree(dArrPointerEmbedding);
	hipFree(dArrSizedQ);
Error:
	free(hArrPointerEmbedding); //was leaked before
	return cudaStatus;
}
//Kernel nh x nhn cnh sang v tr tng ng trong dArrAllPossibleExtension v set gi tr ti bng 1
//Marks the presence of each extension's (lij,lj) pair: one thread per record
//of column posdArrPointerExt writes a 1 into the flat Lv*Le table at index
//lij*Lv + lj.
__global__ void kernelassigndAllPossibleExtension(EXT **dArrPointerExt,int posdArrPointerExt,int Lv,int Le,int *dArrAllPossibleExtension,int noElem_PointerExt){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_PointerExt) return;
	const EXT &e = dArrPointerExt[posdArrPointerExt][tid];
	dArrAllPossibleExtension[e.lij * Lv + e.lj] = 1;
}
//Hm duyt qua cc phn t trong mng dExt v set gi tr 1 ti v tr tng ng trong mng kt qu dArrAllPossibleExtension
//Host wrapper: marks every occurring (lij,lj) pair of column
//posdArrPointerExt in the Lv*Le presence table, then dumps the table.
inline hipError_t assigndAllPossibleExtension(EXT **dArrPointerExt,int posdArrPointerExt,int Lv,int Le,int *dArrAllPossibleExtension,int noElem_PointerExt){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_PointerExt + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelassigndAllPossibleExtension), dim3(blocks),dim3(threads), 0, 0, dArrPointerExt, posdArrPointerExt, Lv, Le,dArrAllPossibleExtension,noElem_PointerExt);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n hipDeviceSynchronize kernelassigndAllPossibleExtension in assigndAllPossibleExtension() failed");
		return cudaStatus;
	}
	//Dump the presence table (debug).
	cudaStatus = printInt(dArrAllPossibleExtension,Lv*Le);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n printInt(dArrAllPossibleExtension,Lv*Le) in assigndAllPossibleExtension() failed");
	}
	return cudaStatus;
}
//Kernel gn gi tr cho mng dArrUniEdge
//Scatter kernel: for each set slot of the presence table, decode (lij,lj)
//from the flat index (row-major with width Lv) and write the UniEdge record
//at the position given by the scan result; li is shared and read from
//dFromLi[0].
__global__ void kernelassigndArrUniEdge(int *dArrAllPossibleExtension,int *dArrAllPossibleExtensionScanResult,int noElem_dArrAllPossibleExtension,UniEdge *dArrUniEdge,int Lv,int *dFromLi){
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if (tid >= noElem_dArrAllPossibleExtension) return;
	if (dArrAllPossibleExtension[tid] != 1) return;
	UniEdge &out = dArrUniEdge[dArrAllPossibleExtensionScanResult[tid]];
	out.li = dFromLi[0];
	out.lij = tid / Lv;  //edge label = row of the flattened table
	out.lj = tid % Lv;   //destination label = column of the flattened table
}
//Hm gn gi tr cho mng dArrUniEdge
//Host wrapper: scatters the unique edges into dArrUniEdge via
//kernelassigndArrUniEdge and reports any launch/execution error.
inline hipError_t assigndArrUniEdge(int *dArrAllPossibleExtension,int *dArrAllPossibleExtensionScanResult,int noElem_dArrAllPossibleExtension,UniEdge *&dArrUniEdge,int Lv,int *dFromLi){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_dArrAllPossibleExtension + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelassigndArrUniEdge), dim3(blocks),dim3(threads), 0, 0, dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,dArrUniEdge,Lv,dFromLi);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n hipDeviceSynchronize() in assigndArrUniEdge() failed");
	}
	return cudaStatus;}
//kernel ly nhn from Li
//Single-thread kernel: reads the source label `li` of the first extension in
//column `pos` and stores it in dFromLi[0].
__global__ void kernelGetFromLabel(EXT **dArrPointerExt,int pos,int *dFromLi){
	*dFromLi = dArrPointerExt[pos][0].li;
}
//kernel getPointerUniEdge
//Single-thread kernel: publishes dArrUniEdge at slot `pos` of the
//device-side pointer table.
__global__ void kernelGetPointerUniEdge(UniEdge **dArrPointerUniEdge,UniEdge *dArrUniEdge,int pos){
	*(dArrPointerUniEdge + pos) = dArrUniEdge;
}
//Hm trch cc m rng duy nht v lu kt qu vo mng dArrPointerUniEdge, mi phn t ca n l mt pointer tr n mng dArrUniEdge trn device
//For every EXT column, extracts the set of distinct (li,lij,lj) edges into a
//UniEdge array and publishes its device pointer in dArrPointerUniEdge, with
//the per-column counts copied into dArrNoELemPointerUniEdge.
//Per non-empty column: mark each occurring (lij,lj) pair in a Lv*Le presence
//table, scan the table to size and index the output, allocate the UniEdge
//array and scatter the labels into it.
inline hipError_t extractUniExtension(EXT **dArrPointerExt,int noElem_dArrPointerExt,int Lv,int Le,UniEdge **&dArrPointerUniEdge,int noElem_dArrPointerUniEdge,int *&dArrNoELemPointerUniEdge,int *hArrNoElemPointerExt,int *dArrNoElemPointerExt){
hipError_t cudaStatus;
/* Walk every EXTk, extract its unique edges, and record the result in
 * UniEdge **dArrPointerUniEdge; each element of that array is one pointer,
 * the result of processing one EXTk. Unique forward extensions would go to
 * dUniqueEdgeForward; unique backward extensions only exist for the last
 * EXTk.
 */
//1. Allocate UniEdge **dArrPointerUniEdge sized like the right-most path.
cudaStatus=hipMalloc((void**)&dArrPointerUniEdge,sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dArrPointerUniEdge in extractUniExtension() failed",cudaStatus);
goto Error;
}
else
{
hipMemset(dArrPointerUniEdge,0,sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
}
//Allocate the device-side per-column counts.
cudaStatus=hipMalloc((void**)&dArrNoELemPointerUniEdge,sizeof(int)*noElem_dArrPointerUniEdge);
if (cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dArrPointerUniEdge in extractUniExtension() failed",cudaStatus);
goto Error;
}
else
{
hipMemset(dArrNoELemPointerUniEdge,0,sizeof(int)*noElem_dArrPointerUniEdge);
}
int *hArrNoELemPointerUniEdge=(int*)malloc(sizeof(int)*noElem_dArrPointerUniEdge); //temporary host counts; freed at the end after copying to dArrNoELemPointerUniEdge
if(hArrNoELemPointerUniEdge==NULL){
printf("\n Malloc hArrNoELemPointerUniEdge in extractUniExtension() failed");
exit(1);
}
else
{
memset(hArrNoELemPointerUniEdge,0,sizeof(int)*noElem_dArrPointerUniEdge);
}
for (int i = 0; i < noElem_dArrPointerExt; i++)
{
//Presence table for this column; released at the bottom of the loop body.
int *dArrAllPossibleExtension =nullptr;
int noElem_dArrAllPossibleExtension = Lv*Le;
//Only process non-empty columns. (A possible refinement noted by the
//original author: only process columns whose count reaches minsup.)
if(hArrNoElemPointerExt[i]>0){
int *dFromLi;
cudaStatus = hipMalloc((void**)&dFromLi,sizeof(int));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dFromLi in extractUniExtension failed");
goto Error;
}
else
{
hipMemset(dFromLi,0,sizeof(int));
}
//Fetch the shared source label Li of this column into dFromLi.
hipLaunchKernelGGL(( kernelGetFromLabel), dim3(1),dim3(1), 0, 0, dArrPointerExt,i,dFromLi);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipDeviceSynchronize() kernelGetFromLabel in extracUniExtension failed");
goto Error;
}
//Dump dFromLi (debug).
printf("\n ****dFrom *******\n");
cudaStatus =printInt(dFromLi,1);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n printInt(dFromLi,1) in extracUniExtension failed");
goto Error;
}
UniEdge * dArrUniEdge=nullptr;
int noElem_dArrUniEdge=0;
//Zero-initialized Lv*Le presence table.
cudaStatus=hipMalloc((void**)&dArrAllPossibleExtension,noElem_dArrAllPossibleExtension*sizeof(int));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc((void**)&dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
goto Error;
}
else
{
hipMemset(dArrAllPossibleExtension,0,noElem_dArrAllPossibleExtension*sizeof(int));
}
//Map each (li,lij,lj) to its slot of dArrAllPossibleExtension and set a 1
//there: one thread per extension record of this column.
cudaStatus = assigndAllPossibleExtension(dArrPointerExt,i,Lv,Le,dArrAllPossibleExtension,hArrNoElemPointerExt[i]);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n assigndAllPossibleExtension in extractUniExtension() failed",cudaStatus);
goto Error;
}
//Scan the presence table: gives both the size of dArrUniEdge and the
//output index of every set slot.
int *dArrAllPossibleExtensionScanResult =nullptr;
cudaStatus = hipMalloc((void**)&dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension*sizeof(int));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dArrAllPossibleExtensionScanResult in extractUniExtension() failed",cudaStatus);
goto Error;
}
cudaStatus = scanV(dArrAllPossibleExtension,noElem_dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n scanV dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
goto Error;
}
//Number of unique edges for this column.
cudaStatus =getSizeBaseOnScanResult(dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,noElem_dArrUniEdge);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n scanV dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
goto Error;
}
//Dump noElem_dArrUniEdge (debug).
printf("\n******noElem_dArrUniEdge************\n");
//printf("\n noElem_dArrUniEdge:%d",noElem_dArrUniEdge);
//Allocate this column's unique-edge array.
cudaStatus = hipMalloc((void**)&dArrUniEdge,noElem_dArrUniEdge*sizeof(UniEdge));
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipMalloc dArrUniEdge in extractUniExtension() failed",cudaStatus);
goto Error;
}
//Scatter (li,lij,lj) into dArrUniEdge using the scan positions.
cudaStatus =assigndArrUniEdge(dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,dArrUniEdge,Lv,dFromLi);
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n assigndArrUniEdge in extractUniExtension() failed",cudaStatus);
goto Error;
}
//Dump dArrUniEdge (debug).
printf("\n**********printf************");
printfUniEdge(dArrUniEdge,noElem_dArrUniEdge);
//Record the count and publish the pointer in the device table.
hArrNoELemPointerUniEdge[i]=noElem_dArrUniEdge;
hipLaunchKernelGGL(( kernelGetPointerUniEdge), dim3(1),dim3(1), 0, 0, dArrPointerUniEdge,dArrUniEdge,i);
hipDeviceSynchronize();
hipFree(dArrAllPossibleExtensionScanResult);
hipFree(dFromLi);
//NOTE(review): dFromLi and dArrAllPossibleExtensionScanResult are not freed
//on the goto Error paths above — confirm whether that leak is acceptable.
} //end if
hipFree(dArrAllPossibleExtension);
} //end for
hipMemcpy(dArrNoELemPointerUniEdge,hArrNoELemPointerUniEdge,sizeof(int)*noElem_dArrPointerUniEdge,hipMemcpyHostToDevice);
hipDeviceSynchronize();
cudaStatus=hipGetLastError();
if(cudaStatus!=hipSuccess){
fprintf(stderr,"\n hipDeviceSynchronize() in extractUniExtension() failed");
goto Error;
}
Error:
free(hArrNoELemPointerUniEdge);
return cudaStatus;
}
//kernel in ni dung mngdArrPointerUniEdge
//Debug kernel: thread i lists every unique edge of column i.
__global__ void kernelprintArrPointerUniEdge(UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrPointerUniEdge) return;
	int count = dArrNoELemPointerUniEdge[tid];
	if (count == 0) return;
	UniEdge *edges = dArrPointerUniEdge[tid];
	for (int j = 0; j < count; j++)
	{
		printf("\n Thread %d: j:%d (li:%d lij:%d lj:%d)",tid,j,edges[j].li,edges[j].lij,edges[j].lj);
	}
}
//Hm in ni dung mngdArrPointerUniEdge
//Host wrapper: dumps every column of dArrPointerUniEdge via
//kernelprintArrPointerUniEdge and reports any launch/execution error.
inline hipError_t printArrPointerUniEdge(UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	const dim3 threads(blocksize);
	const dim3 blocks((noElem_dArrPointerUniEdge + threads.x - 1) / threads.x);
	hipLaunchKernelGGL(( kernelprintArrPointerUniEdge), dim3(blocks),dim3(threads), 0, 0, dArrPointerUniEdge,dArrNoELemPointerUniEdge,noElem_dArrPointerUniEdge);
	hipDeviceSynchronize();
	hipError_t cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr,"\n hipDeviceSynchronize() kernelprintArrPointerUniEdge in printArrPointerUniEdge() failed");
	}
	return cudaStatus;
}
//Hm gii phng b nh Ext** dArrPointerUniEdge v dArrNoELemPointerUniEdge
//Releases every per-column UniEdge array referenced by dArrPointerUniEdge,
//then the device pointer table and the companion count array.
//Fixes over the previous version: the allocation and copy now use
//sizeof(UniEdge*) instead of sizeof(EXT*) (identical size on real targets,
//but type-correct), the host shadow buffer is freed, and the error messages
//name this function instead of unrelated ones.
inline hipError_t cudaFreeArrPointerUniEdge(UniEdge **&dArrPointerUniEdge,int *&dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	hipError_t cudaStatus;
	UniEdge **hArrPointerUniEdge = (UniEdge**)malloc(sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
	if(hArrPointerUniEdge==NULL){
		printf("\n malloc hArrPointerUniEdge in cudaFreeArrPointerUniEdge failed");
		exit(1);
	}
	//Mirror the device pointer table on the host so the individual arrays can
	//be passed to hipFree.
	cudaStatus = hipMemcpy(hArrPointerUniEdge,dArrPointerUniEdge,noElem_dArrPointerUniEdge*sizeof(UniEdge*),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy() in cudaFreeArrPointerUniEdge() failed");
		goto Error;
	}
	for (int i = 0; i < noElem_dArrPointerUniEdge; i++)
	{
		if (hArrPointerUniEdge[i]!=NULL){
			hipFree(hArrPointerUniEdge[i]);
		}
	}
	hipFree(dArrPointerUniEdge);
	hipFree(dArrNoELemPointerUniEdge);
Error:
	free(hArrPointerUniEdge); //was leaked before
	return cudaStatus;
}
//Single-thread kernel: copies the column-`pos` UniEdge pointer into slot 0
//of dPointerArrUniEdge and echoes it for debugging.
__global__ void kernelExtractPointerUniEdge(UniEdge **dPointerArrUniEdge,UniEdge **dArrPointerUniEdge,int pos){
	UniEdge *p = dArrPointerUniEdge[pos];
	dPointerArrUniEdge[0] = p;
	printf("\nPointer UniEdge:%p",p);
}
//Single-thread kernel: copies the column-`pos` EXT pointer into slot 0 of
//dPointerArrExt and echoes it for debugging. (noElemdArrExt is unused here.)
__global__ void kernelExtractPointerExt(EXT **dPointerArrExt,EXT **dArrPointerExt,int pos,unsigned int noElemdArrExt){
	EXT *p = dArrPointerExt[pos];
	dPointerArrExt[0] = p;
	printf("\nPointer:%p",p);
}
//Marks graph boundaries: dArrBoundary[i] is set to 1 when record i and
//record i+1 of the EXT array (stored at dPointerArrExt[0]) belong to
//different graphs; the graph id of a record is vgi / maxOfVer.
//Fix: the guard is now `i + 1 < noElemdArrExt` instead of
//`i < noElemdArrExt - 1` — with an unsigned element count of 0 the old
//expression underflowed to UINT_MAX and every thread read out of bounds.
__global__ void kernelfindBoundary(EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundary,unsigned int maxOfVer){
	unsigned int i = blockDim.x*blockIdx.x + threadIdx.x;
	EXT *dArrExt = dPointerArrExt[0];
	if(i + 1 < noElemdArrExt){
		unsigned int graphIdAfter=dArrExt[i+1].vgi/maxOfVer;
		unsigned int graphIdCurrent=dArrExt[i].vgi/maxOfVer;
		if(graphIdAfter!=graphIdCurrent){
			dArrBoundary[i]=1;
		}
	}
}
//Host wrapper: launches kernelfindBoundary over the EXT segment currently
//selected in dPointerArrExt[0], synchronizes, and reports any launch error.
inline hipError_t findBoundary(EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *&dArrBoundary,unsigned int maxOfVer){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElemdArrExt+block.x-1)/block.x); //ceil-div: one thread per element
	hipLaunchKernelGGL(( kernelfindBoundary), dim3(grid),dim3(block), 0, 0, dPointerArrExt,noElemdArrExt,dArrBoundary,maxOfVer);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		//fix: previously passed cudaStatus with no matching format specifier
		fprintf(stderr,"\n kernelfindBoundary in findBoundary() failed: %s",hipGetErrorString(cudaStatus));
	}
	return cudaStatus;
}
//Debug kernel: prints the base pointer and the (vgi,vgj) fields of every EXT
//element in the segment referenced by dArrExt[0].
__global__ void kernelPrint(EXT **dArrExt,unsigned int noElemdArrExt){
	unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid>=noElemdArrExt) return;
	EXT *arrExt = dArrExt[0];
	printf("\nPointer ext:%p",dArrExt[0]);
	printf("\n vgi:%d vgj:%d",arrExt[tid].vgi,arrExt[tid].vgj);
}
//Debug kernel: prints the base pointer and the (li,lij,lj) labels of every
//UniEdge in the array referenced by dPointerArrUniEdge[0].
__global__ void kernelPrintUE(UniEdge **dPointerArrUniEdge,unsigned int noElem){
	unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x;
	if(tid>=noElem) return;
	UniEdge *arrUniEdge = dPointerArrUniEdge[0];
	printf("\nPointer ue:%p",dPointerArrUniEdge[0]);
	printf("\n UniEdge: li:%d, lij:%d, lj:%d)",arrUniEdge[tid].li,arrUniEdge[tid].lij,arrUniEdge[tid].lj);
}
//For every EXT element, compares its (li,lij,lj) labels against those of the
//UniEdge at dPointerArrUniEdge[0][pos]; on a match, sets the owning graph's
//slot in dF to 1 (slot index comes from the boundary exclusive-scan result).
__global__ void kernelFilldF(UniEdge **dPointerArrUniEdge,unsigned int pos,EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundaryScanResult,float *dF){
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	if(tid>=noElemdArrExt) return;
	UniEdge *dUniEdge = dPointerArrUniEdge[0];
	EXT *dArrExt = dPointerArrExt[0];
	int refLi = dUniEdge[pos].li;
	int refLij = dUniEdge[pos].lij;
	int refLj = dUniEdge[pos].lj;
	int curLi = dArrExt[tid].li;
	int curLij = dArrExt[tid].lij;
	int curLj = dArrExt[tid].lj;
	printf("\nThread %d: UniEdge(li:%d lij:%d lj:%d) (Li:%d Lij:%d Lj:%d)",tid,refLi,refLij,refLj,curLi,curLij,curLj);
	if(refLi==curLi && refLij==curLij && refLj==curLj){
		dF[dArrBoundaryScanResult[tid]]=1;
	}
}
//Computes the support of the UniEdge at index `pos`: kernelFilldF marks the
//graphs containing a matching extension in dF, then dF is reduced into
//`support`. dF is zeroed again before returning so the next call can reuse it.
inline hipError_t calcSupport(UniEdge **dPointerArrUniEdge,unsigned int pos,EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundaryScanResult,float *dF,unsigned int noElemdF,float &support,unsigned int noElemdArrUniEdge){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElemdArrExt+block.x-1)/block.x);
	//fix: the debug kernels below were launched as <<<1,N>>>, which exceeds the
	//per-block thread limit for large segments; use proper grids instead
	dim3 gridUE((noElemdArrUniEdge+block.x-1)/block.x);
	printf("\n**********dPointerArrExt***********\n");
	hipLaunchKernelGGL(( kernelPrint), dim3(grid),dim3(block), 0, 0, dPointerArrExt,noElemdArrExt);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelPrint in calcSupport() failed");
		goto Error;
	}
	printf("\n**********dPointerArrUniEdge***********\n");
	hipLaunchKernelGGL(( kernelPrintUE), dim3(gridUE),dim3(block), 0, 0, dPointerArrUniEdge,noElemdArrUniEdge);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelPrintUE in calcSupport() failed");
		goto Error;
	}
	hipLaunchKernelGGL(( kernelFilldF), dim3(grid),dim3(block), 0, 0, dPointerArrUniEdge,pos,dPointerArrExt,noElemdArrExt,dArrBoundaryScanResult,dF);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelFilldF in calcSupport() failed: %s",hipGetErrorString(cudaStatus));
		goto Error;
	}
	printf("\n**********dF****************\n");
	printFloat(dF,noElemdF);
	reduction(dF,noElemdF,support); //support = number of graphs containing the edge
	printf("\n******support********");
	printf("\n Support:%f",support);
	//fix: reset dF for the next edge and propagate any memset failure
	cudaStatus = hipMemset(dF,0,noElemdF*sizeof(float));
Error:
	return cudaStatus;
}
//Support-computation routine: computeSupportv2
//Computes, for every segment j, the support of each unique edge in
//dArrPointerUniEdge[j] against the extensions in dArrPointerExt[j].
//Outputs: hArrPointerSupport[j] (host array of supports, caller frees) and
//hArrNoElemPointerSupport[j] (its element count). Returns the last HIP status.
inline hipError_t computeSupportv2(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int *hArrNoElemPointerExt,int noElem_dArrPointerExt,UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int *hArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge,unsigned int **&hArrPointerSupport,unsigned int *&hArrNoElemPointerSupport,unsigned int noElem_hArrPointerSupport,unsigned int maxOfVer){
	hipError_t cudaStatus = hipSuccess; //fix: could previously be returned uninitialized
	//Per-segment device scratch, declared at function scope so the Error path
	//can release whatever the failing iteration still held.
	UniEdge **dPointerArrUniEdge=nullptr;
	EXT **dPointerArrExt=nullptr;
	unsigned int *dArrBoundary=nullptr;
	unsigned int *dArrBoundaryScanResult=nullptr;
	float *dF=nullptr;
	//Host result tables, zero-initialised; each slot is one segment's support array
	hArrPointerSupport = (unsigned int**)malloc(sizeof(unsigned int*)*noElem_hArrPointerSupport);
	if(hArrPointerSupport==NULL){
		printf("\n malloc hArrPointerSupport in computeSupportv2() failed");
		exit(1);
	}
	memset(hArrPointerSupport,0,sizeof(unsigned int*)*noElem_hArrPointerSupport);
	hArrNoElemPointerSupport = (unsigned int*)malloc(sizeof(unsigned int)*noElem_hArrPointerSupport);
	if(hArrNoElemPointerSupport==NULL){
		printf("\n malloc hArrNoElemPointerSupport in computeSupportv2() failed");
		exit(1);
	}
	memset(hArrNoElemPointerSupport,0,sizeof(unsigned int)*noElem_hArrPointerSupport);
	//One iteration per segment: dArrPointerUniEdge[j] pairs with dArrPointerExt[j]
	for (int j = 0; j < noElem_dArrPointerUniEdge ; j++)
	{
		if(hArrNoELemPointerUniEdge[j]<=0) continue; //no unique edge in this segment
		unsigned int noElemdArrUniEdge = hArrNoELemPointerUniEdge[j];
		unsigned int noElemdArrExt = hArrNoElemPointerExt[j];
		unsigned int noElemdArrBoundary = noElemdArrExt;
		unsigned int noElemdF = 0;
		unsigned int *hArrSupport = NULL;
		dim3 block(blocksize);
		dim3 grid((noElemdArrExt+block.x-1)/block.x);
		dim3 gridUE((noElemdArrUniEdge+block.x-1)/block.x);
		//Single-slot device pointers selecting segment j
		cudaStatus = hipMalloc((void**)&dPointerArrUniEdge,sizeof(UniEdge*));
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipMalloc dPointerArrUniEdge in computeSupportv2() failed");
			goto Error;
		}
		cudaStatus = hipMalloc((void**)&dPointerArrExt,sizeof(EXT*));
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipMalloc dPointerArrExt in computeSupportv2() failed");
			goto Error;
		}
		hipLaunchKernelGGL(( kernelExtractPointerUniEdge), dim3(1),dim3(1), 0, 0, dPointerArrUniEdge,dArrPointerUniEdge,j);
		hipLaunchKernelGGL(( kernelExtractPointerExt), dim3(1),dim3(1), 0, 0, dPointerArrExt,dArrPointerExt,j,noElemdArrExt);
		hipDeviceSynchronize();
		cudaStatus = hipGetLastError();
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n pointer-extraction kernels in computeSupportv2() failed: %s",hipGetErrorString(cudaStatus));
			goto Error;
		}
		//Debug dumps (fix: were launched as <<<1,N>>>, invalid for N>1024)
		printf("\n**********dPointerArrExt***********\n");
		hipLaunchKernelGGL(( kernelPrint), dim3(grid),dim3(block), 0, 0, dPointerArrExt,noElemdArrExt);
		hipDeviceSynchronize();
		cudaStatus=hipGetLastError();
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n kernelPrint in computeSupportv2() failed");
			goto Error;
		}
		printf("\n**********dPointerArrUniEdge***********\n");
		hipLaunchKernelGGL(( kernelPrintUE), dim3(gridUE),dim3(block), 0, 0, dPointerArrUniEdge,noElemdArrUniEdge);
		hipDeviceSynchronize();
		cudaStatus=hipGetLastError();
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n kernelPrintUE in computeSupportv2() failed");
			goto Error;
		}
		//Boundary flags for this EXT segment plus their exclusive-scan buffer
		cudaStatus=hipMalloc((void**)&dArrBoundary,sizeof(unsigned int)*noElemdArrBoundary);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipMalloc dArrBoundary in computeSupportv2() failed");
			goto Error;
		}
		hipMemset(dArrBoundary,0,sizeof(unsigned int)*noElemdArrBoundary);
		cudaStatus=hipMalloc((void**)&dArrBoundaryScanResult,sizeof(unsigned int)*noElemdArrBoundary);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipMalloc dArrBoundaryScanResult in computeSupportv2() failed");
			goto Error;
		}
		hipMemset(dArrBoundaryScanResult,0,sizeof(unsigned int)*noElemdArrBoundary);
		cudaStatus = findBoundary(dPointerArrExt,noElemdArrExt,dArrBoundary,maxOfVer);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n findBoundary() in computeSupportv2() failed");
			goto Error;
		}
		printf("\n ************* dArrBoundary ************\n");
		cudaStatus=printUnsignedInt(dArrBoundary,noElemdArrBoundary);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n printUnsignedInt in computeSupportv2() failed");
			goto Error;
		}
		cudaStatus=scanV(dArrBoundary,noElemdArrBoundary,dArrBoundaryScanResult);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n Exclusive scan dArrBoundary in computeSupportv2() failed");
			goto Error;
		}
		printf("\n**************dArrBoundaryScanResult****************\n");
		printUnsignedInt(dArrBoundaryScanResult,noElemdArrBoundary);
		//dF holds one float per graph: size = last scan value + 1
		cudaStatus = hipMemcpy(&noElemdF,&dArrBoundaryScanResult[noElemdArrBoundary-1],sizeof(unsigned int),hipMemcpyDeviceToHost);
		if(cudaStatus !=hipSuccess){
			fprintf(stderr,"\n hipMemcpy noElemdF in computeSupportv2() failed");
			goto Error;
		}
		noElemdF++;
		printf("\n*****noElemdF******\n");
		printf("noElemdF:%d",noElemdF);
		//fix: dF holds floats; it was previously sized with sizeof(unsigned int)
		cudaStatus = hipMalloc((void**)&dF,sizeof(float)*noElemdF);
		if(cudaStatus!=hipSuccess){
			fprintf(stderr,"\n hipMalloc dF in computeSupportv2() failed");
			goto Error;
		}
		hipMemset(dF,0,sizeof(float)*noElemdF);
		//Support array for segment j; published immediately so the caller owns it
		hArrNoElemPointerSupport[j]=noElemdArrUniEdge;
		hArrSupport = (unsigned int*)malloc(sizeof(unsigned int)*noElemdArrUniEdge);
		if(hArrSupport==NULL){
			printf("\n Malloc hArrSupport in computeSupportv2() failed");
			exit(1);
		}
		memset(hArrSupport,0,sizeof(unsigned int)*noElemdArrUniEdge);
		hArrPointerSupport[j]=hArrSupport;
		//Compute the support of every unique edge in this segment
		for (unsigned int i = 0; i < noElemdArrUniEdge; i++)
		{
			float support=0;
			cudaStatus =calcSupport(dPointerArrUniEdge,i,dPointerArrExt,noElemdArrExt,dArrBoundaryScanResult,dF,noElemdF,support,noElemdArrUniEdge);
			if(cudaStatus !=hipSuccess){
				fprintf(stderr,"\n calcSupport failed");
				goto Error;
			}
			hArrSupport[i]=support; //truncation to unsigned int is intentional (counts)
		}
		//fix: this segment's device scratch was previously leaked on every iteration
		hipFree(dPointerArrUniEdge); dPointerArrUniEdge=nullptr;
		hipFree(dPointerArrExt); dPointerArrExt=nullptr;
		hipFree(dArrBoundary); dArrBoundary=nullptr;
		hipFree(dArrBoundaryScanResult); dArrBoundaryScanResult=nullptr;
		hipFree(dF); dF=nullptr;
	}
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipDeviceSynchronize() in computeSupportv2() failed");
	}
Error:
	//Release whatever the failing iteration still held (hipFree(nullptr) is a no-op)
	hipFree(dPointerArrUniEdge);
	hipFree(dPointerArrExt);
	hipFree(dArrBoundary);
	hipFree(dArrBoundaryScanResult);
	hipFree(dF);
	return cudaStatus;
}
//Kernel that extracts the label of an extension satisfying minsup
//Copies the (li,lij,lj) labels of dArrPointerUniEdge[pointerPos][edgePos] into
//the single-element output dUniEdge. Single-thread kernel.
__global__ void kernelgetEdgeLabel(UniEdge **dArrPointerUniEdge,int pointerPos,int edgePos,UniEdge *dUniEdge){
	UniEdge *src = &dArrPointerUniEdge[pointerPos][edgePos];
	dUniEdge->li = src->li;
	dUniEdge->lij = src->lij;
	dUniEdge->lj = src->lj;
	printf("\n dUniEdge: (li:%d, lij:%d, lj:%d)",dUniEdge->li,dUniEdge->lij,dUniEdge->lj);
}
//Copies the labels (li,lij,lj) of an extension satisfying minsup to the host to build DFS_CODE
//Copies the (li,lij,lj) labels of the extension at dArrPointerUniEdge[pos][edgePos]
//to the host so DFS_CODE can be built there. hUniEdge is malloc'd here; the
//caller owns it and must free() it.
inline hipError_t getEdgeLabel(UniEdge **dArrPointerUniEdge,int pos,int edgePos,UniEdge *&hUniEdge){
	hipError_t cudaStatus;
	hUniEdge = (UniEdge*)malloc(sizeof(UniEdge));
	if(hUniEdge==NULL){
		printf("\n malloc hUniEdge in getEdgeLabel() failed\n");
		exit(1);
	}
	memset(hUniEdge,0,sizeof(UniEdge));
	UniEdge *dUniEdge =nullptr;
	cudaStatus = hipMalloc((void**)&dUniEdge,sizeof(UniEdge));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dUniEdge in getEdgeLabel() failed\n");
		goto Error;
	}
	hipMemset(dUniEdge,0,sizeof(UniEdge));
	hipLaunchKernelGGL(( kernelgetEdgeLabel), dim3(1),dim3(1), 0, 0, dArrPointerUniEdge,pos,edgePos,dUniEdge);
	hipDeviceSynchronize();
	cudaStatus = hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelgetEdgeLabel in getEdgeLabel() failed: %s",hipGetErrorString(cudaStatus));
		goto Error;
	}
	cudaStatus = hipMemcpy(hUniEdge,dUniEdge,sizeof(UniEdge),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy() (dUniEdge --> hUniEdge) in getEdgeLabel() failed");
	}
Error:
	hipFree(dUniEdge); //fix: device staging buffer was previously leaked
	return cudaStatus;
}
//Reads (vi,vj) of the first extension in segment posPointer into the
//single-element outputs Vi and Vj. Single-thread kernel.
__global__ void kernelgetViVj(EXT **dArrPointerExt,int posPointer,int *Vi,int *Vj){
	EXT *first = &dArrPointerExt[posPointer][0];
	Vi[0] = first->vi;
	Vj[0] = first->vj;
}
//Extracts (vi,vj) from EXTk to build the DFS code
//Extracts (vi,vj) of the first embedding in EXT segment posPointer; used when
//building the DFS code on the host.
inline hipError_t getViVj(EXT **dArrPointerExt,int posPointer, int &vi, int &vj){
	hipError_t cudaStatus;
	int *Vi=nullptr;
	int *Vj=nullptr;
	cudaStatus = hipMalloc((void**)&Vi,sizeof(int));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc() Vi in getViVj() failed");
		goto Error;
	}
	hipMemset(Vi,-1,sizeof(int)); //-1 sentinel so a failed kernel is detectable
	cudaStatus = hipMalloc((void**)&Vj,sizeof(int));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc() Vj in getViVj() failed");
		goto Error;
	}
	hipMemset(Vj,-1,sizeof(int));
	hipLaunchKernelGGL(( kernelgetViVj), dim3(1),dim3(1), 0, 0, dArrPointerExt,posPointer,Vi,Vj);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelgetViVj in getViVj() failed: %s",hipGetErrorString(cudaStatus));
		goto Error;
	}
	//fix: the results of these copies were previously ignored
	cudaStatus = hipMemcpy(&vi,Vi,sizeof(int),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy vi in getViVj() failed");
		goto Error;
	}
	cudaStatus = hipMemcpy(&vj,Vj,sizeof(int),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy vj in getViVj() failed");
	}
Error:
	//fix: Vi/Vj device scratch was previously leaked
	hipFree(Vi);
	hipFree(Vj);
	return cudaStatus;
}
//Flags dValidEdge[g]=1 for every graph g owning an extension whose labels
//(li,lij,lj) equal the queried triple; g is recovered as vgi/maxOfVer.
__global__ void kernelMatchValueInEXT(EXT **dPointerArrExt,unsigned int noElemInArrExt,unsigned int *dValidEdge,int li,int lij,int lj,unsigned int maxOfVer){
	int tid =blockDim.x*blockIdx.x + threadIdx.x;
	if(tid>=noElemInArrExt) return;
	EXT *dArrExt = dPointerArrExt[0];
	int curLi=dArrExt[tid].li;
	int curLij=dArrExt[tid].lij;
	int curLj=dArrExt[tid].lj;
	if(curLi==li && curLij==lij && curLj==lj){
		int graphId = dArrExt[tid].vgi/maxOfVer;
		dValidEdge[graphId]=1;
	}
}
//Derives the total number of set flags from an exclusive scan: the count is
//the last scan value, plus one when the last flag itself is set. Single-thread.
__global__ void kernelGetLastElement(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult,unsigned int noElemdValidEdge,int *dnoElem_hArrGraphId){
	unsigned int last = noElemdValidEdge-1;
	int total = dValidEdgeScanResult[last];
	if(dValidEdge[last]==1){
		total = total + 1;
	}
	*dnoElem_hArrGraphId = total;
}
//Host wrapper around kernelGetLastElement: computes the number of set flags
//implied by (dValidEdge, its exclusive scan) and copies it into
//noElem_hArrGraphId via a one-int device round trip.
inline hipError_t findNumberElemBaseOnScanResult(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult,unsigned int noElemdValidEdge,int &noElem_hArrGraphId){
	hipError_t cudaStatus;
	int *dnoElem_hArrGraphId=nullptr;
	cudaStatus = hipMalloc((void**)&dnoElem_hArrGraphId,sizeof(int));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dnoElem_hArrGraphId in findNumberElemBaseOnScanResult() failfed");
		goto Error;
	}
	hipMemset(dnoElem_hArrGraphId,0,sizeof(int));
	hipLaunchKernelGGL(( kernelGetLastElement), dim3(1),dim3(1), 0, 0, dValidEdge,dValidEdgeScanResult,noElemdValidEdge,dnoElem_hArrGraphId);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelGetLastElement dValidEdgeScanResult in findNumberElemBaseOnScanResult() failed");
		goto Error;
	}
	cudaStatus = hipMemcpy(&noElem_hArrGraphId,dnoElem_hArrGraphId,sizeof(int),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy (dnoElem_hArrGraphId -->noElem_hArrGraphId) in findNumberElemBaseOnScanResult() failed");
	}
Error:
	hipFree(dnoElem_hArrGraphId);
	return cudaStatus;
}
//Compacts the flagged graph ids: where dValidEdge[i]==1, writes i into the
//output slot given by the exclusive-scan value at i.
__global__ void kernelFilldArrGraphId(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult, unsigned int noElemdValidEdge, int *dArrGraphId){
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	if(tid>=noElemdValidEdge) return;
	if(dValidEdge[tid]!=1) return;
	dArrGraphId[dValidEdgeScanResult[tid]]=tid;
}
//Reads the graph id (vgi/maxOfVer) of the last extension in the segment; the
//host uses it as an upper bound when sizing the dValidEdge flag array.
__global__ void kernelGetLastElementExt(EXT **dPointerArrExt,unsigned int noElemInArrExt,unsigned int *dnoElemdValidEdge,unsigned int maxOfVer){
	unsigned int lastGraph = dPointerArrExt[0][noElemInArrExt-1].vgi/maxOfVer;
	dnoElemdValidEdge[0] = lastGraph;
	printf("\ndnoElemdValidEdge:%d",*dnoElemdValidEdge);
}
//Gets the ids of graphs containing an embedding that satisfies minDFS_CODE
//Collects the ids of all graphs that contain an embedding matching the labels
//of hUniEdge. On success hArrGraphId (malloc'd here, caller frees) holds
//noElem_hArrGraphId graph ids.
inline hipError_t getGraphId(UniEdge *hUniEdge,EXT **dPointerArrExt,unsigned int noElemInArrExt,int *&hArrGraphId,int &noElem_hArrGraphId,unsigned int maxOfVer){
	hipError_t cudaStatus;
	int li=hUniEdge->li;
	int lij=hUniEdge->lij;
	int lj=hUniEdge->lj;
	//All device scratch declared up-front so the Error path can release it
	unsigned int *dValidEdge=nullptr;
	unsigned int noElemdValidEdge=0;
	unsigned int *dnoElemdValidEdge=nullptr;
	unsigned int *dValidEdgeScanResult=nullptr;
	int *dArrGraphId=nullptr;
	dim3 block(blocksize);
	dim3 grid((noElemInArrExt+block.x-1)/block.x);
	dim3 gridFlags; //sized once noElemdValidEdge is known
	cudaStatus=hipMalloc((void**)&dnoElemdValidEdge,sizeof(unsigned int));
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dnoElemdValidEdge in getGraphId() failed");
		goto Error;
	}
	hipMemset(dnoElemdValidEdge,0,sizeof(unsigned int));
	//1. The graph id of the last EXT element bounds the flag-array size
	hipLaunchKernelGGL(( kernelGetLastElementExt), dim3(1),dim3(1), 0, 0, dPointerArrExt,noElemInArrExt,dnoElemdValidEdge,maxOfVer);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelGetLastElementExt in getGraphId() failed");
		goto Error;
	}
	//fix: this copy's status was previously ignored
	cudaStatus=hipMemcpy(&noElemdValidEdge,dnoElemdValidEdge,sizeof(unsigned int),hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy noElemdValidEdge in getGraphId() failed");
		goto Error;
	}
	noElemdValidEdge++;
	printf("\n noElemdValidEdge:%d",noElemdValidEdge);
	//2. One flag per graph, set when the graph owns a matching extension
	cudaStatus = hipMalloc((void**)&dValidEdge,sizeof(unsigned int)*noElemdValidEdge);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dValidEdge in getGraphId() failed");
		goto Error;
	}
	hipMemset(dValidEdge,0,sizeof(unsigned int)*noElemdValidEdge);
	hipLaunchKernelGGL(( kernelMatchValueInEXT), dim3(grid),dim3(block), 0, 0, dPointerArrExt,noElemInArrExt,dValidEdge,li,lij,lj,maxOfVer);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelMatchValueInEXT in getGraphId() failed");
		goto Error;
	}
	printf("\n*************dValidEdge*************\n");
	printUnsignedInt(dValidEdge,noElemdValidEdge);
	//3. Exclusive scan of the flags gives compacted output positions
	cudaStatus = hipMalloc((void**)&dValidEdgeScanResult,sizeof(unsigned int)*noElemdValidEdge);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dValidEdgeScanResult in getGraphId() failed");
		goto Error;
	}
	hipMemset(dValidEdgeScanResult,0,sizeof(unsigned int)*noElemdValidEdge);
	cudaStatus = scanV(dValidEdge,noElemdValidEdge,dValidEdgeScanResult);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n scanV() dValidEdge in getGraphId() failed");
		goto Error;
	}
	printf("\n*************dValidEdgeScanResult*************\n");
	printUnsignedInt(dValidEdgeScanResult,noElemdValidEdge);
	//4. Number of matching graphs
	cudaStatus = findNumberElemBaseOnScanResult(dValidEdge,dValidEdgeScanResult,noElemdValidEdge,noElem_hArrGraphId);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n findNumberElemBaseOnScanResult in getGraphId() failed");
		goto Error;
	}
	printf("\n Value of noELem_hArrGraphId:%d",noElem_hArrGraphId);
	//5. Compact the flagged graph ids into dArrGraphId, then copy to the host
	hArrGraphId = (int*)malloc(sizeof(int)*noElem_hArrGraphId);
	if(hArrGraphId==NULL){
		printf("\n Malloc hArrGraphId in getGraphId() failed");
		exit(1);
	}
	cudaStatus = hipMalloc((void**)&dArrGraphId,sizeof(int)*noElem_hArrGraphId);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dArrGraphId in getGraphId() failed");
		goto Error;
	}
	hipMemset(dArrGraphId,0,sizeof(int)*noElem_hArrGraphId);
	//fix: the compaction kernel scans all noElemdValidEdge flags, so the grid
	//must cover that count (it was previously sized by noElem_hArrGraphId,
	//which can leave trailing flags unprocessed)
	gridFlags = dim3((noElemdValidEdge+block.x-1)/block.x);
	hipLaunchKernelGGL(( kernelFilldArrGraphId), dim3(gridFlags),dim3(block), 0, 0, dValidEdge, dValidEdgeScanResult, noElemdValidEdge, dArrGraphId);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelFilldArrGraphId in getGraphId() failed");
		goto Error;
	}
	cudaStatus = hipMemcpy(hArrGraphId,dArrGraphId,sizeof(int)*noElem_hArrGraphId,hipMemcpyDeviceToHost);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMemcpy (dArrGraphId --> hArrGraphId) failed");
		goto Error;
	}
	printf("\n*************dArrGraphId*************\n");
	printInt(dArrGraphId,noElem_hArrGraphId);
Error:
	//fix: dnoElemdValidEdge and dValidEdgeScanResult were previously leaked
	hipFree(dnoElemdValidEdge);
	hipFree(dValidEdge);
	hipFree(dValidEdgeScanResult);
	hipFree(dArrGraphId);
	return cudaStatus;
}
//Debug helper: dumps the EXT segment currently selected in dPointerArrExt[0].
inline hipError_t printdPointerArrExt(EXT **dPointerArrExt,unsigned int noElemInArrExt){
	hipError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElemInArrExt+block.x-1)/block.x);
	hipLaunchKernelGGL(( kernelPrint), dim3(grid),dim3(block), 0, 0, dPointerArrExt,noElemInArrExt);
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n kernelPrint in printdPointerArrExt() failed");
	}
	return cudaStatus;
}
//Incomplete: allocates the marker array dM and its scan buffer for extending
//the root embeddings; the actual extension logic has not been implemented yet.
//NOTE(review): the Embedding/size output parameters are currently untouched.
inline hipError_t extendEmbeddingRoot(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,EXT **dPointerArrExt,unsigned int noElemInArrExt,UniEdge *hUniEdge){
	hipError_t cudaStatus;
	unsigned int *dM=nullptr;
	unsigned int *dMScanResult=nullptr;
	//1. Marker array dM, one slot per extension, zero-initialised
	cudaStatus=hipMalloc((void**)&dM,sizeof(unsigned int)*noElemInArrExt);
	if(cudaStatus!=hipSuccess){
		fprintf(stderr,"\n hipMalloc dM in extendEmbeddingRoot() failed");
		goto Error;
	}
	hipMemset(dM,0,sizeof(unsigned int)*noElemInArrExt);
	//1.1. Companion exclusive-scan buffer
	cudaStatus=hipMalloc((void**)&dMScanResult,sizeof(unsigned int)*noElemInArrExt);
	if(cudaStatus!=hipSuccess){
		//fix: message previously named dM instead of dMScanResult
		fprintf(stderr,"\n hipMalloc dMScanResult in extendEmbeddingRoot() failed");
		goto Error;
	}
	hipMemset(dMScanResult,0,sizeof(unsigned int)*noElemInArrExt);
	//2. TODO: fill dM/dMScanResult and build the embedding columns
	hipDeviceSynchronize();
	cudaStatus=hipGetLastError();
Error:
	//fix: both scratch buffers were previously leaked
	hipFree(dM);
	hipFree(dMScanResult);
	return cudaStatus;
}
| 5d5f48aebafa12a4b9026770ddf68832379803f9.cu | #pragma once
#include "header.h"
float hTime=0.0;
float dTime=0.0;
//kernel khởi tạo bộ nhớ và tạo nội dung cho dQ
//Fills dQ with synthetic test data: element i gets idx=i and vid=i+100.
__global__ void kernelInitializeDataEmbedding(Embedding *dQ,int sizedQ){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid>=sizedQ) return;
	dQ[tid].idx=tid;
	dQ[tid].vid=tid+100;
}
//Hàm khởi tạo bộ nhớ và tạo nội dung cho dQ
//Allocates dQ (sizedQ Embedding elements) on the device. On the very first
//call (first==0) the memory is only cleared to 0xFF bytes and `first` is
//bumped; on later calls kernelInitializeDataEmbedding fills it with data.
inline cudaError_t createEmbeddingElement(Embedding *&dQ,int sizedQ,int &first){
	cudaError_t cudaStatus;
	size_t nBytes = sizedQ*sizeof(Embedding);
	//fix: check the cudaMalloc result directly instead of overwriting it with
	//cudaGetLastError()
	cudaStatus=cudaMalloc((void**)&dQ,nBytes);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dQ of createEmbeddingElement failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	if(first==0)
	{
		cudaMemset(dQ,-1,nBytes);
		++first;
		return cudaStatus;
	}
	{
		dim3 block(blocksize);
		dim3 grid((sizedQ + block.x - 1)/block.x);
		kernelInitializeDataEmbedding<<<grid,block>>>(dQ,sizedQ);
	}
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize of createEmbeddingElement failed: %s",cudaGetErrorString(cudaStatus));
	}
Error:
	return cudaStatus;
}
//Debug kernel: prints each Embedding's (idx,vid) pair and the addresses of
//both fields.
__global__ void kernelPrint(Embedding *dQ,int sizedQ){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid>=sizedQ) return;
	printf("\n Thread %d: %p (idx:%d,vid:%d) (%p,%p)",tid,dQ,dQ[tid].idx,dQ[tid].vid,&(dQ[tid].idx),&(dQ[tid].vid));
}
//Hàm in nội dung Embedding *dQ khi biết kích thước
//Prints every element of the device array dQ via kernelPrint.
inline cudaError_t print(Embedding *dQ,int sizedQ){
	cudaError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizedQ + block.x - 1)/block.x);
	kernelPrint<<<grid,block>>>(dQ,sizedQ);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		//fix: the message previously blamed createEmbeddingElement
		fprintf(stderr,"\n kernelPrint in print() failed: %s",cudaGetErrorString(cudaStatus));
	}
	return cudaStatus;
}
//kernel In phần tử Embedding **pdQ
//Debug kernel: for each column i, prints its slot address, previous-column
//index, and every (idx,vid) pair it stores.
__global__ void kernelPrint(Embedding **pdQ,int *d_arrSizedQ,int *d_arrPrevQ,int sizepdQ){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= sizepdQ) return;
	printf("\n Thread %d: Value of pdQ:%p",i,pdQ+i);
	Embedding *dQ = pdQ[i];
	int prevQ = d_arrPrevQ[i];
	int count = d_arrSizedQ[i];
	for (int j = 0; j < count; j++)
	{
		printf("\n i=%d %p PrevQ:%d (idx:%d, vid:%d) ",i,dQ,prevQ,dQ[j].idx,dQ[j].vid);
	}
}
//Hàm in phần tử Embedding **pdQ khi biết kích thước của dQ trong mảng h_arrSizedQ tương ứng
//Prints every column referenced by pdQ. h_arrSizedQ and d_arrPrevQ are the
//per-column size and previous-column-index arrays (both device pointers,
//despite the h_ prefix on the first).
inline cudaError_t print(Embedding **pdQ,int *h_arrSizedQ,int *d_arrPrevQ,int sizepdQ){
	cudaError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizepdQ+block.x-1)/block.x);
	printf("\n\n Array pdQ:\n");
	kernelPrint<<<grid,block>>>(pdQ,h_arrSizedQ,d_arrPrevQ,sizepdQ);
	printf("\n");
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize of kernelPrint failed");
	}
	return cudaStatus;
}
//kernel lấy pointer của dQ lưu vào pdQ
//Stores the device address dQ into the single-slot pointer table pdQ.
__global__ void kernelgetPointer(Embedding **pdQ,Embedding *dQ){
	pdQ[0]=dQ;
}
//Copies sizepdQ pointer entries from pdQ into d_temp, one per thread.
__global__ void kernelCopyEmbedding(Embedding **pdQ,int sizepdQ,Embedding **d_temp){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid>=sizepdQ) return;
	d_temp[tid]=pdQ[tid];
}
//Debug kernel: prints each pointer in the (re)grown pointer table.
__global__ void kernelPrintDoubleEmbedding(Embedding **d_temp,int newsize){
	int tid=blockDim.x * blockIdx.x + threadIdx.x;
	if (tid>=newsize) return;
	printf("\n Thread %d: %p",tid,d_temp[tid]);
}
//Writes dQ into the final slot of d_temp; launched with a single thread.
__global__ void kernelCopyLastEmbedding(Embedding **d_temp,Embedding *dQ,int newsize){
	d_temp[newsize-1]=dQ;
}
//Hàm lấy pointer của phần tử Embedding *dQ bằng hàm cudaMemcpy
//Appends the device pointer dQ to the device pointer table pdQ, growing the
//table by one slot (sizepdQ is incremented). The old table is released and a
//staging table is used for the copy.
inline cudaError_t getPointer(Embedding **&pdQ,int &sizepdQ,Embedding *dQ){
	cudaError_t cudaStatus;
	Embedding **d_temp=nullptr;
	int currentsize = sizepdQ;
	int newsize = ++sizepdQ;
	size_t nBytes=newsize*sizeof(Embedding*);
	dim3 block(blocksize);
	dim3 grid((newsize+block.x-1)/block.x);
	if (currentsize==0){
		//First element: allocate a one-slot table and store dQ in it
		cudaStatus=cudaMalloc((void**)&pdQ,nBytes);
		if(cudaStatus!=cudaSuccess){
			fprintf(stderr,"\n cudaMalloc pdQ failed: %s",cudaGetErrorString(cudaStatus));
			goto Error;
		}
		kernelgetPointer<<<1,1>>>(pdQ,dQ);
		cudaDeviceSynchronize();
		cudaStatus=cudaGetLastError();
		goto Error;
	}
	//Staging table one slot larger than the current one
	cudaStatus=cudaMalloc((void**)&d_temp,nBytes);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc d_temp in getPointer failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaMemset(d_temp,0,nBytes);
	//Copy the existing pointers, then append dQ in the last slot
	//fix: these launches used <<<1,N>>>, which fails for N>1024; use a grid
	kernelCopyEmbedding<<<grid,block>>>(pdQ,currentsize,d_temp);
	kernelCopyLastEmbedding<<<1,1>>>(d_temp,dQ,newsize);
	cudaDeviceSynchronize();
	//Debug: show the staged table contents
	kernelPrintDoubleEmbedding<<<grid,block>>>(d_temp,sizepdQ);
	cudaDeviceSynchronize();
	//Reallocate the main table at the new size and copy the staging table back
	cudaFree(pdQ);
	cudaStatus=cudaMalloc((void**)&pdQ,nBytes);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc pdQ in getPointer failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaMemset(pdQ,-1,nBytes);
	kernelCopyEmbedding<<<grid,block>>>(d_temp,sizepdQ,pdQ);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		//fix: previously passed `stderr` itself as the status argument
		fprintf(stderr,"\n cudaDeviceSynchronize in getPointer failed: %s",cudaGetErrorString(cudaStatus));
	}
Error:
	cudaFree(d_temp); //fix: staging table was previously leaked (cudaFree(nullptr) is a no-op)
	return cudaStatus;
}
//kernel chép dữ liệu kiểu int từ device sang device
//Element-wise device-to-device copy of currentSize ints.
__global__ void kernelCopyInt(int *d_arrSizedQ,int *d_tempArrSizedQ,int currentSize){
	int tid=blockIdx.x * blockDim.x + threadIdx.x;
	if(tid>=currentSize) return;
	d_tempArrSizedQ[tid]=d_arrSizedQ[tid];
}
//Writes *temp into the final slot of d_tempArrSizedQ; single-thread kernel.
__global__ void kernelCopyLastInt(int *temp,int *d_tempArrSizedQ,int newsize){
	d_tempArrSizedQ[newsize-1]=temp[0];
}
//Copies `size` ints between two device arrays using kernelCopyInt.
inline cudaError_t copyDeviceToDeviceInt(int *d_FromIntArray,int *d_ToIntArray,int size){
	cudaError_t cudaStatus;
	dim3 block(blocksize);
	//fix: proper ceil-division (was (size+block.x)/block.x, one block too many
	//whenever size is a multiple of the block size)
	dim3 grid((size + block.x - 1)/block.x);
	kernelCopyInt<<<grid,block>>>(d_FromIntArray,d_ToIntArray,size);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelCopyInt in copyDeviceToDeviceInt failed: %s",cudaGetErrorString(cudaStatus));
	}
	return cudaStatus;
}
//Trả về mảng kích thước là một h_arrSizedQ trên device
//Appends `sizedQ` to the growable device int array d_arrSizedQ and increments
//sized_arrSizedQ. The array is regrown by one slot via a device staging copy.
//(Also reused to record prevQ indices.)
inline cudaError_t getSizedQ(int *&d_arrSizedQ,int &sized_arrSizedQ,int sizedQ){
	cudaError_t cudaStatus;
	int *d_tempArrSizedQ=nullptr;
	int *temp=nullptr;
	int currentSize = sized_arrSizedQ;
	int newsize =++sized_arrSizedQ;
	size_t nBytes = newsize*sizeof(int);
	if(currentSize==0){
		//First element: allocate a one-slot array and copy the value in
		cudaStatus = cudaMalloc((void**)&d_arrSizedQ,nBytes);
		if(cudaStatus!=cudaSuccess){
			fprintf(stderr,"\n cudaMalloc d_arrSizedQ in getSizedQ failed: %s",cudaGetErrorString(cudaStatus));
			goto Error;
		}
		cudaStatus = cudaMemcpy(d_arrSizedQ,&sizedQ,sizeof(int),cudaMemcpyHostToDevice);
		goto Error;
	}
	//Staging array one slot larger than the current one
	cudaStatus=cudaMalloc((void**)&d_tempArrSizedQ,nBytes);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc d_tempArrSizedQ in getSizedQ failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaMemset(d_tempArrSizedQ,-1,nBytes);
	cudaStatus = copyDeviceToDeviceInt(d_arrSizedQ,d_tempArrSizedQ,currentSize);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n copyDeviceToDeviceInt in getSizedQ failed");
		goto Error;
	}
	//Move the appended value to the device and write it into the last slot
	cudaStatus=cudaMalloc((void**)&temp,sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc temp in getSizedQ failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaMemcpy(temp,&sizedQ,sizeof(int),cudaMemcpyHostToDevice);
	kernelCopyLastInt<<<1,1>>>(temp,d_tempArrSizedQ,newsize);
	cudaDeviceSynchronize();
	//Reallocate the main array at the new size and copy the staging data back
	cudaFree(d_arrSizedQ);
	//fix: this reallocation was previously unchecked
	cudaStatus=cudaMalloc((void**)&d_arrSizedQ,nBytes);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc d_arrSizedQ (regrow) in getSizedQ failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	cudaMemset(d_arrSizedQ,0,nBytes);
	cudaStatus = copyDeviceToDeviceInt(d_tempArrSizedQ,d_arrSizedQ,newsize);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n copyDeviceToDeviceInt (back-copy) in getSizedQ failed");
		goto Error;
	}
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize in getSizedQ failed: %s",cudaGetErrorString(cudaStatus));
	}
Error:
	//fix: temp and the staging array were previously leaked
	cudaFree(temp);
	cudaFree(d_tempArrSizedQ);
	return cudaStatus;
}
//kernel in mảng kiểu int trên device
//Debug kernel: prints each element of a device int array.
__global__ void kernelPrintInt(int *dArray,int sizedArray){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid>=sizedArray) return;
	printf("\n Thread %d: dArray:%d",tid,dArray[tid]);
}
//Hàm in mảng kiểu int trên device
//Prints every element of a device int array via kernelPrintInt.
inline cudaError_t print(int *dArray,int sizedArray){
	cudaError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((sizedArray + block.x -1)/block.x);
	kernelPrintInt<<<grid,block>>>(dArray,sizedArray);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		//fix: the message previously blamed getPointer
		fprintf(stderr,"\n kernelPrintInt in print() failed: %s",cudaGetErrorString(cudaStatus));
	}
	return cudaStatus;
}
//Tạo và cập nhật ColumnQ
//Builds one embedding column: creates/initialises dQ (sizedQ elements),
//records its pointer in pdQ, its size in d_arrSizedQ and its previous-column
//index (iPrevQ) in d_arrPrevQ, keeping all growable arrays in step.
//NOTE(review): dQ is taken by value, so the pointer allocated inside
//createEmbeddingElement only escapes via pdQ — confirm that is intended.
cudaError_t makeColumnQ(Embedding *dQ,int sizedQ,Embedding **&pdQ,int &sizepdQ,int *&d_arrSizedQ,int &sized_arrSizedQ,int *&d_arrPrevQ,int &sized_arrPrevQ,int iPrevQ,int &first){
	cudaError_t cudaStatus;
	cudaStatus=createEmbeddingElement(dQ,sizedQ,first);
	if(cudaStatus!=cudaSuccess){
		//fix: these messages previously passed `stderr` as a spurious vararg
		fprintf(stderr,"\n createEmbeddingElement in makeColumnQ failed");
		goto Error;
	}
	//Record the new column's pointer
	cudaStatus = getPointer(pdQ,sizepdQ,dQ);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n getPointer in makeColumnQ failed");
		goto Error;
	}
	//Record the new column's size
	cudaStatus = getSizedQ(d_arrSizedQ,sized_arrSizedQ,sizedQ);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n getSizedQ in makeColumnQ failed");
		goto Error;
	}
	//Record the new column's previous-column index
	cudaStatus = getSizedQ(d_arrPrevQ,sized_arrPrevQ,iPrevQ);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n getSizedQ (prevQ) in makeColumnQ failed");
		goto Error;
	}
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() in makeColumnQ failed");
	}
Error:
	return cudaStatus;
}
//kernel in nội dung embedding thứ i.
//Walks one embedding chain backwards from column `lastColumnQ`, starting at
//row `firstEmbedding`, printing each (idx,vid) visited until prevQ==-1.
__global__ void kernelPrintEmbedding(Embedding **pdQ,int *d_arrSizedQ,int *d_arrPrevQ,int sizepdQ,int firstEmbedding,int lastColumnQ){
	Embedding *dQ = pdQ[lastColumnQ];
	int vid = dQ[firstEmbedding].vid;
	int idx = dQ[firstEmbedding].idx;
	int prevQ = d_arrPrevQ[lastColumnQ];
	printf("\n Q%d: (idx:%d, vid:%d) prevQ:%d",lastColumnQ,idx,vid,prevQ);
	//fix: guard the first hop too — the original dereferenced pdQ[prevQ]
	//before checking, reading pdQ[-1] when the last column has no predecessor
	while (prevQ!=-1)
	{
		dQ=pdQ[prevQ];
		vid = dQ[idx].vid;
		idx = dQ[idx].idx;
		printf("\n Q%d: (idx:%d, vid:%d)",prevQ,idx,vid);
		prevQ = d_arrPrevQ[prevQ];
	}
	printf("\nEnd of Embedding\n");
}
//Print embedding #firstEmbedding. The last Q column index is required so the
//embedding can be traversed backwards through the predecessor chain.
inline cudaError_t printEmbedding(Embedding **pdQ,int *d_arrSizedQ,int *d_arrPrevQ,int sizepdQ,int firstEmbedding,int lastColumnQ){
    cudaError_t cudaStatus;
    kernelPrintEmbedding<<<1,1>>>(pdQ, d_arrSizedQ, d_arrPrevQ, sizepdQ, firstEmbedding, lastColumnQ);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a matching %-conversion; fixed.
        fprintf(stderr, "\n kernelPrintEmbedding in printEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Kernel: scatter the endpoints of every extension whose edge label equals
//(li,lij,lj) into the two new embedding columns dQ1/dQ2.
//d_scanResult[i] is the output slot of extension i (exclusive scan of marks).
__global__ void kernelSetValueFordQ(Extension *d_ValidExtension,int noElem_d_ValidExtension,Embedding *dQ1,Embedding *dQ2,int *d_scanResult,int li,int lij,int lj){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < noElem_d_ValidExtension){
        // Bug fix: the original tested the truthiness of `.lj` instead of
        // comparing it against the requested label `lj`.
        if (d_ValidExtension[i].li == li && d_ValidExtension[i].lij == lij && d_ValidExtension[i].lj == lj){
            dQ1[d_scanResult[i]].idx = -1;                 // first column has no predecessor
            dQ1[d_scanResult[i]].vid = d_ValidExtension[i].vgi;
            dQ2[d_scanResult[i]].idx = d_scanResult[i];    // link back to the row in dQ1
            dQ2[d_scanResult[i]].vid = d_ValidExtension[i].vgj;
        }
    }
}
//Build the two initial embedding columns for root edge label (li,lij,lj).
//Q1/Q2 hold the from/to vertices of every matching extension; their pointers,
//sizes and predecessor indices (-1 for Q1, 0 for Q2) are appended to the
//bookkeeping arrays. Q1 and Q2 are handed over to dArrPointerEmbedding and
//therefore intentionally NOT freed here.
inline cudaError_t createEmbeddingRoot(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,int *&dArrPrevQ,int &noElem_dArrPrevQ,Extension *d_ValidExtension,int noElem_d_ValidExtension,int li,int lij,int lj){
    cudaError_t cudaStatus;
    // All locals declared up front so the error gotos never jump over an
    // initialization (ill-formed in C++, present in the original).
    Embedding *Q1 = nullptr;      // first column  (idx=-1, vid=vgi)
    Embedding *Q2 = nullptr;      // second column (idx=row in Q1, vid=vgj)
    int *d_M = nullptr;           // temp: 1 where extension i has label (li,lij,lj)
    int *d_scanResult = nullptr;  // temp: exclusive scan of d_M (output slots)
    int sizedQ = 0;
    int noElem_d_Q = 0;           // number of matching extensions
    int iPrevQ = -1;              // predecessor index: -1 for Q1, then 0 for Q2
    dim3 block(blocksize);
    dim3 grid((noElem_d_ValidExtension + block.x - 1) / block.x);

    // 1. Mark every valid extension whose edge label equals (li,lij,lj).
    cudaStatus = cudaMalloc((void**)&d_M, noElem_d_ValidExtension * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc d_M in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(d_M, 0, noElem_d_ValidExtension * sizeof(int));
    kernelMarkExtension<<<grid, block>>>(d_ValidExtension, noElem_d_ValidExtension, d_M, li, lij, lj);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nkernelMarkExtension in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 2. Exclusive scan of the marks gives each match its output slot.
    cudaStatus = cudaMalloc((void**)&d_scanResult, noElem_d_ValidExtension * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc d_scanResult in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(d_scanResult, 0, noElem_d_ValidExtension * sizeof(int));
    cudaStatus = scanV(d_M, noElem_d_ValidExtension, d_scanResult);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nscanV() in createEmbeddingRoot() failed");
        goto Error;
    }

    // 3. Column size = last scan value + last mark.
    //    Bug fix: the original tried to add 1 via a host bool passed BY VALUE
    //    to kernelMatchLastElement, which a kernel can never update, so the
    //    size was wrong whenever the last extension matched. Use the same
    //    helper as createEmbeddingRoot1, which computes it on the device.
    cudaStatus = getSizeBaseOnScanResult(d_M, d_scanResult, noElem_d_ValidExtension, noElem_d_Q);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetSizeBaseOnScanResult in createEmbeddingRoot() failed");
        goto Error;
    }
    sizedQ = noElem_d_Q;
    printf("\nnoElem_d_Q1:%d", noElem_d_Q);

    // 4. Allocate Q1/Q2 (filled with -1) and scatter the matches into them.
    cudaStatus = cudaMalloc((void**)&Q1, sizedQ * sizeof(Embedding));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc Q1 in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(Q1, -1, sizedQ * sizeof(Embedding));
    cudaStatus = cudaMalloc((void**)&Q2, sizedQ * sizeof(Embedding));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc Q2 in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(Q2, -1, sizedQ * sizeof(Embedding));
    kernelSetValueFordQ<<<grid, block>>>(d_ValidExtension, noElem_d_ValidExtension, Q1, Q2, d_scanResult, li, lij, lj);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nkernelSetValueFordQ in createEmbeddingRoot() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 5. Publish the new columns and their bookkeeping entries.
    cudaStatus = getPointer(dArrPointerEmbedding, noElem_dArrPointerEmbedding, Q1);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetPointer(Q1) in createEmbeddingRoot() failed");
        goto Error;
    }
    cudaStatus = getPointer(dArrPointerEmbedding, noElem_dArrPointerEmbedding, Q2);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetPointer(Q2) in createEmbeddingRoot() failed");
        goto Error;
    }
    for (int j = 0; j < 2; j++)
    {
        cudaStatus = getSizedQ(dArrSizedQ, noElem_dArrSizedQ, sizedQ);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\ngetSizedQ(size) in createEmbeddingRoot() failed");
            goto Error;
        }
        cudaStatus = getSizedQ(dArrPrevQ, noElem_dArrPrevQ, iPrevQ);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\ngetSizedQ(prevQ) in createEmbeddingRoot() failed");
            goto Error;
        }
        iPrevQ++;
    }
Error:
    // Release scratch buffers (the original leaked them); cudaFree(nullptr) is a no-op.
    cudaFree(d_M);
    cudaFree(d_scanResult);
    return cudaStatus;
}
//Build the two initial embedding columns Q1/Q2 for root edge label (li,lij,lj)
//(variant without predecessor-index bookkeeping). The columns are handed over
//to dArrPointerEmbedding and therefore intentionally NOT freed here; their
//shared size is appended twice to dArrSizedQ.
inline cudaError_t createEmbeddingRoot1(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,Extension *d_ValidExtension,int noElem_d_ValidExtension,int li,int lij,int lj){
    cudaError_t cudaStatus;
    // All locals declared up front so the error gotos never jump over an
    // initialization (ill-formed in C++, present in the original).
    Embedding *Q1 = nullptr;      // first column  (idx=-1, vid=vgi)
    Embedding *Q2 = nullptr;      // second column (idx=row in Q1, vid=vgj)
    int *d_M = nullptr;           // temp: 1 where extension i has label (li,lij,lj)
    int *d_scanResult = nullptr;  // temp: exclusive scan of d_M (output slots)
    int sizedQ = 0;
    int noElem_d_Q = 0;           // number of matching extensions
    dim3 block(blocksize);
    dim3 grid((noElem_d_ValidExtension + block.x - 1) / block.x);

    // 1. Mark every valid extension whose edge label equals (li,lij,lj).
    cudaStatus = cudaMalloc((void**)&d_M, noElem_d_ValidExtension * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc d_M in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(d_M, 0, noElem_d_ValidExtension * sizeof(int));
    kernelMarkExtension<<<grid, block>>>(d_ValidExtension, noElem_d_ValidExtension, d_M, li, lij, lj);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nkernelMarkExtension in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 2. Exclusive scan of the marks gives each match its output slot.
    cudaStatus = cudaMalloc((void**)&d_scanResult, noElem_d_ValidExtension * sizeof(int));
    if (cudaStatus != cudaSuccess){
        // Original message said "cudaMalloc M failed" for this allocation too.
        fprintf(stderr, "\ncudaMalloc d_scanResult in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(d_scanResult, 0, noElem_d_ValidExtension * sizeof(int));
    cudaStatus = scanV(d_M, noElem_d_ValidExtension, d_scanResult);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nscanV() in createEmbeddingRoot1() failed");
        goto Error;
    }

    // 3. Column size = last scan value + last mark, computed on the device.
    cudaStatus = getSizeBaseOnScanResult(d_M, d_scanResult, noElem_d_ValidExtension, noElem_d_Q);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetSizeBaseOnScanResult noElem_d_Q in createEmbeddingRoot1() failed");
        goto Error;
    }
    sizedQ = noElem_d_Q;
    printf("\nnoElem_d_Q1:%d", noElem_d_Q);

    // 4. Allocate Q1/Q2 (filled with -1) and scatter the matches into them.
    cudaStatus = cudaMalloc((void**)&Q1, sizedQ * sizeof(Embedding));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc Q1 in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(Q1, -1, sizedQ * sizeof(Embedding));
    cudaStatus = cudaMalloc((void**)&Q2, sizedQ * sizeof(Embedding));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ncudaMalloc Q2 in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(Q2, -1, sizedQ * sizeof(Embedding));
    kernelSetValueFordQ<<<grid, block>>>(d_ValidExtension, noElem_d_ValidExtension, Q1, Q2, d_scanResult, li, lij, lj);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\nkernelSetValueFordQ in createEmbeddingRoot1() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 5. Publish the new columns and record their (identical) sizes.
    cudaStatus = getPointer(dArrPointerEmbedding, noElem_dArrPointerEmbedding, Q1);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetPointer(Q1) into dArrPointerEmbedding in createEmbeddingRoot1() failed");
        goto Error;
    }
    cudaStatus = getPointer(dArrPointerEmbedding, noElem_dArrPointerEmbedding, Q2);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\ngetPointer(Q2) into dArrPointerEmbedding in createEmbeddingRoot1() failed");
        goto Error;
    }
    for (int j = 0; j < 2; j++)
    {
        cudaStatus = getSizedQ(dArrSizedQ, noElem_dArrSizedQ, sizedQ);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\ngetSizedQ in createEmbeddingRoot1() failed");
            goto Error;
        }
    }
Error:
    // Release scratch buffers (the original leaked them); cudaFree(nullptr) is a no-op.
    cudaFree(d_M);
    cudaFree(d_scanResult);
    return cudaStatus;
}
//Kernel: initialise the right-most-path array so that dRMPath[i] == i.
__global__ void kernelInitializeValueForRMPath(int *dRMPath,int noElem_dRMPath){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= noElem_dRMPath) return; // guard the grid tail
    dRMPath[tid] = tid;
}
//Create the initial right-most path on the device: two entries {0, 1}.
//On success dRMPath points to device memory and noElem_dRMPath == 2.
inline cudaError_t createRMPath(int *&dRMPath,int &noElem_dRMPath){
    cudaError_t cudaStatus;
    // The initial right-most path always has two vertices.
    noElem_dRMPath = 2;
    cudaStatus = cudaMalloc((void**)&dRMPath, noElem_dRMPath * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc dRMPath failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    kernelInitializeValueForRMPath<<<1,2>>>(dRMPath, noElem_dRMPath);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n kernelInitializeValueForRMPath in createRMPath() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    return cudaStatus;
}
//Kernel: print every entry of the right-most-path array (debug helper).
__global__ void kernelPrintRMPath(int *dRMPath,int noElem_dRMPath){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= noElem_dRMPath) return;
    printf("\n dRMPath[%d]: %d", tid, dRMPath[tid]);
}
//Print the contents of dRMPath (device array) via kernelPrintRMPath.
inline cudaError_t printRMPath(int *dRMPath,int noElem_dRMPath){
    cudaError_t cudaStatus;
    dim3 block(blocksize);
    dim3 grid((noElem_dRMPath + block.x - 1) / block.x);
    kernelPrintRMPath<<<grid, block>>>(dRMPath, noElem_dRMPath);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original message named createRMPath() (copy-paste) and passed the
        // status enum without a %-conversion; both fixed.
        fprintf(stderr, "\n kernelPrintRMPath in printRMPath() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Kernel (single thread): copy the last entry of dArrSizedQ — the size of the
//most recent Q column, i.e. the current embedding count — into *dNumberOfEmbedding.
__global__ void kernelGetNumberOfEmbedding(int *dArrSizedQ,int noElem_dArrSizedQ,int *dNumberOfEmbedding){
    *dNumberOfEmbedding = dArrSizedQ[noElem_dArrSizedQ - 1];
}
//Fetch the current embedding count (last entry of dArrSizedQ, which lives on
//the device) into the host variable noElem_dArrPointerdHO.
inline cudaError_t findNumberOfEmbedding(int *dArrSizedQ,int noElem_dArrSizedQ,int &noElem_dArrPointerdHO){
    cudaError_t cudaStatus;
    // nullptr-init so the cudaFree in the error path is always safe
    // (the original could free an uninitialized pointer on cudaMalloc failure).
    int *dNumberOfEmbedding = nullptr;
    noElem_dArrPointerdHO = 0;
    cudaStatus = cudaMalloc((void**)&dNumberOfEmbedding, sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc dNumberOfEmbedding in findNumberOfEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(dNumberOfEmbedding, 0, sizeof(int));
    kernelGetNumberOfEmbedding<<<1,1>>>(dArrSizedQ, noElem_dArrSizedQ, dNumberOfEmbedding);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original message named createGraphHistory() (copy-paste); fixed.
        fprintf(stderr, "\n kernelGetNumberOfEmbedding in findNumberOfEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaMemcpy is blocking, so no extra synchronize is needed; check its status.
    cudaStatus = cudaMemcpy(&noElem_dArrPointerdHO, dNumberOfEmbedding, sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMemcpy in findNumberOfEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
Error:
    cudaFree(dNumberOfEmbedding);
    return cudaStatus;
}
//Allocate one per-embedding vertex-marker array of maxOfVer ints on the
//device, zero-initialised. The caller owns the returned buffer.
inline cudaError_t createElementdHO(int *&dHO,int maxOfVer){
    cudaError_t cudaStatus;
    cudaStatus = cudaMalloc((void**)&dHO, maxOfVer * sizeof(int));
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a %-conversion; fixed.
        fprintf(stderr, "\n cudaMalloc() for dHO in createElementdHO() failed: %s", cudaGetErrorString(cudaStatus));
    }
    else
    {
        cudaMemset(dHO, 0, maxOfVer * sizeof(int));
    }
    return cudaStatus;
}
//Kernel (single thread): store the device pointer dHO into slot `pos` of the
//device-side pointer table dArrPointerdHO.
__global__ void kernelAssignPointer(int **dArrPointerdHO,int pos,int *dHO){
    *(dArrPointerdHO + pos) = dHO;
}
//Store the device pointer dHO into slot `pos` of the device-side pointer
//table dArrPointerdHO (host wrapper around kernelAssignPointer).
inline cudaError_t assignPointer(int **&dArrPointerdHO,int pos,int *dHO){
    cudaError_t cudaStatus;
    kernelAssignPointer<<<1,1>>>(dArrPointerdHO, pos, dHO);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a %-conversion; fixed.
        fprintf(stderr, "\n kernelAssignPointer in assignPointer() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Kernel: one thread per row; prints all maxOfVer ints of its row of the
//device pointer table dArrPointerdHO (debug helper).
__global__ void kernelPrintDoublePointerInt(int **dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= noElem_dArrPointerdHO) return;
    int *values = dArrPointerdHO[row];
    for (int col = 0; col < maxOfVer; col++)
    {
        printf("\n Thread %d: j:%d V[%d]:%d", row, col, col, values[col]);
    }
}
/* Print a device int pointer table (dArrPointerdHO) given its element count
 * (noElem_dArrPointerdHO) and the fixed length of each row (maxOfVer). */
inline cudaError_t printDoublePointerInt(int **dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
    cudaError_t cudaStatus;
    dim3 block(blocksize);
    dim3 grid((noElem_dArrPointerdHO + block.x - 1) / block.x);
    kernelPrintDoublePointerInt<<<grid, block>>>(dArrPointerdHO, noElem_dArrPointerdHO, maxOfVer);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a %-conversion; fixed.
        fprintf(stderr, "\n kernelPrintDoublePointerInt in printDoublePointerInt() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Create the device pointer table dArrPointerdHO: noElem_dArrPointerdHO slots,
//each pointing to a freshly allocated, zeroed array of maxOfVer ints.
inline cudaError_t createdArrPointerdHO(int **&dArrPointerdHO,int noElem_dArrPointerdHO,unsigned int maxOfVer){
    cudaError_t cudaStatus;
    cudaStatus = cudaMalloc((void**)&dArrPointerdHO, noElem_dArrPointerdHO * sizeof(int*));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc() for dArrPointerdHO in createdArrPointerdHO() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    for (int i = 0; i < noElem_dArrPointerdHO; i++)
    {
        int *dHO = nullptr;
        cudaStatus = createElementdHO(dHO, maxOfVer);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\n createElementdHO() in createdArrPointerdHO() failed: %s", cudaGetErrorString(cudaStatus));
            goto Error;
        }
        // Original ignored assignPointer's status; check it.
        cudaStatus = assignPointer(dArrPointerdHO, i, dHO);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\n assignPointer() in createdArrPointerdHO() failed: %s", cudaGetErrorString(cudaStatus));
            goto Error;
        }
    }
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaDeviceSynchronize() in createdArrPointerdHO() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    return cudaStatus;
}
//Allocate one per-embedding edge-marker array of noElem_dHLN ints on the
//device, zero-initialised. The caller owns the returned buffer.
inline cudaError_t createElementdHLN(int *&dHLN,int noElem_dHLN){
    cudaError_t cudaStatus;
    cudaStatus = cudaMalloc((void**)&dHLN, noElem_dHLN * sizeof(int));
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a %-conversion; fixed.
        fprintf(stderr, "\n cudaMalloc() dHLN in createElementdHLN() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaMemset(dHLN, 0, noElem_dHLN * sizeof(int));
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaDeviceSynchronize() in createElementdHLN() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    return cudaStatus;
}
//Create the device pointer table dArrPointerdHLN: one slot per embedding
//(noElem_dArrPointerdHO of them), each pointing to a zeroed int array sized by
//the edge count of the embedding's graph (hNumberEdgeInEachGraph[hArrGraphId[i]]).
inline cudaError_t createdArrPointerdHLN(int **&dArrPointerdHLN,int noElem_dArrPointerdHO,int *hNumberEdgeInEachGraph,int *hArrGraphId){
    cudaError_t cudaStatus;
    // One pointer slot per embedding.
    cudaStatus = cudaMalloc((void**)&dArrPointerdHLN, noElem_dArrPointerdHO * sizeof(int*));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc() dArrPointerdHLN in createdArrPointerdHLN() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    for (int i = 0; i < noElem_dArrPointerdHO; i++)
    {
        // The edge array of embedding i is sized by its graph's edge count.
        int index = hArrGraphId[i];
        int *dHLN = nullptr;
        cudaStatus = createElementdHLN(dHLN, hNumberEdgeInEachGraph[index]);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\n createElementdHLN() in createdArrPointerdHLN() failed: %s", cudaGetErrorString(cudaStatus));
            goto Error;
        }
        cudaStatus = assignPointer(dArrPointerdHLN, i, dHLN);
        if (cudaStatus != cudaSuccess){
            fprintf(stderr, "\n assignPointer() in createdArrPointerdHLN() failed: %s", cudaGetErrorString(cudaStatus));
            goto Error;
        }
    }
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaDeviceSynchronize() in createdArrPointerdHLN() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    return cudaStatus;
}
//Kernel: one thread per embedding; derives the owning graph id from the
//global vertex id in the last Q column (graphId = vid / maxOfVer) and stores
//it in dArrGraphId.
__global__ void kernelFindGraphIdOfAllEmbedding(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrGraphId,unsigned int maxOfVer,int noElemOfEmbedding,int *dArrSizedQ){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= noElemOfEmbedding) return;
    Embedding *lastColumn = dArrPointerEmbedding[noElem_dArrPointerEmbedding - 1];
    dArrGraphId[tid] = lastColumn[tid].vid / maxOfVer;
}
//Compute the owning graph id of every embedding and return it both on the
//device (dArrGraphId, caller frees) and on the host (hArrGraphId, caller frees).
inline cudaError_t findGraphIdOfAllEmbedding(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *&hArrGraphId,unsigned int maxOfVer,int *&dArrGraphId,int noElemOfEmbedding,int *dArrSizedQ){
    cudaError_t cudaStatus;
    // dim3 declared before the first goto so no jump crosses an initialization.
    dim3 block(blocksize);
    dim3 grid((noElemOfEmbedding + block.x - 1) / block.x);
    hArrGraphId = (int*)malloc(noElemOfEmbedding * sizeof(int));
    if (hArrGraphId == NULL){
        printf("\nMalloc hArrGraphId in findGraphIdOfAllEmbedding() failed\n");
        exit(1);
    }
    cudaStatus = cudaMalloc((void**)&dArrGraphId, noElemOfEmbedding * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc() dArrGraphId in findGraphIdOfAllEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    kernelFindGraphIdOfAllEmbedding<<<grid, block>>>(dArrPointerEmbedding, noElem_dArrPointerEmbedding, dArrGraphId, maxOfVer, noElemOfEmbedding, dArrSizedQ);
    cudaDeviceSynchronize();
    // Bug fix: the original tested cudaStatus here without calling
    // cudaGetLastError(), so it re-checked the stale cudaMalloc status and
    // kernel failures went unnoticed.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n kernelFindGraphIdOfAllEmbedding in findGraphIdOfAllEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // Blocking copy of the ids back to the host; check its status (original didn't).
    cudaStatus = cudaMemcpy(hArrGraphId, dArrGraphId, noElemOfEmbedding * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMemcpy in findGraphIdOfAllEmbedding() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
Error:
    return cudaStatus;
}
//Kernel: one thread per embedding; prints its edge-marker row in
//dArrPointerdHLN, whose length is the edge count of the embedding's graph.
__global__ void kernelPrintdArrPointerdHLN(int **dArrPointerdHLN,int noElem_dArrPointerdHO,int *dNumberEdgeInEachGraph,int *dArrGraphId){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= noElem_dArrPointerdHO) return;
    int edgeCount = dNumberEdgeInEachGraph[dArrGraphId[row]];
    for (int col = 0; col < edgeCount; col++)
    {
        printf("\n Thread %d: j:%d dArrPointerdHLN[%d][%d]:%d", row, col, row, col, dArrPointerdHLN[row][col]);
    }
}
//Print the device pointer table dArrPointerdHLN; each row's length is looked
//up per embedding via dArrGraphId and dNumberEdgeInEachGraph.
inline cudaError_t printdArrPointerdHLN(int **dArrPointerdHLN,int noElem_dArrPointerdHO,int *dNumberEdgeInEachGraph,int *dArrGraphId){
    cudaError_t cudaStatus;
    dim3 block(blocksize);
    dim3 grid((noElem_dArrPointerdHO + block.x - 1) / block.x);
    kernelPrintdArrPointerdHLN<<<grid, block>>>(dArrPointerdHLN, noElem_dArrPointerdHO, dNumberEdgeInEachGraph, dArrGraphId);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original message named findGraphIdOfAllEmbedding() (copy-paste) and
        // passed the status enum without a %-conversion; both fixed.
        fprintf(stderr, "\n kernelPrintdArrPointerdHLN in printdArrPointerdHLN() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Kernel: one thread per embedding; looks up the edge count of the embedding's
//graph (via dArrGraphId) and stores it in dArrNumberEdgeOfEachdHLN.
__global__ void kernelCreatedArrNumberEdgeOfEachdHLN(int *dArrNumberEdgeOfEachdHLN,int noElemOfEmbedding,int *dArrGraphId,int *dNumberEdgeInEachGraph){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= noElemOfEmbedding) return;
    int graphId = dArrGraphId[tid];
    dArrNumberEdgeOfEachdHLN[tid] = dNumberEdgeInEachGraph[graphId];
}
//Build dArrNumberEdgeOfEachdHLN: for each embedding, the edge count of its
//graph, looked up through the per-embedding graph ids in dArrGraphId.
inline cudaError_t createdArrNumberEdgeOfEachdHLN(int *&dArrNumberEdgeOfEachdHLN,int noElemOfEmbedding,int *dArrGraphId,int *dNumberEdgeInEachGraph){
    cudaError_t cudaStatus;
    // dim3 declared before the first goto so no jump crosses an initialization.
    dim3 block(blocksize);
    dim3 grid((noElemOfEmbedding + block.x - 1) / block.x);
    // One slot per embedding.
    cudaStatus = cudaMalloc((void**)&dArrNumberEdgeOfEachdHLN, noElemOfEmbedding * sizeof(int));
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n cudaMalloc() dArrNumberEdgeOfEachdHLN in createdArrNumberEdgeOfEachdHLN() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    kernelCreatedArrNumberEdgeOfEachdHLN<<<grid, block>>>(dArrNumberEdgeOfEachdHLN, noElemOfEmbedding, dArrGraphId, dNumberEdgeInEachGraph);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n kernelCreatedArrNumberEdgeOfEachdHLN in createdArrNumberEdgeOfEachdHLN() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    return cudaStatus;
}
//Kernel: one thread per embedding; prints its row of dArrPointerdHLN, using
//the per-embedding row length stored in dArrNumberEdgeOfEachdHLN.
__global__ void kernelprintDoublePointerInt(int **dArrPointerdHLN,int noElemOfEmbedding,int *dArrNumberEdgeOfEachdHLN){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= noElemOfEmbedding) return;
    int *values = dArrPointerdHLN[row];
    int length = dArrNumberEdgeOfEachdHLN[row];
    for (int col = 0; col < length; col++)
    {
        printf("\n Thread %d: j:%d dArrPointerdHLN[%d][%d]:%d", row, col, row, col, values[col]);
    }
}
//Overload of printDoublePointerInt(): print dArrPointerdHLN using the
//per-embedding row lengths in dArrNumberEdgeOfEachdHLN.
inline cudaError_t printDoublePointerInt(int **dArrPointerdHLN,int noElemOfEmbedding,int *dArrNumberEdgeOfEachdHLN){
    cudaError_t cudaStatus;
    dim3 block(blocksize);
    dim3 grid((noElemOfEmbedding + block.x - 1) / block.x);
    kernelprintDoublePointerInt<<<grid, block>>>(dArrPointerdHLN, noElemOfEmbedding, dArrNumberEdgeOfEachdHLN);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        // Original passed the status enum without a %-conversion; fixed.
        fprintf(stderr, "\n kernelprintDoublePointerInt in printDoublePointerInt() failed: %s", cudaGetErrorString(cudaStatus));
    }
    return cudaStatus;
}
//Kernel: one thread per embedding. Walks the embedding backwards through the
//Q columns and marks its vertices (dArrPointerdHO[i]) and edges
//(dArrPointerdHLN[i]) with the value 2.
//The vid/maxOfVer arithmetic below implies that graph g owns the global vertex
//ids [g*maxOfVer, (g+1)*maxOfVer); d_O/d_N are the CSR-style adjacency offset
//and neighbor arrays over those global ids.
//NOTE(review): the inner while loops assume toVid is always found in
//fromVid's adjacency list; there is no bounds guard -- confirm the inputs
//guarantee this.
__global__ void kernelAssignValueForGraphHistory(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrPrevQ,int noElemOfEmbedding,int *d_O,int *d_N,unsigned int maxOfVer,int **dArrPointerdHO,int **dArrPointerdHLN,int *dArrNumberEdgeOfEachdHLN){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	//Each embedding updates its own graph history: dArrPointerdHO (vertex
	//markers for the embedding handled by thread i) and dArrPointerdHLN
	//(edge markers mapped from the vertex pairs).
	if(i<noElemOfEmbedding){
		int vid = dArrPointerEmbedding[noElem_dArrPointerEmbedding-1][i].vid; //vid of embedding i, read from the last Q column
		int indexOfFirstVertexInGraph = vid-(vid%maxOfVer); //global id of the first vertex of the graph containing vid
		int toVid = vid;//"to" endpoint of the current embedding edge
		int idxOfVertex= (vid%maxOfVer); //slot of this vertex inside dArrPointerdHO[i]
		dArrPointerdHO[i][idxOfVertex]=2; //mark the vertex as belonging to this embedding
		int prevQ= dArrPrevQ[noElem_dArrPointerEmbedding-1];
		int newi=dArrPointerEmbedding[noElem_dArrPointerEmbedding-1][i].idx; //row index into the previous Q column
		while (true)
		{
			vid = dArrPointerEmbedding[prevQ][newi].vid; //fetch the predecessor element via prevQ and newi
			int fromVid=vid; //"from" endpoint of the current embedding edge
			int idxEdge = d_O[vid]-d_O[indexOfFirstVertexInGraph]; //edge slot: offset of vid's adjacency start relative to the graph's first vertex
			int indexOfdN=d_O[fromVid];
			//Scan fromVid's adjacency list in d_N until toVid is found,
			//advancing the edge slot in lock-step.
			while (d_N[indexOfdN]!=toVid){
				idxEdge=idxEdge+1;
				indexOfdN++;
			}
			//Locate the reverse edge (toVid -> fromVid) the same way.
			int fromVidR=toVid;
			int toVidR=fromVid;
			int indexOfEdgeR=d_O[fromVidR]-d_O[indexOfFirstVertexInGraph];
			indexOfdN=d_O[fromVidR];
			while(d_N[indexOfdN]!=toVidR){
				indexOfEdgeR++;
				indexOfdN++;
			}
			//For vertices after the first, idxEdge already accounts for the
			//summed degrees of the preceding vertices via the d_O offsets.
			idxOfVertex = (vid%maxOfVer); //mark this vertex as part of the embedding
			dArrPointerdHO[i][idxOfVertex]=2;
			dArrPointerdHLN[i][idxEdge]=2;//mark the edge; the graph is simple and undirected, so edge AB equals BA and the reverse slot must be marked too
			dArrPointerdHLN[i][indexOfEdgeR]=2;
			if(dArrPrevQ[prevQ]==-1) return; //first Q column reached: the whole embedding has been traversed
			newi=dArrPointerEmbedding[prevQ][newi].idx; //otherwise follow the index into the previous column
			prevQ=dArrPrevQ[prevQ]; //step to the previous Q column
			toVid=fromVid; //the current "from" becomes the next edge's "to"
		}
	}
}
//Build the graph history for every embedding:
//  1. dArrPointerdHO : per-embedding vertex-marker arrays (maxOfVer ints each)
//  2. dArrPointerdHLN: per-embedding edge-marker arrays, sized by the edge
//     count of the embedding's graph
//  3. dArrNumberEdgeOfEachdHLN: edge count of each embedding's graph
//  4. kernelAssignValueForGraphHistory marks each embedding's vertices/edges.
inline cudaError_t createGraphHistory(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int *dArrPrevQ,int noElem_dArrPointerEmbedding,int noElem_dArrSizedQ,int noElem_dArrPrevQ,int *d_O,int *d_LO,int numberOfElementd_O,int *d_N,int *d_LN,int numberOfElementd_N,unsigned int maxOfVer,int **&dArrPointerdHO,int &noElem_dArrPointerdHO,int **&dArrPointerdHLN,int *&dArrNumberEdgeOfEachdHLN,int *hNumberEdgeInEachGraph,int noElem_hNumberEdgeInEachGraph,int *dNumberEdgeInEachGraph){
    cudaError_t cudaStatus;
    // Locals declared up front so the error gotos never jump over an
    // initialization (ill-formed in C++, present in the original).
    int noElemOfEmbedding = 0;
    int *dArrGraphId = nullptr; // device: graph id of each embedding (scratch)
    int *hArrGraphId = nullptr; // host copy, used to size each dHLN row (scratch)
    dim3 block(blocksize);
    dim3 grid;

    // The embedding count is the last entry of dArrSizedQ.
    cudaStatus = findNumberOfEmbedding(dArrSizedQ, noElem_dArrSizedQ, noElem_dArrPointerdHO);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n findNumberOfEmbedding() in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    noElemOfEmbedding = noElem_dArrPointerdHO;

    // 1. Vertex-marker arrays, one per embedding.
    cudaStatus = createdArrPointerdHO(dArrPointerdHO, noElem_dArrPointerdHO, maxOfVer);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n createdArrPointerdHO() in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 2. Graph id of each embedding (vid of the last Q column / maxOfVer),
    //    needed on the host to size each embedding's edge array.
    cudaStatus = findGraphIdOfAllEmbedding(dArrPointerEmbedding, noElem_dArrPointerEmbedding, hArrGraphId, maxOfVer, dArrGraphId, noElemOfEmbedding, dArrSizedQ);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n findGraphIdOfAllEmbedding() in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    cudaStatus = createdArrPointerdHLN(dArrPointerdHLN, noElem_dArrPointerdHO, hNumberEdgeInEachGraph, hArrGraphId);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n createdArrPointerdHLN() in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 3. Per-embedding edge counts.
    cudaStatus = createdArrNumberEdgeOfEachdHLN(dArrNumberEdgeOfEachdHLN, noElemOfEmbedding, dArrGraphId, dNumberEdgeInEachGraph);
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n createdArrNumberEdgeOfEachdHLN() in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // 4. One thread per embedding walks it and marks its vertices/edges.
    //    Bug fix: the original launched <<<1,noElemOfEmbedding>>>, which fails
    //    once the embedding count exceeds the per-block thread limit (1024).
    grid = dim3((noElemOfEmbedding + block.x - 1) / block.x);
    kernelAssignValueForGraphHistory<<<grid, block>>>(dArrPointerEmbedding, noElem_dArrPointerEmbedding, dArrPrevQ, noElemOfEmbedding, d_O, d_N, maxOfVer, dArrPointerdHO, dArrPointerdHLN, dArrNumberEdgeOfEachdHLN);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess){
        fprintf(stderr, "\n kernelAssignValueForGraphHistory in createGraphHistory() failed: %s", cudaGetErrorString(cudaStatus));
    }
Error:
    // Release scratch buffers (the original leaked both).
    free(hArrGraphId);
    cudaFree(dArrGraphId);
    return cudaStatus;
}
//Kernel: one thread per graph; computes the adjacency-list length of each
//graph from the d_O offsets (graph g owns vertex ids [g*maxOfVer,(g+1)*maxOfVer))
//and stores it in dNumberEdgeInEachGraph.
__global__ void kernelGetNumberOfEdgeInGraph(int *d_O,int numberOfElementd_N,unsigned int numberOfGraph,unsigned int maxOfVer,int *dNumberEdgeInEachGraph){
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numberOfGraph) return;
    int firstSlot = d_O[gid * maxOfVer]; // adjacency start of this graph's first vertex
    int edgeCount;
    if (gid == (numberOfGraph - 1)){
        // Last graph: its adjacency entries run to the end of d_N.
        edgeCount = numberOfElementd_N - firstSlot;
    }else
    {
        // Otherwise: up to the adjacency start of the next graph's first vertex.
        edgeCount = d_O[(gid + 1) * maxOfVer] - firstSlot;
    }
    dNumberEdgeInEachGraph[gid] = edgeCount;
}
//Compute the edge count of every graph in the database and return it both on
//the device (dNumberEdgeInEachGraph) and on the host (hNumberEdgeInEachGraph).
//Both arrays are allocated here with numberOfGraph elements and are owned by
//the caller. Returns the first CUDA error encountered, or cudaSuccess.
inline cudaError_t getNumberOfEdgeInGraph(int *d_O,int numberOfElementd_N,unsigned int maxOfVer,int *&hNumberEdgeInEachGraph,int *&dNumberEdgeInEachGraph,unsigned int numberOfGraph){
	cudaError_t cudaStatus;
	//Launch geometry hoisted above the first goto so no initialization is skipped.
	dim3 block(blocksize);
	dim3 grid((numberOfGraph + block.x - 1)/block.x);
	cudaStatus = cudaMalloc((void**)&dNumberEdgeInEachGraph,numberOfGraph*sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc() dNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	kernelGetNumberOfEdgeInGraph<<<grid,block>>>(d_O,numberOfElementd_N,numberOfGraph,maxOfVer,dNumberEdgeInEachGraph);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelGetNumberOfEdgeInGraph in getNumberOfEdgeInGraph() failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
	hNumberEdgeInEachGraph = (int*)malloc(numberOfGraph*sizeof(int));
	if(hNumberEdgeInEachGraph==NULL){
		printf("\n Malloc hNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed" );
		exit(1);
	}
	//BUG FIX: the return status of this copy was previously ignored; a failed
	//copy would have left hNumberEdgeInEachGraph with garbage while reporting success.
	cudaStatus = cudaMemcpy(hNumberEdgeInEachGraph,dNumberEdgeInEachGraph,numberOfGraph*sizeof(int),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy() dNumberEdgeInEachGraph in getNumberOfEdgeInGraph() failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
Error:
	return cudaStatus;
}
//Kernel: debug dump of every (idx, vid) entry of each embedding column Q.
//One thread per column; dArrSizedQ[i] gives the row count of column i.
__global__ void kernelprintAllEmbeddingColumn(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrPointerEmbedding){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrPointerEmbedding) return;
	Embedding *column = dArrPointerEmbedding[tid];
	int rowCount = dArrSizedQ[tid];
	for (int j = 0; j < rowCount; ++j) {
		printf("\n Thread %d: j:%d (idx:%d vid:%d)",tid,j,column[j].idx,column[j].vid);
	}
}
//Host wrapper: launch kernelprintAllEmbeddingColumn over all columns and wait
//for completion; returns the launch/execution status.
inline cudaError_t printAllEmbeddingColumn(Embedding **dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrPointerEmbedding){
	dim3 threads(blocksize);
	dim3 blocks((noElem_dArrPointerEmbedding + threads.x - 1)/threads.x);
	printf("\n****************** All Columm in Embedding dArrPointerEmbedding *************\n");
	kernelprintAllEmbeddingColumn<<<blocks,threads>>>(dArrPointerEmbedding,dArrSizedQ,noElem_dArrPointerEmbedding);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize() in printAllEmbeddingColumn() failed",cudaStatus);
	}
	return cudaStatus;
}
//Kernel: print one full embedding chain, starting from row posRow of column
//posColumn and walking backwards via each Embedding's idx field (the row index
//into the previous column) until the sentinel -1 is reached.
//Launched with <<<1,1>>>; debugging aid only.
__global__ void kernelprintEmbeddingFromPos(Embedding **dArrPointerEmbedding,int posColumn,int posRow){
	Embedding *Q =dArrPointerEmbedding[posColumn];
	printf("\n Q[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);
	while (true)
	{
		//Step to the row in the previous column that this entry points at.
		posRow = Q[posRow].idx;
		posColumn=posColumn-1;
		Q=dArrPointerEmbedding[posColumn];
		printf("\n Q[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);
		//NOTE(review): posRow is advanced again here before the -1 test, so the
		//loop steps the idx pointer twice per iteration — verify this double
		//step is intended and matches the chain layout used elsewhere.
		posRow=Q[posRow].idx;
		if(posRow==-1) return;
	}
}
//Host wrapper: print the single embedding that ends at (posColumn, posRow)
//in the last Q column; single-thread debug launch.
inline cudaError_t printEmbeddingFromPos(Embedding **dArrPointerEmbedding,int posColumn,int posRow){
	printf("\n ****Embeding from posColumn: %d posRow:%d **************\n",posColumn,posRow);
	kernelprintEmbeddingFromPos<<<1,1>>>(dArrPointerEmbedding,posColumn,posRow);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize() in printEmbeddingFromPos() failed",cudaStatus);
	}
	return cudaStatus;
}
//Kernel: for each embedding row i, compute the degree of the vertex stored in
//column idxQ and write it into dArrDegreeOfVid[i] (stored as float so the
//subsequent find_maximum_kernel reduction can consume it directly).
//Degree = d_O[<next valid offset>] - d_O[vid]; the "next" offset must skip the
//-1 padding slots left by graphs with fewer than maxOfVer vertices.
__global__ void kernelCalDegreeOfVid(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N,unsigned int maxOfVer,float *dArrDegreeOfVid){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i<noElem_Embedding){
		int vid = dArrPointerEmbedding[idxQ][i].vid;
		float degreeOfV =0;
		int nextVid;
		int graphid;
		int lastGraphId=(numberOfElementd_O-1)/maxOfVer;
		if (vid==numberOfElementd_O-1){ //vid is the very last vertex slot in d_O
			degreeOfV=numberOfElementd_N-d_O[vid]; //so its degree is the total edge count minus d_O[vid].
		}
		else
		{
			nextVid = vid+1; //inspect the following slot: real vertex or -1 padding?
			graphid=vid/maxOfVer;
			if(d_O[nextVid]==-1 && graphid==lastGraphId){
				//Padded tail of the last graph: close the range with the total edge count.
				degreeOfV=numberOfElementd_N-d_O[vid];
			}
			else if(d_O[nextVid]==-1 && graphid!=lastGraphId){
				//Padded tail of an inner graph: use the first offset of the next graph.
				nextVid=(graphid+1)*maxOfVer;
				degreeOfV=d_O[nextVid]-d_O[vid];
			}
			else
			{
				//Common case: next slot is a real vertex of the same graph.
				degreeOfV=d_O[nextVid]-d_O[vid];
			}
		}
		dArrDegreeOfVid[i]=degreeOfV;
	}
}
//Compute the degree of every vertex on embedding column idxQ and store the
//results in the freshly allocated device array dArrDegreeOfVid (one float per
//embedding row, owned by the caller). Returns the first CUDA error, if any.
inline cudaError_t findDegreeOfVer(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N, unsigned int maxOfVer,float *&dArrDegreeOfVid){
	cudaError_t cudaStatus;
	//Launch geometry hoisted above the first goto so no initialization is skipped.
	dim3 block(blocksize);
	dim3 grid((noElem_Embedding + block.x -1)/block.x);
	cudaStatus = cudaMalloc((void**)&dArrDegreeOfVid,noElem_Embedding*sizeof(float));
	if(cudaStatus!=cudaSuccess){
		//BUG FIX: the message used to name findMaxDegreeOfVer(), pointing the
		//reader at the wrong failure site.
		fprintf(stderr,"\ncudaMalloc dArrDegreeOfVid in findDegreeOfVer() failed");
		goto Error;
	}
	cudaMemset(dArrDegreeOfVid,0,noElem_Embedding*sizeof(float));
	kernelCalDegreeOfVid<<<grid,block>>>(dArrPointerEmbedding,idxQ,d_O, numberOfElementd_O,noElem_Embedding,numberOfElementd_N, maxOfVer,dArrDegreeOfVid);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelCalDegreeOfVid in findDegreeOfVer() failed: %s",cudaGetErrorString(cudaStatus));
		goto Error;
	}
Error:
	return cudaStatus;
}
//Find the maximum vertex degree on embedding column idxQ. The per-vertex
//degrees are computed into dArrDegreeOfVid (kept alive for the caller — the
//extension step reuses it) and the maximum is reduced on the device and
//returned through maxDegreeOfVer.
inline cudaError_t findMaxDegreeOfVer(Embedding **dArrPointerEmbedding,int idxQ,int *d_O, int numberOfElementd_O,int noElem_Embedding,int numberOfElementd_N,unsigned int maxOfVer,int &maxDegreeOfVer,float *&dArrDegreeOfVid){
	cudaError_t cudaStatus;
	//BUG FIX: every pointer is initialised before the first goto. Previously
	//h_max and d_max were uninitialised when findDegreeOfVer() failed, so the
	//Error path called free()/cudaFree() on indeterminate pointers (UB).
	float *h_max = NULL;
	float *d_max = NULL;
	int *d_mutex = NULL;
	dim3 gridSize(256);
	dim3 blockSize(256);
	//Degrees of all vids on column idxQ, one float per embedding row.
	cudaStatus = findDegreeOfVer(dArrPointerEmbedding,idxQ,d_O, numberOfElementd_O,noElem_Embedding,numberOfElementd_N, maxOfVer,dArrDegreeOfVid);
	if(cudaStatus!=cudaSuccess){
		//BUG FIX: the message started with the broken escape "\f".
		fprintf(stderr,"\n findDegreeOfVer() in findMaxDegreeOfVer() failed");
		goto Error;
	}
	printf("\n*******dArrDegreeOfVid*************\n");
	printFloat(dArrDegreeOfVid,noElem_Embedding);
	h_max = (float*)malloc(sizeof(float));
	if(h_max==NULL){
		printf("\nMalloc h_max failed");
		exit(1);
	}
	cudaStatus=cudaMalloc((void**)&d_max,sizeof(float));
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\ncudaMalloc d_max failed");
		goto Error;
	}
	cudaMemset(d_max,0,sizeof(float));
	cudaStatus=cudaMalloc((void**)&d_mutex,sizeof(int));
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\ncudaMalloc d_mutex failed");
		goto Error;
	}
	cudaMemset(d_mutex,0,sizeof(int));
	//Device-wide max reduction over the degree array.
	find_maximum_kernel<<<gridSize, blockSize>>>(dArrDegreeOfVid, d_max, d_mutex, noElem_Embedding);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\ncudaDeviceSynchronize find_maximum_kernel in findMaxDegreeOfVer() failed");
		goto Error;
	}
	//BUG FIX: the copy status was previously ignored.
	cudaStatus = cudaMemcpy(h_max, d_max, sizeof(float), cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\ncudaMemcpy d_max in findMaxDegreeOfVer() failed");
		goto Error;
	}
	maxDegreeOfVer = (int)(*h_max); //largest degree among the vids of this Q column
	printf("\nMax degree of vid in Q column is: %d",maxDegreeOfVer);
Error:
	free(h_max);
	cudaFree(d_max);
	cudaFree(d_mutex); //BUG FIX: d_mutex was leaked
	//dArrDegreeOfVid is intentionally kept: the caller needs the per-vertex
	//degrees for the extension step.
	return cudaStatus;
}
//Kernel: one thread per embedding row. For the vertex on column idxQ of its
//embedding, scan every incident edge (degree taken from dArrDegreeOfVid) and
//record each valid forward extension: the slot i*maxDegreeOfVer+j of dArrV is
//flagged valid=1 and the matching slot of dArrExtension is filled with the
//DFS code (vi,vj,li,lij,lj), the global edge endpoints (vgi,vgj) and the
//(posColumn,posRow) row pointer of the embedding being extended.
//A candidate neighbour is rejected when its label is below minLabel or when it
//already occurs somewhere in the embedding chain (walked backwards through the
//idx pointers). printf calls are debug traces.
__global__ void kernelFindValidForwardExtension(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int noElem_Embedding,int *d_O,int *d_LO,int *d_N,int *d_LN,float *dArrDegreeOfVid,int maxDegreeOfVer,struct_V *dArrV,EXT *dArrExtension,int idxQ,int minLabel,int maxid){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i<noElem_Embedding){
		//Anchor of this embedding: last column, row i.
		int posColumn =noElem_dArrPointerEmbedding-1;
		int posRow=i;
		int col = posColumn;
		int row = posRow;
		Embedding *Q=dArrPointerEmbedding[idxQ];
		int vid = Q[i].vid;
		int degreeVid=__float2int_rn(dArrDegreeOfVid[i]);
		//Iterate over the neighbours of vid; the iteration count is its degree.
		int indexToVidIndN=d_O[vid];
		int labelFromVid = d_LO[vid];
		int toVid;
		int labelToVid;
		bool b=true;
		for (int j = 0; j < degreeVid; j++,indexToVidIndN++) //Visit every vertex adjacent to vid; if the vertex is not in the embedding then neither is the edge, because this is the last column Q
		{
			toVid=d_N[indexToVidIndN]; //vid of the candidate neighbour
			labelToVid = d_LO[toVid]; //label of the candidate neighbour
			//Reset the chain walk to the anchor for each candidate.
			posColumn=col;
			posRow=row;
			Q=dArrPointerEmbedding[posColumn];
			printf("\nThread %d, j: %d has ToVidLabel:%d",i,j,labelToVid);
			//1. Reject when the neighbour's label is below minLabel.
			if(labelToVid<minLabel) continue;
			//2. Reject when toVid already occurs in the embedding: walk the
			//columns from last to first, comparing each stored vid with toVid.
			//printf("\n Q[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);//Q[1][0]
			if(toVid==Q[posRow].vid) continue;
			//printf("\nj:%d toVid:%d Q.vid:%d",j,toVid,Q[posRow].vid);
			while (true)
			{
				posRow = Q[posRow].idx;//0
				posColumn=posColumn-1; //0
				Q=dArrPointerEmbedding[posColumn];
				//printf("\n posColumn[%d] Row[%d] (idx:%d vid:%d)",posColumn,posRow,Q[posRow].idx,Q[posRow].vid);//Q[0][0]
				//printf("\nj:%d toVid:%d Q.vid:%d",j,toVid,Q[posRow].vid);
				if(toVid==Q[posRow].vid) {
					b=false; break; //duplicate found — candidate is invalid
				}
				posRow=Q[posRow].idx;//-1
				//printf("\nposRow:%d",posRow);
				if(posRow==-1) break; //reached the first column — no duplicate
			}
			if (b==false){b=true; continue;}
			//Candidate accepted: record it at its reserved slot in the
			//(embedding x maxDegree) candidate grid.
			int indexOfd_arr_V=i*maxDegreeOfVer+j;
			//printf("\nThread %d: m:%d",i,maxDegreeOfVer);
			int indexOfd_LN=indexToVidIndN;
			dArrV[indexOfd_arr_V].valid=1;
			printf("\ndArrV[%d].valid:%d",indexOfd_arr_V,dArrV[indexOfd_arr_V].valid);
			//Fill in the extension record for the accepted candidate.
			dArrExtension[indexOfd_arr_V].vgi=vid;
			dArrExtension[indexOfd_arr_V].vgj=toVid;
			dArrExtension[indexOfd_arr_V].lij=d_LN[indexOfd_LN];
			printf("\n");
			printf("d_LN[%d]:%d ",indexOfd_LN,d_LN[indexOfd_LN]);
			dArrExtension[indexOfd_arr_V].li=labelFromVid;
			dArrExtension[indexOfd_arr_V].lj=labelToVid;
			dArrExtension[indexOfd_arr_V].vi=idxQ;
			dArrExtension[indexOfd_arr_V].vj=maxid+1; //forward extension introduces a new DFS vertex id
			dArrExtension[indexOfd_arr_V].posColumn=col;
			dArrExtension[indexOfd_arr_V].posRow=row;
		}
	}
}
//Kernel: debug dump of the validity flags in dArrV together with the matching
//extension record from dArrExtension. One thread per slot.
__global__ void kernelprintdArrV(struct_V *dArrV,int noElem_dArrV,EXT *dArrExtension){
	int tid = blockDim.x *blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrV) return;
	EXT e = dArrExtension[tid];
	printf("\n dArrV[%d].backward:%d ,dArrV[%d].valid:%d Extension:(vgi:%d,vgj:%d) (vi:%d vj:%d li:%d lij:%d lj:%d)",tid,dArrV[tid].backward,tid,dArrV[tid].valid,e.vgi,e.vgj,e.vi,e.vj,e.li,e.lij,e.lj);
}
//Host wrapper: launch kernelprintdArrV and wait for completion.
inline cudaError_t printdArrV(struct_V *dArrV,int noElem_dArrV,EXT *dArrExtension){
	dim3 threads(blocksize);
	dim3 blocks((noElem_dArrV + threads.x - 1)/threads.x);
	kernelprintdArrV<<<blocks,threads>>>(dArrV,noElem_dArrV,dArrExtension);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize() in printdArrV() failed", cudaStatus);
	}
	return cudaStatus;
}
//Kernel: copy the 'valid' flag of every dArrV slot into the plain int array
//dArrValid (input for the exclusive scan that follows).
__global__ void kernelExtractValidFromdArrV(struct_V *dArrV,int noElem_dArrV,int *dArrValid){
	int tid = threadIdx.x + blockDim.x*blockIdx.x;
	if (tid >= noElem_dArrV) return;
	dArrValid[tid] = dArrV[tid].valid;
}
//Kernel: stream-compact the valid extension records from dArrExtension into
//dExt; the scan result dArrValidScanResult gives each valid slot its packed
//destination index.
__global__ void kernelExtractValidExtensionTodExt(EXT *dArrExtension,int *dArrValid,int *dArrValidScanResult,int noElem_dArrV,EXT *dExt,int noElem_dExt){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= noElem_dArrV) return;
	if (dArrValid[tid] != 1) return;
	int dst = dArrValidScanResult[tid];
	dExt[dst].vi = dArrExtension[tid].vi;
	dExt[dst].vj = dArrExtension[tid].vj;
	dExt[dst].li = dArrExtension[tid].li;
	dExt[dst].lij = dArrExtension[tid].lij;
	dExt[dst].lj = dArrExtension[tid].lj;
	dExt[dst].vgi = dArrExtension[tid].vgi;
	dExt[dst].vgj = dArrExtension[tid].vgj;
	dExt[dst].posColumn = dArrExtension[tid].posColumn;
	dExt[dst].posRow = dArrExtension[tid].posRow;
}
//Kernel: debug dump of every EXT record in dExt, one thread per record.
__global__ void kernelPrintdExt(EXT *dExt,int noElem_dExt){
	int tid = blockDim.x *blockIdx.x + threadIdx.x;
	if (tid >= noElem_dExt) return;
	EXT e = dExt[tid];
	printf("\n Thread %d (vi:%d vj:%d li:%d lij:%d lj:%d) (vgi:%d vgj:%d) (posColumn:%d posRow:%d)",tid,e.vi,e.vj,e.li,e.lij,e.lj,e.vgi,e.vgj,e.posColumn,e.posRow);
}
//Host wrapper: launch kernelPrintdExt and wait for completion.
inline cudaError_t printdExt(EXT *dExt,int noElem_dExt){
	dim3 threads(blocksize);
	dim3 blocks((noElem_dExt + threads.x - 1)/threads.x);
	kernelPrintdExt<<<blocks,threads>>>(dExt,noElem_dExt);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize() kernelPrintdExt in printdExt() failed", cudaStatus);
	}
	return cudaStatus;
}
//Host: compact the valid extensions out of the (embedding x maxDegree)
//candidate grid. Pipeline: copy the valid flags to an int array, exclusive-scan
//it to obtain packed destination indices and the output size, allocate dExt,
//then stream-compact with kernelExtractValidExtensionTodExt.
//dExt (noElem_dExt records) is allocated here and owned by the caller; the
//intermediate dArrValid/dArrValidScanResult buffers are freed on every path.
inline cudaError_t extractValidExtensionTodExt(EXT *dArrExtension,struct_V *dArrV,int noElem_dArrV,EXT *&dExt,int &noElem_dExt){
	cudaError_t cudaStatus;
	//1. Extract the per-slot valid flags into dArrValid.
	int *dArrValid = nullptr;
	cudaStatus = cudaMalloc((void**)&dArrValid, noElem_dArrV*sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrValid in extractValidExtensionTodExt() failed", cudaStatus);
		goto Error;
	}
	dim3 block(blocksize);
	dim3 grid((noElem_dArrV + block.x -1)/block.x);
	kernelExtractValidFromdArrV<<<grid,block>>>(dArrV,noElem_dArrV,dArrValid);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() kernelExtractValidFromdArrV in extractValidExtensionTodExt() failed", cudaStatus);
		goto Error;
	}
	//Debug: dump dArrValid.
	printf("\n********dArrValid******\n");
	printInt(dArrValid,noElem_dArrV);
	//2. Scan dArrValid to obtain the packed destination index of each slot.
	int *dArrValidScanResult = nullptr;
	cudaStatus = cudaMalloc((void**)&dArrValidScanResult,sizeof(int)*noElem_dArrV);
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n CudaMalloc dArrValidScanResult in extractValidExtensionToExt() failed");
		goto Error;
	}
	else
	{
		cudaMemset(dArrValidScanResult,0,sizeof(int)*noElem_dArrV);
	}
	cudaStatus = scanV(dArrValid,noElem_dArrV,dArrValidScanResult);
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n scanV dArrValid in extractValidExtensionToExt() failed");
		goto Error;
	}
	//Debug: dump the scan result.
	printf("\n********dArrValidScanResult******\n");
	printInt(dArrValidScanResult,noElem_dArrV);
	//3. Output size = number of valid slots, derived from flags + scan result.
	noElem_dExt=0;
	cudaStatus=getSizeBaseOnScanResult(dArrValid,dArrValidScanResult,noElem_dArrV,noElem_dExt);
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n getSizeBaseOnScanResult in extractValidExtensionToExt() failed");
		goto Error;
	}
	//Debug: show the computed output size.
	printf("\n******** noElem_dExt ******\n");
	printf("\n noElem_dExt:%d",noElem_dExt);
	//4. Allocate dExt (noElem_dExt records) and compact dArrExtension into it
	//guided by dArrValid / dArrValidScanResult.
	cudaStatus = cudaMalloc((void**)&dExt,sizeof(EXT)*noElem_dExt);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dExt in extractValidExtensionTodExt() failed", cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dExt,0,sizeof(EXT)*noElem_dExt);
	}
	dim3 blockb(blocksize);
	dim3 gridb((noElem_dArrV+blockb.x -1)/blockb.x);
	kernelExtractValidExtensionTodExt<<<gridb,blockb>>>(dArrExtension,dArrValid,dArrValidScanResult,noElem_dArrV,dExt,noElem_dExt);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() kernelExtractValidExtensionTodExt in extractValidExtensionTodExt() failed", cudaStatus);
		goto Error;
	}
	//Debug: dump the compacted dExt.
	printf("\n********** dExt **********\n");
	cudaStatus =printdExt(dExt,noElem_dExt);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n printdExt() in extractValidExtensionTodExt() failed", cudaStatus);
		goto Error;
	}
Error:
	cudaFree(dArrValid);
	cudaFree(dArrValidScanResult);
	return cudaStatus;
}
//Find every valid forward extension from column idxQ over all embeddings and
//return them in the freshly allocated device array dExt (noElem_dExt records,
//owned by the caller). Internally builds an (embedding x maxDegree) candidate
//grid (dArrV flags + dArrExtension records) and compacts the valid candidates
//with extractValidExtensionTodExt(). All scratch buffers are freed on exit.
inline cudaError_t forwardExtensionQ(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrSizedQ,int noElem_Embedding,int idxQ,EXT *&dExt,int &noElem_dExt,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N,unsigned int maxOfVer,int minLabel,int maxid){
	cudaError_t cudaStatus;
	//BUG FIX: every device pointer starts as nullptr so the Error path never
	//frees an indeterminate pointer (dArrV was previously uninitialised when
	//findMaxDegreeOfVer() failed).
	int maxDegreeOfVer=0;
	float *dArrDegreeOfVid=nullptr; //degree of each vid on column idxQ
	struct_V *dArrV=nullptr;        //validity flags: one slot per (embedding, neighbour); valid=1, backward=1 would mark a backward edge
	EXT *dArrExtension=nullptr;     //candidate DFS-code records, parallel to dArrV
	int noElem_dArrV=0;
	dim3 block(blocksize);
	dim3 grid((noElem_Embedding+block.x - 1)/block.x);
	//Max degree of the vids on column idxQ bounds the candidate grid width.
	cudaStatus = findMaxDegreeOfVer(dArrPointerEmbedding,idxQ,d_O,numberOfElementd_O,noElem_Embedding, numberOfElementd_N,maxOfVer,maxDegreeOfVer,dArrDegreeOfVid);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n findMaxDegreeOfVer() in forwardExtensionQ() failed");
		goto Error;
	}
	//Candidate grid: maxDegreeOfVer slots per embedding, zero-initialised so
	//unused slots read as invalid.
	noElem_dArrV=maxDegreeOfVer*noElem_Embedding;
	cudaStatus=cudaMalloc((void**)&dArrV,noElem_dArrV*sizeof(struct_V));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrV in forwardExtensionQ() failed");
		goto Error;
	}
	cudaMemset(dArrV,0,noElem_dArrV*sizeof(struct_V));
	cudaStatus = cudaMalloc((void**)&dArrExtension,noElem_dArrV*sizeof(EXT));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrExtension forwardExtensionQ() failed");
		goto Error;
	}
	cudaMemset(dArrExtension,0,noElem_dArrV*sizeof(EXT));
	printf("\nnoElem_dArrV:%d",noElem_dArrV );
	//Mark valid forward extensions in dArrV and fill dArrExtension with the
	//DFS code (vi,vj,li,lij,lj), the global edge (vgi,vgj) and the row pointer.
	kernelFindValidForwardExtension<<<grid,block>>>(dArrPointerEmbedding,noElem_dArrPointerEmbedding,noElem_Embedding,d_O,d_LO,d_N,d_LN,dArrDegreeOfVid,maxDegreeOfVer,dArrV,dArrExtension,idxQ,minLabel,maxid);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelFindValidForwardExtension in forwardExtensionQ() failed");
		goto Error;
	}
	//Compact the valid candidates from dArrExtension into dExt.
	cudaStatus =extractValidExtensionTodExt(dArrExtension,dArrV,noElem_dArrV,dExt,noElem_dExt);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n extractValidExtensionTodExt() in forwardExtensionQ() failed");
		goto Error;
	}
Error:
	cudaFree(dArrDegreeOfVid); //BUG FIX: the degree array was leaked on every call
	cudaFree(dArrExtension);
	cudaFree(dArrV);
	return cudaStatus;
}
//Kernel (single-thread launch): store the device address dExt into slot pos of
//the device-resident pointer table dArrPointerExt — a device array of pointers
//cannot be written with a plain host-side assignment.
__global__ void kernelGetPointerExt(EXT **dArrPointerExt,EXT *dExt,int pos){
	dArrPointerExt[pos]=dExt;
}
//Find all valid forward extensions from every column on the right-most path
//(dRMPath). For each path position k, a device array of EXT records is built
//by forwardExtensionQ(); its pointer is stored in dArrPointerExt[k] and its
//length in dArrNoElemPointerExt[k]. Both output arrays are allocated here and
//owned by the caller (release them with cudaFreeArrPointerExt()).
inline cudaError_t forwardExtension(Embedding **dArrPointerEmbedding,int noElem_dArrPointerEmbedding,int *dArrSizedQ,int noElem_dArrSizedQ,int *dRMPath,int noElem_dRMPath,int *d_O,int *d_LO,int *d_N,int *d_LN,int numberOfElementd_O,int numberOfElementd_N,unsigned int maxOfVer,EXT **&dArrPointerExt,int &noElem_dArrPointerExt,int minLabel,int maxid,int *&dArrNoElemPointerExt){
	cudaError_t cudaStatus;
	//Host staging buffers, NULL-initialised before the first goto so the Error
	//path can free them unconditionally.
	int *hRMPath = NULL;
	int *hArrNoElemPointerExt = NULL;
	//Number of embeddings = row count of the last column.
	int noElem_Embedding = 0;
	cudaStatus = findNumberOfEmbedding(dArrSizedQ,noElem_dArrSizedQ,noElem_Embedding);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n findNumberOfEmbedding() in forwardExtension() failed");
		goto Error;
	}
	//Copy the right-most path to the host so we can iterate over its columns.
	hRMPath =(int*)malloc(sizeof(int)*noElem_dRMPath);
	if (hRMPath==NULL){
		printf("\n malloc hRMPath in forwardExtension() failed");
		exit(1);
	}
	cudaStatus = cudaMemcpy(hRMPath,dRMPath,sizeof(int)*noElem_dRMPath,cudaMemcpyDeviceToHost);
	if (cudaStatus !=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy dRMPath --> hRMPath failed");
		goto Error;
	}
	printf("\n ***************** hRMPath **************\n");
	for (int i = 0; i < noElem_dRMPath; i++)
	{
		printf("\n hRMPath[%d]:%d",i,hRMPath[i]);
	}
	cudaStatus = cudaMalloc((void**)&dArrPointerExt,noElem_dRMPath*sizeof(EXT*));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrPointerExt in forwardExtension() failed");
		goto Error;
	}
	cudaMemset(dArrPointerExt,0,noElem_dRMPath*sizeof(EXT*));
	hArrNoElemPointerExt = (int*)malloc(sizeof(int)*noElem_dRMPath);
	if(hArrNoElemPointerExt==NULL){
		printf("\nMalloc hArrNoElemPointerExt in kernel.cu failed");
		//BUG FIX: this path previously jumped to Error while cudaStatus was
		//still cudaSuccess, silently reporting success on failure.
		cudaStatus = cudaErrorMemoryAllocation;
		goto Error;
	}
	memset(hArrNoElemPointerExt,0,sizeof(int)*noElem_dRMPath);
	cudaStatus = cudaMalloc((void**)&dArrNoElemPointerExt,sizeof(int)*noElem_dRMPath);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrNoElemPointerExt in forwardExtension() failed");
		goto Error;
	}
	cudaMemset(dArrNoElemPointerExt,0,sizeof(int)*noElem_dRMPath);
	//Walk the right-most path from the last column backwards.
	for (int i = noElem_dRMPath-1; i>=0 ; i--)
	{
		int idxQ=hRMPath[i];
		printf("\n*********idxQ:%d***************\n",idxQ);
		EXT *dExt=nullptr; //valid extensions of column idxQ end up here
		int noElem_dExt=0;
		cudaStatus = forwardExtensionQ(dArrPointerEmbedding,noElem_dArrPointerEmbedding,dArrSizedQ,noElem_dArrSizedQ,noElem_Embedding,idxQ,dExt,noElem_dExt,d_O,d_LO,d_N,d_LN, numberOfElementd_O, numberOfElementd_N, maxOfVer,minLabel,maxid);
		if(cudaStatus!=cudaSuccess){
			fprintf(stderr,"\n forwardExtensionQ() in forwardExtension() failed");
			goto Error;
		}
		//Record the per-column result: size on the host, pointer on the device.
		hArrNoElemPointerExt[i]=noElem_dExt;
		kernelGetPointerExt<<<1,1>>>(dArrPointerExt,dExt,i);
		cudaDeviceSynchronize();
		cudaStatus=cudaGetLastError();
		if(cudaStatus!=cudaSuccess){
			fprintf(stderr,"\n cudaDeviceSynchronize() kernelGetPointerExt in forwardExtension() failed");
			goto Error;
		}
	}
	//Publish the per-column sizes to the device.
	cudaStatus = cudaMemcpy(dArrNoElemPointerExt,hArrNoElemPointerExt,sizeof(int)*noElem_dRMPath,cudaMemcpyHostToDevice);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy() hArrNoElemPointerExt sang dArrNoElemPointerExt in forwardExtension() failed");
		goto Error;
	}
Error:
	//BUG FIX: both host staging buffers were previously leaked.
	free(hRMPath);
	free(hArrNoElemPointerExt);
	return cudaStatus;
}
//Kernel: debug dump of the per-column extension lists. One thread per path
//position; each thread prints the size of its EXT list and then every record.
__global__ void kernelprintdArrPointerExt(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int noElem_dArrPointerExt){
	int tid = blockIdx.x*blockDim.x + threadIdx.x;
	if (tid >= noElem_dArrPointerExt) return;
	int noElem_dExt = dArrNoElemPointerExt[tid];
	printf("\nThread %d: noElem_dExt:%d",tid,noElem_dExt);
	if (noElem_dExt <= 0) return;
	EXT *dExt = dArrPointerExt[tid];
	printf("\n dExt_value:%p dExt_address:%p ",dExt,&dExt);
	for (int j = 0; j < noElem_dExt; ++j) {
		EXT e = dExt[j];
		//The leading value is the record index within this list (the original
		//shadowed the thread index here and printed the same value).
		printf("\n Thread %d (vi:%d vj:%d li:%d lij:%d lj:%d) (vgi:%d vgj:%d) (posColumn:%d posRow:%d)",j,e.vi,e.vj,e.li,e.lij,e.lj,e.vgi,e.vgj,e.posColumn,e.posRow);
	}
}
//Host wrapper: launch kernelprintdArrPointerExt and wait for completion.
inline cudaError_t printdArrPointerExt(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int noElem_dArrPointerExt){
	dim3 threads(blocksize);
	dim3 blocks((noElem_dArrPointerExt + threads.x - 1)/threads.x);
	kernelprintdArrPointerExt<<<blocks,threads>>>(dArrPointerExt,dArrNoElemPointerExt,noElem_dArrPointerExt);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\ncudaDeviceSynchronize() in printArrPointerdExt() failed");
	}
	return cudaStatus;
}
//Release the per-column EXT lists referenced by dArrPointerExt, then the
//pointer table itself and dArrNoElemPointerExt. The device-resident pointers
//are copied to the host first because cudaFree() needs each list's address.
inline cudaError_t cudaFreeArrPointerExt(EXT **&dArrPointerExt,int *&dArrNoElemPointerExt,int noElem_dArrPointerExt){
	cudaError_t cudaStatus;
	EXT **hArrPointerExt=nullptr;
	hArrPointerExt = (EXT**)malloc(sizeof(EXT*)*noElem_dArrPointerExt);
	if(hArrPointerExt==NULL){
		//BUG FIX: the original joined printf and exit with a comma operator.
		printf("\n malloc hArrPointerExt in cudaFreeArrpointerExt failed");
		exit(1);
	}
	cudaStatus = cudaMemcpy(hArrPointerExt,dArrPointerExt,noElem_dArrPointerExt*sizeof(EXT*),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		//BUG FIX: the message used the broken escape "\c" and named the wrong function.
		fprintf(stderr,"\n cudaMemcpy() in cudaFreeArrPointerExt() failed");
		goto Error;
	}
	//Free each per-column list, then the table and the size array.
	for (int i = 0; i < noElem_dArrPointerExt; i++)
	{
		if (hArrPointerExt[i]!=NULL){
			cudaFree(hArrPointerExt[i]);
		}
	}
	cudaFree(dArrPointerExt);
	cudaFree(dArrNoElemPointerExt);
Error:
	free(hArrPointerExt); //BUG FIX: the host staging array was leaked
	return cudaStatus;
}
//Release the device embedding columns referenced by dArrPointerEmbedding, then
//the pointer table itself and dArrSizedQ. The device-resident pointers are
//copied to the host first because cudaFree() needs each column's address.
inline cudaError_t cudaFreeArrPointerEmbedding(Embedding **&dArrPointerEmbedding,int *&dArrSizedQ,int noElem_dArrPointerEmbedding){
	cudaError_t cudaStatus;
	Embedding **hArrPointerEmbedding=nullptr;
	hArrPointerEmbedding = (Embedding**)malloc(sizeof(Embedding*)*noElem_dArrPointerEmbedding);
	if(hArrPointerEmbedding==NULL){
		//BUG FIX: the original joined printf and exit with a comma operator.
		printf("\n malloc hArrPointerExt in cudaFreeArrPointerEmbedding() failed");
		exit(1);
	}
	cudaStatus = cudaMemcpy(hArrPointerEmbedding,dArrPointerEmbedding,noElem_dArrPointerEmbedding*sizeof(Embedding*),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		//BUG FIX: the message started with the broken escape "\c".
		fprintf(stderr,"\n cudaMemcpy() in cudaFreeArrPointerEmbedding() failed");
		goto Error;
	}
	//Free each column, then the table and the size array.
	for (int i = 0; i < noElem_dArrPointerEmbedding; i++)
	{
		if (hArrPointerEmbedding[i]!=NULL){
			cudaFree(hArrPointerEmbedding[i]);
		}
	}
	cudaFree(dArrPointerEmbedding);
	cudaFree(dArrSizedQ);
Error:
	free(hArrPointerEmbedding); //BUG FIX: the host staging array was leaked
	return cudaStatus;
}
//Kernel: map each extension of column posdArrPointerExt to its (lij,lj) cell
//in the flattened label-pair table and set that cell to 1.
//Le is part of the signature for symmetry with the host wrapper but is not
//needed for the index computation.
__global__ void kernelassigndAllPossibleExtension(EXT **dArrPointerExt,int posdArrPointerExt,int Lv,int Le,int *dArrAllPossibleExtension,int noElem_PointerExt){
	int tid = blockDim.x *blockIdx.x + threadIdx.x;
	if (tid >= noElem_PointerExt) return;
	EXT ext = dArrPointerExt[posdArrPointerExt][tid];
	//Cell index: edge label selects the row, destination vertex label the column.
	dArrAllPossibleExtension[ext.lij * Lv + ext.lj] = 1;
}
//Host wrapper: mark every (lij,lj) label pair occurring in the extensions of
//column posdArrPointerExt inside dArrAllPossibleExtension, then dump the table.
inline cudaError_t assigndAllPossibleExtension(EXT **dArrPointerExt,int posdArrPointerExt,int Lv,int Le,int *dArrAllPossibleExtension,int noElem_PointerExt){
	dim3 threads(blocksize);
	dim3 blocks((noElem_PointerExt + threads.x - 1)/threads.x);
	kernelassigndAllPossibleExtension<<<blocks,threads>>>(dArrPointerExt, posdArrPointerExt, Lv, Le,dArrAllPossibleExtension,noElem_PointerExt);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize kernelassigndAllPossibleExtension in assigndAllPossibleExtension() failed");
		return cudaStatus;
	}
	//Debug: dump the Lv*Le flag table.
	cudaStatus = printInt(dArrAllPossibleExtension,Lv*Le);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n printInt(dArrAllPossibleExtension,Lv*Le) in assigndAllPossibleExtension() failed");
	}
	return cudaStatus;
}
//Kernel: stream-compact the flagged cells of the label-pair table into
//dArrUniEdge. Each flagged cell i decodes back to (lij = i/Lv, lj = i%Lv);
//the shared source label comes from dFromLi[0].
__global__ void kernelassigndArrUniEdge(int *dArrAllPossibleExtension,int *dArrAllPossibleExtensionScanResult,int noElem_dArrAllPossibleExtension,UniEdge *dArrUniEdge,int Lv,int *dFromLi){
	int tid = blockDim.x*blockIdx.x +threadIdx.x;
	if (tid >= noElem_dArrAllPossibleExtension) return;
	if (dArrAllPossibleExtension[tid] != 1) return;
	int dst = dArrAllPossibleExtensionScanResult[tid];
	dArrUniEdge[dst].li = dFromLi[0];
	dArrUniEdge[dst].lij = tid / Lv;
	dArrUniEdge[dst].lj = tid % Lv;
}
//Host wrapper: launch kernelassigndArrUniEdge to fill dArrUniEdge from the
//flagged label-pair table and its scan result.
inline cudaError_t assigndArrUniEdge(int *dArrAllPossibleExtension,int *dArrAllPossibleExtensionScanResult,int noElem_dArrAllPossibleExtension,UniEdge *&dArrUniEdge,int Lv,int *dFromLi){
	dim3 threads(blocksize);
	dim3 blocks((noElem_dArrAllPossibleExtension + threads.x - 1)/threads.x);
	kernelassigndArrUniEdge<<<blocks,threads>>>(dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,dArrUniEdge,Lv,dFromLi);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,"\n cudaDeviceSynchronize() in assigndArrUniEdge() failed");
	}
	return cudaStatus;
}
//Kernel (single-thread launch): copy the source-vertex label 'li' of the first
//EXT record of column 'pos' into dFromLi[0]. Presumably all records of one
//column share the same li, so reading record 0 suffices — verify against the
//producer (kernelFindValidForwardExtension fills li from the column's vid).
__global__ void kernelGetFromLabel(EXT **dArrPointerExt,int pos,int *dFromLi){
	dFromLi[0] = dArrPointerExt[pos][0].li;
}
//Kernel (single-thread launch): store the device address dArrUniEdge into slot
//pos of the device-resident pointer table dArrPointerUniEdge — mirrors
//kernelGetPointerExt for the unique-edge arrays.
__global__ void kernelGetPointerUniEdge(UniEdge **dArrPointerUniEdge,UniEdge *dArrUniEdge,int pos){
	dArrPointerUniEdge[pos]=dArrUniEdge;
}
//Extracts the unique extensions of every EXT segment and stores the results in
//dArrPointerUniEdge: each element is a device pointer to that segment's dArrUniEdge array.
inline cudaError_t extractUniExtension(EXT **dArrPointerExt,int noElem_dArrPointerExt,int Lv,int Le,UniEdge **&dArrPointerUniEdge,int noElem_dArrPointerUniEdge,int *&dArrNoELemPointerUniEdge,int *hArrNoElemPointerExt,int *dArrNoElemPointerExt){
	cudaError_t cudaStatus;
	/*Walk over each EXTk, extract its unique edges and store the result in UniEdge **dArrPointerUniEdge
	 * Each element of UniEdge **dArrPointerUniEdge is one pointer: the result of processing one EXTk
	 * Unique forward extensions go into dUniqueEdgeForward
	 * Unique backward extensions go into dUniqueEdgeBackward (backward extensions only exist in the last EXTk)
	 */
	//1. Create the UniEdge **dArrPointerUniEdge array with as many elements as dRMPath
	//Allocate device memory for dArrPointerUniEdge
	cudaStatus=cudaMalloc((void**)&dArrPointerUniEdge,sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrPointerUniEdge in extractUniExtension() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dArrPointerUniEdge,0,sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
	}
	//Allocate device memory for the per-segment unique-edge counts
	cudaStatus=cudaMalloc((void**)&dArrNoELemPointerUniEdge,sizeof(int)*noElem_dArrPointerUniEdge);
	if (cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrPointerUniEdge in extractUniExtension() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dArrNoELemPointerUniEdge,0,sizeof(int)*noElem_dArrPointerUniEdge);
	}
	int *hArrNoELemPointerUniEdge=(int*)malloc(sizeof(int)*noElem_dArrPointerUniEdge); //Temporary host staging freed at the end of this function; its data is copied into dArrNoELemPointerUniEdge
	if(hArrNoELemPointerUniEdge==NULL){
		printf("\n Malloc hArrNoELemPointerUniEdge in extractUniExtension() failed");
		exit(1);
	}
	else
	{
		memset(hArrNoELemPointerUniEdge,0,sizeof(int)*noElem_dArrPointerUniEdge);
	}
	for (int i = 0; i < noElem_dArrPointerExt; i++)
	{
		//Possibility map for this segment; must be freed inside the loop after use
		int *dArrAllPossibleExtension =nullptr; //one int per (lij,lj) combination
		int noElem_dArrAllPossibleExtension = Lv*Le;
		//printf("\n hArrNoElemPointerExt:%d",hArrNoElemPointerExt[i]);
		//Only process this EXTk if it holds at least one element.
		//Even better would be: only process when the number of DISTINCT elements in EXTk >= minsup
		if(hArrNoElemPointerExt[i]>0){
			int *dFromLi;
			cudaStatus = cudaMalloc((void**)&dFromLi,sizeof(int));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc dFromLi in extractUniExtension failed");
				goto Error;
			}
			else
			{
				cudaMemset(dFromLi,0,sizeof(int));
			}
			//Fetch the segment's source label Li into dFromLi
			kernelGetFromLabel<<<1,1>>>(dArrPointerExt,i,dFromLi);
			cudaDeviceSynchronize();
			cudaStatus=cudaGetLastError();
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaDeviceSynchronize() kernelGetFromLabel in extracUniExtension failed");
				goto Error;
			}
			//Debug dump of dFromLi
			printf("\n ****dFrom *******\n");
			cudaStatus =printInt(dFromLi,1);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n printInt(dFromLi,1) in extracUniExtension failed");
				goto Error;
			}
			UniEdge * dArrUniEdge=nullptr; //ownership transferred to dArrPointerUniEdge[i] below; not freed here
			int noElem_dArrUniEdge=0;
			//Create the dArrAllPossibleExtension map of size Lv*Le, zero-initialized
			cudaStatus=cudaMalloc((void**)&dArrAllPossibleExtension,noElem_dArrAllPossibleExtension*sizeof(int));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc((void**)&dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			else
			{
				cudaMemset(dArrAllPossibleExtension,0,noElem_dArrAllPossibleExtension*sizeof(int));
			}
			//Map each (li,lij,lj) onto a slot of dArrAllPossibleExtension and set that slot to 1.
			//The kernel runs hArrNoElemPointerExt[i] threads; each reads its labels li,lij,lj,
			//maps them to the corresponding slot of dArrAllPossibleExtension
			//and writes 1 there.
			//
			cudaStatus = assigndAllPossibleExtension(dArrPointerExt,i,Lv,Le,dArrAllPossibleExtension,hArrNoElemPointerExt[i]);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n assigndAllPossibleExtension in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			//Scan dArrAllPossibleExtension to obtain the dArrUniEdge size and the slot->output mapping
			int *dArrAllPossibleExtensionScanResult =nullptr;
			cudaStatus = cudaMalloc((void**)&dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension*sizeof(int));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc dArrAllPossibleExtensionScanResult in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			cudaStatus = scanV(dArrAllPossibleExtension,noElem_dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n scanV dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			//Compute the dArrUniEdge element count into noElem_dArrUniEdge
			cudaStatus =getSizeBaseOnScanResult(dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,noElem_dArrUniEdge);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n scanV dArrAllPossibleExtension in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			//Debug: show noElem_dArrUniEdge
			printf("\n******noElem_dArrUniEdge************\n");
			//printf("\n noElem_dArrUniEdge:%d",noElem_dArrUniEdge);
			//Allocate dArrUniEdge
			cudaStatus = cudaMalloc((void**)&dArrUniEdge,noElem_dArrUniEdge*sizeof(UniEdge));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc dArrUniEdge in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			//Compact the flagged slots of dArrAllPossibleExtension into dArrUniEdge
			/* Input Data: dArrAllPossibleExtension, dArrAllPossibleExtensionScanResult, */
			cudaStatus =assigndArrUniEdge(dArrAllPossibleExtension,dArrAllPossibleExtensionScanResult,noElem_dArrAllPossibleExtension,dArrUniEdge,Lv,dFromLi);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n assigndArrUniEdge in extractUniExtension() failed",cudaStatus);
				goto Error;
			}
			//Debug dump of dArrUniEdge
			printf("\n**********printf************");
			printfUniEdge(dArrUniEdge,noElem_dArrUniEdge);
			//Record the number of unique edges for this segment
			hArrNoELemPointerUniEdge[i]=noElem_dArrUniEdge;
			kernelGetPointerUniEdge<<<1,1>>>(dArrPointerUniEdge,dArrUniEdge,i);
			cudaDeviceSynchronize();
			cudaFree(dArrAllPossibleExtensionScanResult);
			cudaFree(dFromLi);
		} //end if
		cudaFree(dArrAllPossibleExtension);
	} //end for
	cudaMemcpy(dArrNoELemPointerUniEdge,hArrNoELemPointerUniEdge,sizeof(int)*noElem_dArrPointerUniEdge,cudaMemcpyHostToDevice);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() in extractUniExtension() failed");
		goto Error;
	}
Error:
	free(hArrNoELemPointerUniEdge);
	return cudaStatus;
}
//Debug kernel: thread i dumps the unique-edge list of segment i
//(dArrPointerUniEdge[i], with dArrNoELemPointerUniEdge[i] entries) via device printf.
__global__ void kernelprintArrPointerUniEdge(UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid >= noElem_dArrPointerUniEdge) return;
	int count = dArrNoELemPointerUniEdge[tid];
	if(count == 0) return;	//segment produced no unique edges
	UniEdge *edges = dArrPointerUniEdge[tid];
	for (int k = 0; k < count; k++)
	{
		printf("\n Thread %d: j:%d (li:%d lij:%d lj:%d)",tid,k,edges[k].li,edges[k].lij,edges[k].lj);
	}
}
//Host wrapper: launches the debug dump kernel over all segments of
//dArrPointerUniEdge and surfaces any launch/execution error.
inline cudaError_t printArrPointerUniEdge(UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	dim3 threadsPerBlock(blocksize);
	dim3 numBlocks((noElem_dArrPointerUniEdge+threadsPerBlock.x-1)/threadsPerBlock.x);	//ceil-div
	kernelprintArrPointerUniEdge<<<numBlocks,threadsPerBlock>>>(dArrPointerUniEdge,dArrNoELemPointerUniEdge,noElem_dArrPointerUniEdge);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus= cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() kernelprintArrPointerUniEdge in printArrPointerUniEdge() failed");
	}
	return cudaStatus;
}
//Releases the device memory referenced by dArrPointerUniEdge (each element points
//to a per-segment device array of UniEdge), then the pointer table itself and the
//per-segment count array. The pointer table is copied to the host first because
//its entries cannot be dereferenced from host code.
//Fixes: the host shadow table was leaked on every call, the malloc was sized with
//sizeof(EXT*) instead of sizeof(UniEdge*) (same size in practice, but inconsistent),
//and the error message contained an invalid "\c" escape and named the wrong function.
inline cudaError_t cudaFreeArrPointerUniEdge(UniEdge **&dArrPointerUniEdge,int *&dArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge){
	cudaError_t cudaStatus;
	UniEdge **hArrPointerUniEdge=nullptr;
	hArrPointerUniEdge = (UniEdge**)malloc(sizeof(UniEdge*)*noElem_dArrPointerUniEdge);
	if(hArrPointerUniEdge==NULL){
		printf("\n malloc hArrPointerExt in cudaFreeArrpointerExt failed"),
		exit(1);
	}
	//Copy the device pointer table to the host so each per-segment buffer can be freed.
	cudaStatus = cudaMemcpy(hArrPointerUniEdge,dArrPointerUniEdge,noElem_dArrPointerUniEdge*sizeof(UniEdge*),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy() in cudaFreeArrPointerUniEdge() failed");
		goto Error;
	}
	for (int i = 0; i < noElem_dArrPointerUniEdge; i++)
	{
		if (hArrPointerUniEdge[i]!=NULL){
			cudaFree(hArrPointerUniEdge[i]);	//per-segment UniEdge buffer
		}
	}
	cudaFree(dArrPointerUniEdge);
	cudaFree(dArrNoELemPointerUniEdge);
Error:
	free(hArrPointerUniEdge);	//fix: host shadow table previously leaked
	return cudaStatus;
}
//Single-thread kernel: copies the segment pointer dArrPointerUniEdge[pos] into
//dPointerArrUniEdge[0] so later kernels can take a single-segment pointer table.
//Also prints the pointer value for debugging. Intended for a <<<1,1>>> launch.
__global__ void kernelExtractPointerUniEdge(UniEdge **dPointerArrUniEdge,UniEdge **dArrPointerUniEdge,int pos){
	dPointerArrUniEdge[0] = dArrPointerUniEdge[pos];
	printf("\nPointer UniEdge:%p",dArrPointerUniEdge[pos]);
}
//Single-thread kernel: copies the segment pointer dArrPointerExt[pos] into
//dPointerArrExt[0] and prints it for debugging. Intended for a <<<1,1>>> launch.
//NOTE(review): the noElemdArrExt parameter is unused here.
__global__ void kernelExtractPointerExt(EXT **dPointerArrExt,EXT **dArrPointerExt,int pos,unsigned int noElemdArrExt){
	dPointerArrExt[0] = dArrPointerExt[pos];
	printf("\nPointer:%p",dArrPointerExt[pos]);
}
//Kernel: marks graph boundaries inside the EXT array at dPointerArrExt[0].
//dArrBoundary[i] is set to 1 when entry i is the last embedding of its graph,
//i.e. the graph id (vgi/maxOfVer) differs between entries i and i+1.
//The final entry is never flagged; dArrBoundary must be zeroed beforehand.
__global__ void kernelfindBoundary(EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundary,unsigned int maxOfVer){
	int tid = blockDim.x*blockIdx.x + threadIdx.x;
	EXT *ext = dPointerArrExt[0];
	if(tid < noElemdArrExt-1){
		unsigned int gidHere = ext[tid].vgi/maxOfVer;
		unsigned int gidNext = ext[tid+1].vgi/maxOfVer;
		if(gidHere != gidNext){
			dArrBoundary[tid]=1;
		}
	}
}
//Host wrapper for kernelfindBoundary: one thread per EXT entry, then a
//sync + error check whose result is returned to the caller.
inline cudaError_t findBoundary(EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *&dArrBoundary,unsigned int maxOfVer){
	dim3 threadsPerBlock(blocksize);
	dim3 numBlocks((noElemdArrExt+threadsPerBlock.x-1)/threadsPerBlock.x);	//ceil-div
	kernelfindBoundary<<<numBlocks,threadsPerBlock>>>(dPointerArrExt,noElemdArrExt,dArrBoundary,maxOfVer);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() in findBoundary() failed",cudaStatus);
	}
	return cudaStatus;
}
//Debug kernel: each thread prints the base pointer of the EXT array at
//dArrExt[0] and the (vgi,vgj) pair of its own entry.
__global__ void kernelPrint(EXT **dArrExt,unsigned int noElemdArrExt){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i<noElemdArrExt){
		EXT *arrExt = dArrExt[0];
		printf("\nPointer ext:%p",dArrExt[0]);
		printf("\n vgi:%d vgj:%d",arrExt[i].vgi,arrExt[i].vgj);
	}
}
//Debug kernel: each thread prints the base pointer of the UniEdge array at
//dPointerArrUniEdge[0] and the (li,lij,lj) labels of its own entry.
__global__ void kernelPrintUE(UniEdge **dPointerArrUniEdge,unsigned int noElem){
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	if(i<noElem){
		UniEdge *arrUniEdge = dPointerArrUniEdge[0];
		printf("\nPointer ue:%p",dPointerArrUniEdge[0]);
		printf("\n UniEdge: li:%d, lij:%d, lj:%d)",arrUniEdge[i].li,arrUniEdge[i].lij,arrUniEdge[i].lj);
	}
}
//Kernel: marks, per graph, whether the `pos`-th unique edge occurs in the EXT
//array. Each thread compares its EXT entry's labels against the unique edge;
//on a match it writes 1 into dF at the graph slot given by the boundary scan
//(dArrBoundaryScanResult). Summing dF afterwards yields the edge's support.
//Concurrent identical writes of 1 into the same slot are benign.
__global__ void kernelFilldF(UniEdge **dPointerArrUniEdge,unsigned int pos,EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundaryScanResult,float *dF){
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	if(i<noElemdArrExt){
		UniEdge *dUniEdge = dPointerArrUniEdge[0];
		int li = dUniEdge[pos].li;
		int lij = dUniEdge[pos].lij;
		int lj = dUniEdge[pos].lj;
		EXT *dArrExt = dPointerArrExt[0];
		int Li = dArrExt[i].li;
		int Lij = dArrExt[i].lij;
		int Lj = dArrExt[i].lj;
		printf("\nThread %d: UniEdge(li:%d lij:%d lj:%d) (Li:%d Lij:%d Lj:%d)",i,li,lij,lj,Li,Lij,Lj);
		if(li==Li && lij==Lij && lj==Lj){
			dF[dArrBoundaryScanResult[i]]=1;
		}
	}
}
//Computes the support of the `pos`-th unique edge: kernelFilldF flags each graph
//containing a matching extension, the flags in dF are summed by reduction() into
//`support`, and dF is zeroed again for the next edge. The kernelPrint/kernelPrintUE
//launches are debug dumps only.
inline cudaError_t calcSupport(UniEdge **dPointerArrUniEdge,unsigned int pos,EXT **dPointerArrExt,unsigned int noElemdArrExt,unsigned int *dArrBoundaryScanResult,float *dF,unsigned int noElemdF,float &support,unsigned int noElemdArrUniEdge){
	cudaError_t cudaStatus;
	dim3 block(blocksize);
	dim3 grid((noElemdArrExt+block.x-1)/block.x);
	printf("\n**********dPointerArrExt***********\n");
	kernelPrint<<<1,noElemdArrExt>>>(dPointerArrExt,noElemdArrExt);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelPrintExt in computeSupportv2() failed");
		goto Error;
	}
	printf("\n**********dPointerArrUniEdge***********\n");
	kernelPrintUE<<<1,noElemdArrUniEdge>>>(dPointerArrUniEdge,noElemdArrUniEdge);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelPrintUE in computeSupportv2() failed");
		goto Error;
	}
	//Flag every graph containing the (li,lij,lj) of unique edge `pos`.
	kernelFilldF<<<grid,block>>>(dPointerArrUniEdge,pos,dPointerArrExt,noElemdArrExt,dArrBoundaryScanResult,dF);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() kernelFilldF in calcSupport() failed",cudaStatus);
		goto Error;
	}
	printf("\n**********dF****************\n");
	printFloat(dF,noElemdF);
	//Sum the per-graph flags: the result is the number of graphs containing the edge.
	reduction(dF,noElemdF,support);
	printf("\n******support********");
	printf("\n Support:%f",support);
	//Reset dF so the buffer can be reused for the next unique edge.
	cudaMemset(dF,0,noElemdF*sizeof(float));
Error:
	return cudaStatus;
}
//Computes the support of every unique edge of every EXT segment (version 2).
//For each segment j: find the per-graph boundaries of EXTk, scan them, then for
//each unique edge call calcSupport(); the per-edge supports are stored in the
//host table hArrPointerSupport[j] (owned by the caller).
inline cudaError_t computeSupportv2(EXT **dArrPointerExt,int *dArrNoElemPointerExt,int *hArrNoElemPointerExt,int noElem_dArrPointerExt,UniEdge **dArrPointerUniEdge,int *dArrNoELemPointerUniEdge,int *hArrNoELemPointerUniEdge,int noElem_dArrPointerUniEdge,unsigned int **&hArrPointerSupport,unsigned int *&hArrNoElemPointerSupport,unsigned int noElem_hArrPointerSupport,unsigned int maxOfVer){
	cudaError_t cudaStatus;
	//Allocate hArrPointerSupport: each element is a pointer to an unsigned int array.
	hArrPointerSupport = (unsigned int**)malloc(sizeof(unsigned int*)*noElem_hArrPointerSupport);
	if(hArrPointerSupport==NULL){
		printf("\n malloc hArrPointerSupport in kernel.cu failed");
		exit(1);
	}
	else
	{
		memset(hArrPointerSupport,0,sizeof(unsigned int*)*noElem_hArrPointerSupport);
	}
	hArrNoElemPointerSupport = (unsigned int*)malloc(sizeof(unsigned int)*noElem_hArrPointerSupport);
	if(hArrNoElemPointerSupport==NULL){
		printf("\n malloc hArrNoelemPointerSupport in computeSupportv2() failed");
		exit(1);
	}
	else
	{
		memset(hArrNoElemPointerSupport,0,sizeof(unsigned int)*noElem_hArrPointerSupport);
	}
	//Walk the pointer table of unique-edge arrays. Iteration j corresponds to segment
	//EXTk number j, and each EXTk has its own graph boundary.
	for (int j = 0; j < noElem_dArrPointerUniEdge ; j++)
	{
		//dArrBoundary stores the boundary flags of this iteration's EXTk.
		unsigned int *dArrBoundary=nullptr;
		unsigned int *dArrBoundaryScanResult=nullptr;
		unsigned int noElemdArrBoundary=0; //equals hArrNoElemPointerExt[j]
		if(hArrNoELemPointerUniEdge[j]>0){ //only find the boundary of EXTk j if segment j has unique edges
			UniEdge **dPointerArrUniEdge=nullptr;
			unsigned int noElemdArrUniEdge = hArrNoELemPointerUniEdge[j];
			cudaStatus = cudaMalloc((void**)&dPointerArrUniEdge,sizeof(UniEdge*));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\ncudaMalloc dPointerArrUniEdge in computeSupportv2() failed",cudaStatus);
				goto Error;
			}
			EXT **dPointerArrExt = nullptr;
			unsigned int noElemdArrExt = hArrNoElemPointerExt[j];
			cudaStatus = cudaMalloc((void**)&dPointerArrExt,sizeof(EXT*));
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\ncudaMalloc dPointerArrExt in computeSupportv2() failed",cudaStatus);
				goto Error;
			}
			//The extraction runs on the device.
			kernelExtractPointerUniEdge<<<1,1>>>(dPointerArrUniEdge,dArrPointerUniEdge,j); //copy element j of dArrPointerUniEdge into a single-slot table for convenience
			cudaDeviceSynchronize();
			kernelExtractPointerExt<<<1,1>>>(dPointerArrExt,dArrPointerExt,j,noElemdArrExt); //copy element j of dArrPointerExt into a single-slot table for convenience
			cudaDeviceSynchronize();
			cudaStatus = cudaGetLastError();
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaDeviceSynchronize() kernelExtractPointerExt kernelExtractPointerUniEdge in computeSupportv2() failed",cudaStatus);
				goto Error;
			}
			printf("\n**********dPointerArrExt***********\n");
			kernelPrint<<<1,noElemdArrExt>>>(dPointerArrExt,noElemdArrExt);
			cudaDeviceSynchronize();
			cudaStatus=cudaGetLastError();
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n kernelPrintExt in computeSupportv2() failed");
				goto Error;
			}
			printf("\n**********dPointerArrUniEdge***********\n");
			kernelPrintUE<<<1,noElemdArrUniEdge>>>(dPointerArrUniEdge,noElemdArrUniEdge);
			cudaDeviceSynchronize();
			cudaStatus=cudaGetLastError();
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n kernelPrintUE in computeSupportv2() failed");
				goto Error;
			}
#pragma region "find Boundary and scan Boundary"
			noElemdArrBoundary = noElemdArrExt;
			cudaStatus=cudaMalloc((void**)&dArrBoundary,sizeof(unsigned int)*noElemdArrBoundary);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc dArrBoundary in computeSupportv2() failed");
				goto Error;
			}
			else
			{
				cudaMemset(dArrBoundary,0,sizeof(unsigned int)*noElemdArrBoundary);
			}
			cudaStatus=cudaMalloc((void**)&dArrBoundaryScanResult,sizeof(unsigned int)*noElemdArrBoundary);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n cudaMalloc dArrBoundary in computeSupportv2() failed");
				goto Error;
			}
			else
			{
				cudaMemset(dArrBoundaryScanResult,0,sizeof(unsigned int)*noElemdArrBoundary);
			}
			//Find the boundary of EXTk and store it in dArrBoundary.
			cudaStatus = findBoundary(dPointerArrExt,noElemdArrExt,dArrBoundary,maxOfVer);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n findBoundary() in computeSupportv2() failed");
				goto Error;
			}
			printf("\n ************* dArrBoundary ************\n");
			cudaStatus=printUnsignedInt(dArrBoundary,noElemdArrBoundary);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n printUnsignedInt in computeSupportv2() failed", cudaStatus);
				goto Error;
			}
			//Scan dArrBoundary into dArrBoundaryScanResult (graph index per EXT entry).
			cudaStatus=scanV(dArrBoundary,noElemdArrBoundary,dArrBoundaryScanResult);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\n Exclusive scan dArrBoundary in computeSupportv2() failed",cudaStatus);
				goto Error;
			}
			printf("\n**************dArrBoundaryScanResult****************\n");
			printUnsignedInt(dArrBoundaryScanResult,noElemdArrBoundary);
			float *dF=nullptr;
			unsigned int noElemdF = 0;
			//Number of graphs = last scan value + 1.
			cudaStatus = cudaMemcpy(&noElemdF,&dArrBoundaryScanResult[noElemdArrBoundary-1],sizeof(unsigned int),cudaMemcpyDeviceToHost);
			if(cudaStatus !=cudaSuccess){
				fprintf(stderr,"\n cudamemcpy dF failed",cudaStatus);
				goto Error;
			}
			noElemdF++;
			printf("\n*****noElemdF******\n");
			printf("noElemdF:%d",noElemdF);
			cudaStatus = cudaMalloc((void**)&dF,sizeof(unsigned int)*noElemdF);
			if(cudaStatus!=cudaSuccess){
				fprintf(stderr,"\ncudaMalloc dF failed",cudaStatus);
				goto Error;
			}
			else
			{
				cudaMemset(dF,0,sizeof(float)*noElemdF);
			}
#pragma endregion "end of finding Boundary"
			hArrNoElemPointerSupport[j]=noElemdArrUniEdge;
			//Per-edge support values for this segment; ownership passes to hArrPointerSupport[j].
			unsigned int * hArrSupport = (unsigned int*)malloc(sizeof(unsigned int)*noElemdArrUniEdge);
			if(hArrSupport==NULL){
				printf("\n Malloc hArrSupport in computeSupportv2() failed");
				exit(1);
			}
			else
			{
				memset(hArrSupport,0,sizeof(unsigned int)*noElemdArrUniEdge);
			}
			//Compute the support of each unique edge of this segment.
			for (int i = 0; i < noElemdArrUniEdge; i++)
			{
				float support=0;
				cudaStatus =calcSupport(dPointerArrUniEdge,i,dPointerArrExt,noElemdArrExt,dArrBoundaryScanResult,dF,noElemdF,support,noElemdArrUniEdge);
				if(cudaStatus !=cudaSuccess){
					fprintf(stderr,"\n calcSupport failed",cudaStatus);
					goto Error;
				}
				hArrSupport[i]=support;
			}
			hArrPointerSupport[j]=hArrSupport;
			/*printf("\n***************hArrPointerSupport*************\n");
			for (int i = 0; i < noElemdArrUniEdge; i++)
			{
				printf("\n support:%d ",hArrSupport[i]);
			}*/
		}
		//NOTE(review): dPointerArrUniEdge, dPointerArrExt, dArrBoundary,
		//dArrBoundaryScanResult and dF are never cudaFree'd here — device memory
		//leaks once per iteration; worth fixing alongside the other cleanup paths.
	}
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() in computeSupportv2() failed",cudaStatus);
		goto Error;
	}
Error:
	return cudaStatus;
}
//Single-thread kernel: copies the (li,lij,lj) labels of the extension at
//dArrPointerUniEdge[pointerPos][edgePos] into the staging struct dUniEdge and
//prints them for debugging. Intended for a <<<1,1>>> launch.
__global__ void kernelgetEdgeLabel(UniEdge **dArrPointerUniEdge,int pointerPos,int edgePos,UniEdge *dUniEdge){
	dUniEdge->li = dArrPointerUniEdge[pointerPos][edgePos].li;
	dUniEdge->lij = dArrPointerUniEdge[pointerPos][edgePos].lij;
	dUniEdge->lj = dArrPointerUniEdge[pointerPos][edgePos].lj;
	printf("\n dUniEdge: (li:%d, lij:%d, lj:%d)",dUniEdge->li,dUniEdge->lij,dUniEdge->lj);
}
//Copies the (li,lij,lj) labels of the minsup-satisfying extension at
//dArrPointerUniEdge[pos][edgePos] back to the host (hUniEdge) so DFS_CODE can
//be built there. hUniEdge is allocated here and is owned by the caller.
//Fix: the temporary device struct dUniEdge was leaked on every call; it is now
//released on all exit paths (cudaFree(nullptr) is a no-op).
inline cudaError_t getEdgeLabel(UniEdge **dArrPointerUniEdge,int pos,int edgePos,UniEdge *&hUniEdge){
	cudaError_t cudaStatus;
	UniEdge *dUniEdge =nullptr;	//device staging buffer, freed at Error:
	hUniEdge = (UniEdge*)malloc(sizeof(UniEdge));
	if(hUniEdge==NULL){
		printf("\n malloc hUniEdge in getEdgeLabel() failed\n");
		exit(1);
	}
	else
	{
		memset(hUniEdge,0,sizeof(UniEdge));
	}
	cudaStatus = cudaMalloc((void**)&dUniEdge,sizeof(UniEdge));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dUniEdge in getEdgeLabel() failed\n",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dUniEdge,0,sizeof(UniEdge));
	}
	//Read the labels on the device, then copy the single struct back to the host.
	kernelgetEdgeLabel<<<1,1>>>(dArrPointerUniEdge,pos,edgePos,dUniEdge);
	cudaDeviceSynchronize();
	cudaStatus = cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n CudaDeviceSynchornize() kernelgetEdgeLabel in getEdgeLabel() failed", cudaStatus);
		goto Error;
	}
	cudaStatus = cudaMemcpy(hUniEdge,dUniEdge,sizeof(UniEdge),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy() (dUniEdge --> hUniEdge) in getEdgeLabel() failed", cudaStatus);
		goto Error;
	}
Error:
	cudaFree(dUniEdge);	//fix: previously leaked
	return cudaStatus;
}
//Single-thread kernel: copies (vi,vj) of the first EXT entry of segment
//`posPointer` into the device scratch ints Vi and Vj. Intended for <<<1,1>>>.
__global__ void kernelgetViVj(EXT **dArrPointerExt,int posPointer,int *Vi,int *Vj){
	*Vi = dArrPointerExt[posPointer][0].vi;
	*Vj = dArrPointerExt[posPointer][0].vj;
	//printf("\n Vi:%d, Vj:%d",Vi[0],Vj[0]);
}
//Extracts (vi,vj) of the first embedding of segment `posPointer` from EXTk so the
//host can build the DFS code.
//Fixes: the two temporary device ints were leaked on every call, and the
//device-to-host copies were unchecked; failures are now propagated.
inline cudaError_t getViVj(EXT **dArrPointerExt,int posPointer, int &vi, int &vj){
	cudaError_t cudaStatus;
	int *Vi=nullptr;	//device scratch for vi, freed at Error:
	int *Vj=nullptr;	//device scratch for vj, freed at Error:
	cudaStatus = cudaMalloc((void**)&Vi,sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc() Vi in getViVj() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(Vi,-1,sizeof(int));
	}
	cudaStatus = cudaMalloc((void**)&Vj,sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc() Vj in getViVj() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(Vj,-1,sizeof(int));
	}
	kernelgetViVj<<<1,1>>>(dArrPointerExt,posPointer,Vi,Vj);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaDeviceSynchronize() in getViVj() failed",cudaStatus);
		goto Error;
	}
	//Copy both labels back; report a failure instead of silently ignoring it.
	cudaStatus = cudaMemcpy(&vi,Vi,sizeof(int),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy Vi in getViVj() failed",cudaStatus);
		goto Error;
	}
	cudaStatus = cudaMemcpy(&vj,Vj,sizeof(int),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy Vj in getViVj() failed",cudaStatus);
		goto Error;
	}
Error:
	cudaFree(Vi);	//fix: previously leaked (cudaFree(nullptr) is a no-op)
	cudaFree(Vj);	//fix: previously leaked
	return cudaStatus;
}
//Kernel: for every EXT entry whose labels equal (li,lij,lj), flags the owning
//graph in dValidEdge (slot index = vgi/maxOfVer). Several threads may write the
//same 1 into one slot, which is benign.
__global__ void kernelMatchValueInEXT(EXT **dPointerArrExt,unsigned int noElemInArrExt,unsigned int *dValidEdge,int li,int lij,int lj,unsigned int maxOfVer){
	int tid =blockDim.x*blockIdx.x + threadIdx.x;
	if(tid<noElemInArrExt){
		EXT *ext = dPointerArrExt[0];
		bool match = (ext[tid].li==li) && (ext[tid].lij==lij) && (ext[tid].lj==lj);
		if(match){
			dValidEdge[ext[tid].vgi/maxOfVer]=1;	//graph id of this embedding
		}
	}
}
//Single-thread kernel: derives the compacted element count from an exclusive
//scan — the last scan value, plus one when the last flag itself is set.
__global__ void kernelGetLastElement(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult,unsigned int noElemdValidEdge,int *dnoElem_hArrGraphId){
	unsigned int last = noElemdValidEdge-1;
	int total = dValidEdgeScanResult[last];
	if(dValidEdge[last]==1){
		total += 1;
	}
	*dnoElem_hArrGraphId = total;
}
//Determines, on the host, how many graphs were flagged in dValidEdge by reading
//the exclusive-scan total on the device (kernelGetLastElement) and copying it
//back into noElem_hArrGraphId. The device scratch int is freed on all paths.
inline cudaError_t findNumberElemBaseOnScanResult(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult,unsigned int noElemdValidEdge,int &noElem_hArrGraphId){
	cudaError_t cudaStatus;
	int *dnoElem_hArrGraphId=nullptr;
	cudaStatus = cudaMalloc((void**)&dnoElem_hArrGraphId,sizeof(int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dnoElem_hArrGraphId in findNumberElemBaseOnScanResult() failfed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dnoElem_hArrGraphId,0,sizeof(int));
	}
	kernelGetLastElement<<<1,1>>>(dValidEdge,dValidEdgeScanResult,noElemdValidEdge,dnoElem_hArrGraphId);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelGetLastElement dValidEdgeScanResult in findNumberElemBaseOnScanResult() failed",cudaStatus);
		goto Error;
	}
	cudaStatus = cudaMemcpy(&noElem_hArrGraphId,dnoElem_hArrGraphId,sizeof(int),cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy (dnoElem_hArrGraphId -->noElem_hArrGraphId) in findNumberElemBaseOnScanResult() failed",cudaStatus);
		goto Error;
	}
Error:
	cudaFree(dnoElem_hArrGraphId);
	return cudaStatus;
}
//Kernel: compacts the indices of flagged slots. For every slot i of dValidEdge
//holding 1 (slot index = graph id), writes i into dArrGraphId at the position
//given by the exclusive scan, producing the dense list of matching graph ids.
__global__ void kernelFilldArrGraphId(unsigned int *dValidEdge,unsigned int *dValidEdgeScanResult, unsigned int noElemdValidEdge, int *dArrGraphId){
	int i = blockDim.x*blockIdx.x + threadIdx.x;
	if(i<noElemdValidEdge){
		if(dValidEdge[i]==1){
			dArrGraphId[dValidEdgeScanResult[i]]=i;
		}
	}
}
//Single-thread kernel: reads the graph id (vgi/maxOfVer) of the LAST entry of
//the EXT array — the largest graph id, used to size the dValidEdge flag array.
//Intended for a <<<1,1>>> launch.
__global__ void kernelGetLastElementExt(EXT **dPointerArrExt,unsigned int noElemInArrExt,unsigned int *dnoElemdValidEdge,unsigned int maxOfVer){
	*dnoElemdValidEdge = dPointerArrExt[0][noElemInArrExt-1].vgi/maxOfVer;
	printf("\ndnoElemdValidEdge:%d",*dnoElemdValidEdge);
}
//Returns (in hArrGraphId, host-owned) the ids of the graphs that contain an
//embedding matching minDFS_CODE, i.e. an EXT entry whose labels equal hUniEdge's.
inline cudaError_t getGraphId(UniEdge *hUniEdge,EXT **dPointerArrExt,unsigned int noElemInArrExt,int *&hArrGraphId,int &noElem_hArrGraphId,unsigned int maxOfVer){
	cudaError_t cudaStatus;
	int li,lij,lj;
	li=hUniEdge->li;
	lij=hUniEdge->lij;
	lj=hUniEdge->lj;
	unsigned int *dValidEdge=nullptr;	//one flag per graph id
	unsigned int noElemdValidEdge=0;
	unsigned int *dnoElemdValidEdge=nullptr;	//NOTE(review): never cudaFree'd — leaks each call
	cudaStatus=cudaMalloc((void**)&dnoElemdValidEdge,sizeof(unsigned int));
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dnoElemdValidEdge in getGraphId() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dnoElemdValidEdge,0,sizeof(unsigned int));
	}
	//First: read the graph id of the last EXTk entry to size dValidEdge.
	kernelGetLastElementExt<<<1,1>>>(dPointerArrExt,noElemInArrExt,dnoElemdValidEdge,maxOfVer);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelGetLastElementExt in getGraphId() failed",cudaStatus);
		goto Error;
	}
	cudaMemcpy(&noElemdValidEdge,dnoElemdValidEdge,sizeof(unsigned int),cudaMemcpyDeviceToHost);
	noElemdValidEdge++;	//largest id -> element count
	printf("\n noElemdValidEdge:%d",noElemdValidEdge);
	//1. Create the flag array (one slot per graph id, zero-initialized)
	cudaStatus = cudaMalloc((void**)&dValidEdge,sizeof(unsigned int)*noElemdValidEdge);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dValidEdge in getGraphId() failed");
		goto Error;
	}
	else
	{
		cudaMemset(dValidEdge,0,sizeof(unsigned int)*noElemdValidEdge);
	}
	//2. Walk EXTk and set 1 in dValidEdge wherever the entry's (li,lij,lj) equals hUniEdge's
	dim3 block(blocksize);
	dim3 grid((noElemInArrExt+block.x-1)/block.x);
	kernelMatchValueInEXT<<<grid,block>>>(dPointerArrExt,noElemInArrExt,dValidEdge,li,lij,lj,maxOfVer);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelMatchValueInEXT in getGraphId() failed",cudaStatus);
		goto Error;
	}
	printf("\n*************dValidEdge*************\n");
	printUnsignedInt(dValidEdge,noElemdValidEdge);
	//3. Scan dValidEdge to obtain the output positions for hArrGraphId
	//3.1. Allocate dValidEdgeScanResult for the scan output
	unsigned int *dValidEdgeScanResult=nullptr;	//NOTE(review): never cudaFree'd — leaks each call
	cudaStatus = cudaMalloc((void**)&dValidEdgeScanResult,sizeof(unsigned int)*noElemdValidEdge);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dValidEdgeScanResult in getGraphId() failed");
		goto Error;
	}
	else
	{
		cudaMemset(dValidEdgeScanResult,0,sizeof(unsigned int)*noElemdValidEdge);
	}
	cudaStatus = scanV(dValidEdge,noElemdValidEdge,dValidEdgeScanResult);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n scanV() dValidEdge in getGraphId() failed",cudaStatus);
		goto Error;
	}
	printf("\n*************dValidEdgeScanResult*************\n");
	printUnsignedInt(dValidEdgeScanResult,noElemdValidEdge);
	//4. Determine the number of elements of hArrGraphId
	cudaStatus = findNumberElemBaseOnScanResult(dValidEdge,dValidEdgeScanResult,noElemdValidEdge,noElem_hArrGraphId);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n findNumberElemBaseOnScanResult dValiedEdgeScanResult in getGraphId() failed",cudaStatus);
		goto Error;
	}
	printf("\n Value of noELem_hArrGraphId:%d",noElem_hArrGraphId);
	//5. Allocate hArrGraphId with the count just found (caller owns it)
	hArrGraphId = (int*)malloc(sizeof(int)*noElem_hArrGraphId);
	if(hArrGraphId==NULL){
		printf("\n Malloc hArrGraphId in getGraphId() failed");
		exit(1);
	}
	int *dArrGraphId=nullptr;
	cudaStatus = cudaMalloc((void**)&dArrGraphId,sizeof(int)*noElem_hArrGraphId);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dArrGraphId in getGraphId() failed",cudaStatus);
		goto Error;
	}
	else
	{
		cudaMemset(dArrGraphId,0,sizeof(int)*noElem_hArrGraphId);
	}
	dim3 blocka(blocksize);
	dim3 grida((noElem_hArrGraphId+blocka.x-1)/blocka.x);
	kernelFilldArrGraphId<<<grida,blocka>>>(dValidEdge, dValidEdgeScanResult, noElemdValidEdge, dArrGraphId);
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelFilldArrGraphId in getGraphId() failed",cudaStatus);
		goto Error;
	}
	cudaStatus = cudaMemcpy(hArrGraphId,dArrGraphId,sizeof(int)*noElem_hArrGraphId,cudaMemcpyDeviceToHost);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMemcpy (dArrGraphId --> hArrGraphId)",cudaStatus);
		goto Error;
	}
	printf("\n*************dArrGraphId*************\n");
	printInt(dArrGraphId,noElem_hArrGraphId);
Error:
	cudaFree(dValidEdge);
	cudaFree(dArrGraphId);
	return cudaStatus;
}
//Host wrapper: dumps every entry of the EXT array at dPointerArrExt[0] via the
//kernelPrint debug kernel and returns any launch/execution error.
inline cudaError_t printdPointerArrExt(EXT **dPointerArrExt,unsigned int noElemInArrExt){
	dim3 threadsPerBlock(blocksize);
	dim3 numBlocks((noElemInArrExt+threadsPerBlock.x-1)/threadsPerBlock.x);	//ceil-div
	kernelPrint<<<numBlocks,threadsPerBlock>>>(dPointerArrExt,noElemInArrExt);
	cudaDeviceSynchronize();
	cudaError_t cudaStatus=cudaGetLastError();
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n kernelPrint in printdPointerArrExt() failed");
	}
	return cudaStatus;
}
//Extends the root embeddings (first DFS level). Currently only performs step 1:
//allocating and zeroing the marker array dM and its scan buffer dMScanResult,
//each sized to noElemInArrExt. Step "2." was never implemented in the original,
//so the output parameters are not yet populated.
//Fixes: dM and dMScanResult were leaked on every call, and a failed first
//allocation jumped over the initialization of dMScanResult; both declarations
//are now hoisted above the first goto and both buffers are freed on all paths.
inline cudaError_t extendEmbeddingRoot(Embedding **&dArrPointerEmbedding,int &noElem_dArrPointerEmbedding,int *&dArrSizedQ,int &noElem_dArrSizedQ,EXT **dPointerArrExt,unsigned int noElemInArrExt,UniEdge *hUniEdge){
	cudaError_t cudaStatus;
	//Declare both scratch buffers up front so `goto Error` never crosses an initialization.
	unsigned int *dM=nullptr;
	unsigned int *dMScanResult=nullptr;
	//1. Allocate dM and dMScanResult (noElemInArrExt elements each) and zero them.
	cudaStatus=cudaMalloc((void**)&dM,sizeof(unsigned int)*noElemInArrExt);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dM in extendEmbeddingRoot() failed",cudaStatus);
		goto Error;
	}
	cudaMemset(dM,0,sizeof(unsigned int)*noElemInArrExt);
	cudaStatus=cudaMalloc((void**)&dMScanResult,sizeof(unsigned int)*noElemInArrExt);
	if(cudaStatus!=cudaSuccess){
		fprintf(stderr,"\n cudaMalloc dM in extendEmbeddingRoot() failed",cudaStatus);
		goto Error;
	}
	cudaMemset(dMScanResult,0,sizeof(unsigned int)*noElemInArrExt);
	//2. TODO: the actual root-extension step was never written in the original.
	cudaDeviceSynchronize();
	cudaStatus=cudaGetLastError();
Error:
	cudaFree(dM);			//fix: previously leaked (cudaFree(nullptr) is a no-op)
	cudaFree(dMScanResult);	//fix: previously leaked
	return cudaStatus;
}
|
8f8c794388548b5367a9032ca63f84f5087af93f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "IncrementConnectionAgeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep table: first four entries are 2-D blocks, the
// rest are 1-D blocks of increasing width.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square problem sizes to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size and launch config,
// warms up IncrementConnectionAgeKernel and prints the elapsed time of 1000
// launches as [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Usage: prog <number_of_matrix_sizes_to_test>
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is read without checking argc — confirm callers
// always pass the count argument.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int cell = 1;
int *connection = NULL;
// NOTE(review): allocates XSIZE*YSIZE BYTES for an int* buffer — a
// sizeof(int) factor looks missing; confirm against the kernel's indexing.
hipMalloc(&connection, XSIZE*YSIZE);
int *age = NULL;
hipMalloc(&age, XSIZE*YSIZE);
int maxCells = 1;
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch initializes the context; synchronized right after.
hipFree(0);hipLaunchKernelGGL((
IncrementConnectionAgeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cell,connection,age,maxCells);
hipDeviceSynchronize();
// Warm-up launches (untimed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
IncrementConnectionAgeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cell,connection,age,maxCells);
}
// NOTE(review): launches are asynchronous and no synchronize occurs inside
// the timed region, so this mostly measures launch overhead, not kernel
// execution time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
IncrementConnectionAgeKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cell,connection,age,maxCells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// NOTE(review): `connection` and `age` are never freed — one pair of
// device buffers leaks per configuration.
}
}} | 8f8c794388548b5367a9032ca63f84f5087af93f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "IncrementConnectionAgeKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep table: first four entries are 2-D blocks, the
// rest are 1-D blocks of increasing width.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Square problem sizes to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size and launch config,
// warms up IncrementConnectionAgeKernel and prints the elapsed time of 1000
// launches as [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// Usage: prog <number_of_matrix_sizes_to_test>
int main(int argc, char **argv) {
cudaSetDevice(0);
// NOTE(review): argv[1] is read without checking argc — confirm callers
// always pass the count argument.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int cell = 1;
int *connection = NULL;
// NOTE(review): allocates XSIZE*YSIZE BYTES for an int* buffer — a
// sizeof(int) factor looks missing; confirm against the kernel's indexing.
cudaMalloc(&connection, XSIZE*YSIZE);
int *age = NULL;
cudaMalloc(&age, XSIZE*YSIZE);
int maxCells = 1;
// Round the problem size up to a multiple of the block dimensions.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// First launch initializes the context; synchronized right after.
cudaFree(0);
IncrementConnectionAgeKernel<<<gridBlock,threadBlock>>>(cell,connection,age,maxCells);
cudaDeviceSynchronize();
// Warm-up launches (untimed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
IncrementConnectionAgeKernel<<<gridBlock,threadBlock>>>(cell,connection,age,maxCells);
}
// NOTE(review): launches are asynchronous and no synchronize occurs inside
// the timed region, so this mostly measures launch overhead, not kernel
// execution time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
IncrementConnectionAgeKernel<<<gridBlock,threadBlock>>>(cell,connection,age,maxCells);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// NOTE(review): `connection` and `age` are never freed — one pair of
// device buffers leaks per configuration.
}
}} |
c0362c70be0ada6394e1bf0077e42ec9fa5c2aa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <iostream>
#include <thrust/extrema.h>
#include <stdlib.h>
#include <time.h>
namespace parallel{
template <class T>
//Retrieves indicies where data from nodes should be forwarded
// TEEN selection kernel: sets d_ind[i] to 1 when element i should be
// transmitted — either it crosses a hard threshold (above *h_thres_top or
// below *h_thres_bot), or it differs from its predecessor by more than the
// soft threshold *s_thres. d_ind must be zero-initialized by the caller.
// NOTE(review): there is no `i < size` guard, and the host launches with a
// rounded-up grid, so out-of-range threads read/write past the buffers —
// confirm the element count and add a bounds check.
__global__ void TEEN_indexes(T *d_vals, int *d_ind, T* h_thres_top, T* h_thres_bot, T* s_thres){
int i = threadIdx.x + blockIdx.x * blockDim.x;
//get indexes of values we want
if ((d_vals[i] > *h_thres_top) || (d_vals[i] < *h_thres_bot)){
//if the values exceed the given thresholds, collect index
d_ind[i]= 1;
}
else if (i > 0){
if (abs(d_vals[i] - d_vals[i-1]) > *s_thres){
//or if the values changed more than the soft threshold from the last value, collect index
d_ind[i] = 1;
}
}
}
template <class T>
// Stream-compaction scatter: d_ind must hold the inclusive prefix sum of the
// selection flags, so an element is selected iff its prefix value rose
// relative to its predecessor, and d_ind[i]-1 is its output slot.
// NOTE(review): like TEEN_indexes, this kernel has no `i < size` guard.
__global__ void compact(T* d_vals, int* d_ind, T* d_vals_out){
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (((i == 0) && (d_ind[i] == 1)) || ((i>0) && (d_ind[i] > d_ind[i-1]))){
d_vals_out[d_ind[i]-1] = d_vals[i];
}
}
template <class T>
// GPU TEEN filter: returns a newly allocated vector holding the elements of
// *in that cross the hard thresholds (h_thres_top / h_thres_bot) or change by
// more than s_thres from the previous sample. Caller owns the returned vector.
// Fixes over the original: threshold buffers sized with sizeof(T) instead of
// sizeof(int); ceil-division block count (size+255)/256 instead of the
// (size+266)/256 typo; memset of the *device* compaction buffer instead of
// the host pointer variable; no leaked thrust::device_malloc scratch buffer;
// standard containers instead of variable-length arrays.
std::vector<T>* TEEN(std::vector<T>* in, T h_thres_top, T h_thres_bot, T s_thres){
	int size = in->size();
	// Host staging copy of the input.
	std::vector<T> arr(in->begin(), in->end());
	// Device buffers.
	T *d_arr;
	T *d_h_thres_top, *d_h_thres_bot, *d_s_thres;
	int *d_arr_ind;
	hipMalloc((void**)&d_arr, size*sizeof(T));
	hipMalloc((void**)&d_arr_ind, size*sizeof(int));
	// BUGFIX: the thresholds have type T; the original allocated/copied
	// sizeof(int) bytes, which is wrong whenever sizeof(T) != sizeof(int).
	hipMalloc((void**)&d_h_thres_top, sizeof(T));
	hipMalloc((void**)&d_h_thres_bot, sizeof(T));
	hipMalloc((void**)&d_s_thres, sizeof(T));
	hipMemcpy(d_arr, arr.data(), size*sizeof(T), hipMemcpyHostToDevice);
	hipMemset(d_arr_ind, 0, size*sizeof(int));  // selection flags start at 0
	hipMemcpy(d_h_thres_top, &h_thres_top, sizeof(T), hipMemcpyHostToDevice);
	hipMemcpy(d_h_thres_bot, &h_thres_bot, sizeof(T), hipMemcpyHostToDevice);
	hipMemcpy(d_s_thres, &s_thres, sizeof(T), hipMemcpyHostToDevice);
	// Mark the elements to keep. BUGFIX: ceil-div is (size+255)/256; the
	// original (size+266)/256 could launch an extra block of guardless threads.
	hipLaunchKernelGGL(( TEEN_indexes<T>), dim3((size+255)/256), dim3(256), 0, 0, d_arr, d_arr_ind, d_h_thres_top, d_h_thres_bot, d_s_thres);
	// The threshold scratch buffers are no longer needed.
	hipFree(d_h_thres_top);
	hipFree(d_h_thres_bot);
	hipFree(d_s_thres);
	// Wrap the existing device flag array instead of allocating (and, as the
	// original did, leaking) a second copy via thrust::device_malloc.
	thrust::device_ptr<int> d_ind = thrust::device_pointer_cast(d_arr_ind);
	// Number of selected elements.
	int sum_ind = thrust::reduce(d_ind, d_ind + size, 0, thrust::plus<int>());
	// In-place inclusive scan turns flags into 1-based output positions.
	thrust::inclusive_scan(d_ind, d_ind + size, d_ind);
	// Compact into a device output buffer.
	T* d_comp_array;
	hipMalloc((void**)&d_comp_array, sum_ind*sizeof(T));
	// BUGFIX: the original passed &d_comp_array, zeroing the host pointer
	// variable rather than the device buffer.
	hipMemset(d_comp_array, 0, sum_ind*sizeof(T));
	hipLaunchKernelGGL(( compact<T>), dim3((size+255)/256), dim3(256), 0, 0, d_arr, d_arr_ind, d_comp_array);
	hipFree(d_arr);
	hipFree(d_arr_ind);
	// Copy the compacted result back and hand ownership to the caller.
	std::vector<T>* ret = new std::vector<T>(sum_ind);
	hipMemcpy(ret->data(), d_comp_array, sum_ind*sizeof(T), hipMemcpyDeviceToHost);
	hipFree(d_comp_array);
	return ret;
}
template <class T>
// Element-wise XOR of vals with the pattern; used for both encryption and
// decryption since XOR is its own inverse.
// NOTE(review): no `i < size` guard — relies on the launch covering exactly
// the buffer length.
__global__ void DSSS_operation(T* vals, T* pattern){
int i = threadIdx.x + blockIdx.x * blockDim.x;
//use bitwise operator to perform xor
vals[i] = vals[i] ^ pattern[i];
}
template <class T>
// GPU XOR "encryption": draws a fresh pseudo-random pattern into pattern_arr
// (kept by the caller for decryption) and writes in[i] ^ pattern_arr[i] into
// vals. Reseeds rand() with the current time on every call, as before.
void DSSS_encrypt(std::vector<T>* in,T* pattern_arr, T* vals){
	int size = in->size();
	// Generate the random pattern on the host.
	srand(time(NULL));
	for(int i = 0; i < size; i++){
		pattern_arr[i] = rand();
	}
	// Stage plaintext and pattern on the device.
	T *d_vals, *d_pattern;
	std::copy(in->begin(), in->end(), vals);
	hipMalloc((void**)&d_vals, size*sizeof(T));
	hipMalloc((void**)&d_pattern, size*sizeof(T));
	hipMemcpy(d_vals, vals, size*sizeof(T), hipMemcpyHostToDevice);
	hipMemcpy(d_pattern, pattern_arr, size*sizeof(T), hipMemcpyHostToDevice);
	// BUGFIX: ceil-div block count is (size+255)/256; the original
	// (size+266)/256 could launch a whole extra block of guardless threads.
	hipLaunchKernelGGL(( DSSS_operation<T>), dim3((size+255)/256), dim3(256), 0, 0, d_vals, d_pattern);
	hipFree(d_pattern);
	hipMemcpy(vals, d_vals, size*sizeof(T), hipMemcpyDeviceToHost);
	hipFree(d_vals);
}
template <class T>
// GPU XOR decryption: XORs the first vals->size() elements of `in` with
// pattern_arr on the device, writing the plaintext back into `in`.
// `vals` supplies only the element count.
void DSSS_decrypt(std::vector<T>* vals, T* pattern_arr, T* in){
	int size = vals->size();
	T *d_vals, *d_pattern;
	hipMalloc((void**)&d_vals, size*sizeof(T));
	hipMalloc((void**)&d_pattern, size*sizeof(T));
	hipMemcpy(d_vals, in, size*sizeof(T), hipMemcpyHostToDevice);
	hipMemcpy(d_pattern, pattern_arr, size*sizeof(T), hipMemcpyHostToDevice);
	// BUGFIX: ceil-div block count is (size+255)/256, not (size+266)/256.
	hipLaunchKernelGGL(( DSSS_operation<T>), dim3((size+255)/256), dim3(256), 0, 0, d_vals, d_pattern);
	hipFree(d_pattern);
	hipMemcpy(in, d_vals, size*sizeof(T), hipMemcpyDeviceToHost);
	hipFree(d_vals);
	// BUGFIX: the original then built a std::vector from `in` using
	// sizeof(in)/sizeof(in[0]) — sizeof of a *pointer* — and leaked it into
	// the by-value parameter copy `vals`. That had no caller-visible effect,
	// so the dead allocation is removed.
}
template <class T>
// Parallel sum of all elements of *in, computed on the device via thrust.
// BUGFIX: previously declared to return `int` and seeded the reduction with
// the int literal 0, which made thrust accumulate (and return) in int and
// truncated the result for non-integer T; both now use T.
T reduce(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	T sm = thrust::reduce(d_vec.begin(), d_vec.end(), (T)0, thrust::plus<T>());
	return sm;
}
template <class T>
// Arithmetic mean of the elements of *in, based on the parallel reduction.
float average(std::vector<T>* in){
	const float n = static_cast<float>(in->size());
	return reduce(in) / n;
}
template <class T>
// Number of occurrences of `val` in *in, counted with a device-side
// thrust::count.
int count(std::vector<T>* in, T &val){
	thrust::device_vector<T> d_vec(in->begin(), in->end());
	return thrust::count(d_vec.begin(), d_vec.end(), val);
}
template <class T>
// Replaces `in` with its inclusive prefix sum, computed on the device.
void inclusive_scan(std::vector<T> &in){
	thrust::device_vector<T> d_vec(in.begin(), in.end());
	thrust::inclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
	thrust::copy(d_vec.begin(), d_vec.end(), in.begin());
}
template <class T>
// Replaces `in` with its exclusive prefix sum, computed on the device.
void exclusive_scan(std::vector<T> &in){
	thrust::device_vector<T> d_vec(in.begin(), in.end());
	thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
	thrust::copy(d_vec.begin(), d_vec.end(), in.begin());
}
template <class T>
// Smallest element of *in, found on the device.
// BUGFIX: previously returned `int`, truncating the result for non-int T.
// Precondition: *in must be non-empty (min_element of an empty range yields
// the end iterator, which must not be dereferenced).
T minimum(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	return *thrust::min_element(d_vec.begin(), d_vec.end());
}
template <class T>
// Largest element of *in, found on the device.
// BUGFIX: previously returned `int`, truncating the result for non-int T.
// Precondition: *in must be non-empty.
T maximum(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	return *thrust::max_element(d_vec.begin(), d_vec.end());
}
}
template <class T>
// Sequential reference implementation of the TEEN filter: keeps a sample when
// it crosses a hard threshold (above h_thres_top or below h_thres_bot), or
// when it moved by more than the soft threshold since the previous sample.
// Caller owns the returned vector.
std::vector<T>* TEEN (std::vector<T> &in, T h_thres_top, T h_thres_bot, T s_thres){
	std::vector<T>* kept = new std::vector<T>();
	const int n = static_cast<int>(in.size());
	for (int idx = 0; idx < n; ++idx) {
		const T cur = in[idx];
		const bool hard = (cur > h_thres_top) || (cur < h_thres_bot);
		const bool soft = (idx > 0) && (abs(cur - in[idx-1]) > s_thres);
		if (hard || soft)
			kept->push_back(cur);
	}
	return kept;
}
template <class T>
// Serial XOR "encryption": fills pattern_arr with a fresh pseudo-random
// pattern (reseeding with the current time, as before) and writes
// in[i] ^ pattern_arr[i] into vals. The caller keeps pattern_arr to decrypt.
void DSSS_encrypt(std::vector<T>* in, T* pattern_arr, T* vals){
	const int n = in->size();
	srand(time(NULL));
	for (int k = 0; k < n; ++k)
		pattern_arr[k] = rand();
	for (int k = 0; k < n; ++k)
		vals[k] = in->at(k) ^ pattern_arr[k];
}
template <class T>
// Serial XOR decryption: vals[i] = in[i] ^ pattern_arr[i] for every element
// of *in (XOR is its own inverse, so this undoes DSSS_encrypt's pattern).
void DSSS_decrypt(std::vector<T>* in, T* pattern_arr, T* vals){
	const int n = in->size();
	for (int k = 0; k < n; ++k)
		vals[k] = in->at(k) ^ pattern_arr[k];
}
/* //Example Funciton calls
int main(){
static const int arr[] = {12,13,57,91,99,123,50};
std::vector<int> temp (arr, arr + sizeof(arr) / sizeof(arr[0]));
std::vector<int>* vals = parallel::TEEN<int>(&temp, 100, 15, 50);
//repetative pattern for encrypting
int pattern_arr[temp.size()];
int vals2[temp.size()];
parallel::DSSS_encrypt<int>(&temp, pattern_arr, vals2);
parallel::DSSS_decrypt<int>(&temp, pattern_arr, vals2);
std::cout << parallel::maximum(&temp);
parallel::sort(vals);
vals = TEEN<int>(temp, 100, 15, 50);
DSSS_encrypt<int>(&temp, pattern_arr, vals2);
DSSS_decrypt<int>(&temp, pattern_arr, vals2);
return 0;
}*/
| c0362c70be0ada6394e1bf0077e42ec9fa5c2aa8.cu | #include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <iostream>
#include <thrust/extrema.h>
#include <stdlib.h>
#include <time.h>
namespace parallel{
template <class T>
//Retrieves indicies where data from nodes should be forwarded
// TEEN selection kernel: sets d_ind[i] to 1 when element i should be
// transmitted — either it crosses a hard threshold (above *h_thres_top or
// below *h_thres_bot), or it differs from its predecessor by more than the
// soft threshold *s_thres. d_ind must be zero-initialized by the caller.
// NOTE(review): there is no `i < size` guard, and the host launches with a
// rounded-up grid, so out-of-range threads read/write past the buffers —
// confirm the element count and add a bounds check.
__global__ void TEEN_indexes(T *d_vals, int *d_ind, T* h_thres_top, T* h_thres_bot, T* s_thres){
int i = threadIdx.x + blockIdx.x * blockDim.x;
//get indexes of values we want
if ((d_vals[i] > *h_thres_top) || (d_vals[i] < *h_thres_bot)){
//if the values exceed the given thresholds, collect index
d_ind[i]= 1;
}
else if (i > 0){
if (abs(d_vals[i] - d_vals[i-1]) > *s_thres){
//or if the values changed more than the soft threshold from the last value, collect index
d_ind[i] = 1;
}
}
}
template <class T>
// Stream-compaction scatter: d_ind must hold the inclusive prefix sum of the
// selection flags, so an element is selected iff its prefix value rose
// relative to its predecessor, and d_ind[i]-1 is its output slot.
// NOTE(review): like TEEN_indexes, this kernel has no `i < size` guard.
__global__ void compact(T* d_vals, int* d_ind, T* d_vals_out){
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (((i == 0) && (d_ind[i] == 1)) || ((i>0) && (d_ind[i] > d_ind[i-1]))){
d_vals_out[d_ind[i]-1] = d_vals[i];
}
}
template <class T>
// GPU TEEN filter: returns a newly allocated vector holding the elements of
// *in that cross the hard thresholds (h_thres_top / h_thres_bot) or change by
// more than s_thres from the previous sample. Caller owns the returned vector.
// Fixes over the original: threshold buffers sized with sizeof(T) instead of
// sizeof(int); ceil-division block count (size+255)/256 instead of the
// (size+266)/256 typo; memset of the *device* compaction buffer instead of
// the host pointer variable; no leaked thrust::device_malloc scratch buffer;
// standard containers instead of variable-length arrays.
std::vector<T>* TEEN(std::vector<T>* in, T h_thres_top, T h_thres_bot, T s_thres){
	int size = in->size();
	// Host staging copy of the input.
	std::vector<T> arr(in->begin(), in->end());
	// Device buffers.
	T *d_arr;
	T *d_h_thres_top, *d_h_thres_bot, *d_s_thres;
	int *d_arr_ind;
	cudaMalloc((void**)&d_arr, size*sizeof(T));
	cudaMalloc((void**)&d_arr_ind, size*sizeof(int));
	// BUGFIX: the thresholds have type T; the original allocated/copied
	// sizeof(int) bytes, which is wrong whenever sizeof(T) != sizeof(int).
	cudaMalloc((void**)&d_h_thres_top, sizeof(T));
	cudaMalloc((void**)&d_h_thres_bot, sizeof(T));
	cudaMalloc((void**)&d_s_thres, sizeof(T));
	cudaMemcpy(d_arr, arr.data(), size*sizeof(T), cudaMemcpyHostToDevice);
	cudaMemset(d_arr_ind, 0, size*sizeof(int));  // selection flags start at 0
	cudaMemcpy(d_h_thres_top, &h_thres_top, sizeof(T), cudaMemcpyHostToDevice);
	cudaMemcpy(d_h_thres_bot, &h_thres_bot, sizeof(T), cudaMemcpyHostToDevice);
	cudaMemcpy(d_s_thres, &s_thres, sizeof(T), cudaMemcpyHostToDevice);
	// Mark the elements to keep. BUGFIX: ceil-div is (size+255)/256; the
	// original (size+266)/256 could launch an extra block of guardless threads.
	TEEN_indexes<T><<<(size+255)/256, 256>>>(d_arr, d_arr_ind, d_h_thres_top, d_h_thres_bot, d_s_thres);
	// The threshold scratch buffers are no longer needed.
	cudaFree(d_h_thres_top);
	cudaFree(d_h_thres_bot);
	cudaFree(d_s_thres);
	// Wrap the existing device flag array instead of allocating (and, as the
	// original did, leaking) a second copy via thrust::device_malloc.
	thrust::device_ptr<int> d_ind = thrust::device_pointer_cast(d_arr_ind);
	// Number of selected elements.
	int sum_ind = thrust::reduce(d_ind, d_ind + size, 0, thrust::plus<int>());
	// In-place inclusive scan turns flags into 1-based output positions.
	thrust::inclusive_scan(d_ind, d_ind + size, d_ind);
	// Compact into a device output buffer.
	T* d_comp_array;
	cudaMalloc((void**)&d_comp_array, sum_ind*sizeof(T));
	// BUGFIX: the original passed &d_comp_array, zeroing the host pointer
	// variable rather than the device buffer.
	cudaMemset(d_comp_array, 0, sum_ind*sizeof(T));
	compact<T><<<(size+255)/256, 256>>>(d_arr, d_arr_ind, d_comp_array);
	cudaFree(d_arr);
	cudaFree(d_arr_ind);
	// Copy the compacted result back and hand ownership to the caller.
	std::vector<T>* ret = new std::vector<T>(sum_ind);
	cudaMemcpy(ret->data(), d_comp_array, sum_ind*sizeof(T), cudaMemcpyDeviceToHost);
	cudaFree(d_comp_array);
	return ret;
}
template <class T>
// Element-wise XOR of vals with the pattern; used for both encryption and
// decryption since XOR is its own inverse.
// NOTE(review): no `i < size` guard — relies on the launch covering exactly
// the buffer length.
__global__ void DSSS_operation(T* vals, T* pattern){
int i = threadIdx.x + blockIdx.x * blockDim.x;
//use bitwise operator to perform xor
vals[i] = vals[i] ^ pattern[i];
}
template <class T>
// GPU XOR "encryption": draws a fresh pseudo-random pattern into pattern_arr
// (kept by the caller for decryption) and writes in[i] ^ pattern_arr[i] into
// vals. Reseeds rand() with the current time on every call, as before.
void DSSS_encrypt(std::vector<T>* in,T* pattern_arr, T* vals){
	int size = in->size();
	// Generate the random pattern on the host.
	srand(time(NULL));
	for(int i = 0; i < size; i++){
		pattern_arr[i] = rand();
	}
	// Stage plaintext and pattern on the device.
	T *d_vals, *d_pattern;
	std::copy(in->begin(), in->end(), vals);
	cudaMalloc((void**)&d_vals, size*sizeof(T));
	cudaMalloc((void**)&d_pattern, size*sizeof(T));
	cudaMemcpy(d_vals, vals, size*sizeof(T), cudaMemcpyHostToDevice);
	cudaMemcpy(d_pattern, pattern_arr, size*sizeof(T), cudaMemcpyHostToDevice);
	// BUGFIX: ceil-div block count is (size+255)/256; the original
	// (size+266)/256 could launch a whole extra block of guardless threads.
	DSSS_operation<T><<<(size+255)/256, 256>>>(d_vals, d_pattern);
	cudaFree(d_pattern);
	cudaMemcpy(vals, d_vals, size*sizeof(T), cudaMemcpyDeviceToHost);
	cudaFree(d_vals);
}
template <class T>
// GPU XOR decryption: XORs the first vals->size() elements of `in` with
// pattern_arr on the device, writing the plaintext back into `in`.
// `vals` supplies only the element count.
void DSSS_decrypt(std::vector<T>* vals, T* pattern_arr, T* in){
	int size = vals->size();
	T *d_vals, *d_pattern;
	cudaMalloc((void**)&d_vals, size*sizeof(T));
	cudaMalloc((void**)&d_pattern, size*sizeof(T));
	cudaMemcpy(d_vals, in, size*sizeof(T), cudaMemcpyHostToDevice);
	cudaMemcpy(d_pattern, pattern_arr, size*sizeof(T), cudaMemcpyHostToDevice);
	// BUGFIX: ceil-div block count is (size+255)/256, not (size+266)/256.
	DSSS_operation<T><<<(size+255)/256, 256>>>(d_vals, d_pattern);
	cudaFree(d_pattern);
	cudaMemcpy(in, d_vals, size*sizeof(T), cudaMemcpyDeviceToHost);
	cudaFree(d_vals);
	// BUGFIX: the original then built a std::vector from `in` using
	// sizeof(in)/sizeof(in[0]) — sizeof of a *pointer* — and leaked it into
	// the by-value parameter copy `vals`. That had no caller-visible effect,
	// so the dead allocation is removed.
}
template <class T>
// Parallel sum of all elements of *in, computed on the device via thrust.
// BUGFIX: previously declared to return `int` and seeded the reduction with
// the int literal 0, which made thrust accumulate (and return) in int and
// truncated the result for non-integer T; both now use T.
T reduce(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	T sm = thrust::reduce(d_vec.begin(), d_vec.end(), (T)0, thrust::plus<T>());
	return sm;
}
template <class T>
// Arithmetic mean of the elements of *in, based on the parallel reduction.
float average(std::vector<T>* in){
	const float n = static_cast<float>(in->size());
	return reduce(in) / n;
}
template <class T>
// Number of occurrences of `val` in *in, counted with a device-side
// thrust::count.
int count(std::vector<T>* in, T &val){
	thrust::device_vector<T> d_vec(in->begin(), in->end());
	return thrust::count(d_vec.begin(), d_vec.end(), val);
}
template <class T>
// Replaces `in` with its inclusive prefix sum, computed on the device.
void inclusive_scan(std::vector<T> &in){
	thrust::device_vector<T> d_vec(in.begin(), in.end());
	thrust::inclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
	thrust::copy(d_vec.begin(), d_vec.end(), in.begin());
}
template <class T>
// Replaces `in` with its exclusive prefix sum, computed on the device.
void exclusive_scan(std::vector<T> &in){
	thrust::device_vector<T> d_vec(in.begin(), in.end());
	thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
	thrust::copy(d_vec.begin(), d_vec.end(), in.begin());
}
template <class T>
// Smallest element of *in, found on the device.
// BUGFIX: previously returned `int`, truncating the result for non-int T.
// Precondition: *in must be non-empty (min_element of an empty range yields
// the end iterator, which must not be dereferenced).
T minimum(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	return *thrust::min_element(d_vec.begin(), d_vec.end());
}
template <class T>
// Largest element of *in, found on the device.
// BUGFIX: previously returned `int`, truncating the result for non-int T.
// Precondition: *in must be non-empty.
T maximum(std::vector<T>* in){
	thrust::host_vector<T> h_vec = *in;
	thrust::device_vector<T> d_vec = h_vec;
	return *thrust::max_element(d_vec.begin(), d_vec.end());
}
}
template <class T>
// Sequential reference implementation of the TEEN filter: keeps a sample when
// it crosses a hard threshold (above h_thres_top or below h_thres_bot), or
// when it moved by more than the soft threshold since the previous sample.
// Caller owns the returned vector.
std::vector<T>* TEEN (std::vector<T> &in, T h_thres_top, T h_thres_bot, T s_thres){
	std::vector<T>* kept = new std::vector<T>();
	const int n = static_cast<int>(in.size());
	for (int idx = 0; idx < n; ++idx) {
		const T cur = in[idx];
		const bool hard = (cur > h_thres_top) || (cur < h_thres_bot);
		const bool soft = (idx > 0) && (abs(cur - in[idx-1]) > s_thres);
		if (hard || soft)
			kept->push_back(cur);
	}
	return kept;
}
template <class T>
// Serial XOR "encryption": fills pattern_arr with a fresh pseudo-random
// pattern (reseeding with the current time, as before) and writes
// in[i] ^ pattern_arr[i] into vals. The caller keeps pattern_arr to decrypt.
void DSSS_encrypt(std::vector<T>* in, T* pattern_arr, T* vals){
	const int n = in->size();
	srand(time(NULL));
	for (int k = 0; k < n; ++k)
		pattern_arr[k] = rand();
	for (int k = 0; k < n; ++k)
		vals[k] = in->at(k) ^ pattern_arr[k];
}
template <class T>
// Serial XOR decryption: vals[i] = in[i] ^ pattern_arr[i] for every element
// of *in (XOR is its own inverse, so this undoes DSSS_encrypt's pattern).
void DSSS_decrypt(std::vector<T>* in, T* pattern_arr, T* vals){
	const int n = in->size();
	for (int k = 0; k < n; ++k)
		vals[k] = in->at(k) ^ pattern_arr[k];
}
/* //Example Funciton calls
int main(){
static const int arr[] = {12,13,57,91,99,123,50};
std::vector<int> temp (arr, arr + sizeof(arr) / sizeof(arr[0]));
std::vector<int>* vals = parallel::TEEN<int>(&temp, 100, 15, 50);
//repetative pattern for encrypting
int pattern_arr[temp.size()];
int vals2[temp.size()];
parallel::DSSS_encrypt<int>(&temp, pattern_arr, vals2);
parallel::DSSS_decrypt<int>(&temp, pattern_arr, vals2);
std::cout << parallel::maximum(&temp);
parallel::sort(vals);
vals = TEEN<int>(temp, 100, 15, 50);
DSSS_encrypt<int>(&temp, pattern_arr, vals2);
DSSS_decrypt<int>(&temp, pattern_arr, vals2);
return 0;
}*/
|
a0e4429eef6c637f0102260ea0c07da1f7ea7251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Scatters the per-block received counts into the spine-count table: for each
// block id, writes d_n_recv_by_block[bid] into that block's CUDA_BND_S_NEW
// slot (each block owns a group of 10 consecutive counters — see the *10
// stride). Launch with one thread per block id, THREADS_PER_BLOCK threads per
// thread block; the guard below handles the rounded-up grid tail.
__global__ static void k_count_received(int nr_total_blocks, uint *d_n_recv_by_block, uint *d_spine_cnts)
{
int bid = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; // global thread id == target block id
if (bid < nr_total_blocks) {
d_spine_cnts[bid * 10 + CUDA_BND_S_NEW] = d_n_recv_by_block[bid];
}
} | a0e4429eef6c637f0102260ea0c07da1f7ea7251.cu | #include "includes.h"
// Scatters the per-block received counts into the spine-count table: for each
// block id, writes d_n_recv_by_block[bid] into that block's CUDA_BND_S_NEW
// slot (each block owns a group of 10 consecutive counters — see the *10
// stride). Launch with one thread per block id, THREADS_PER_BLOCK threads per
// thread block; the guard below handles the rounded-up grid tail.
__global__ static void k_count_received(int nr_total_blocks, uint *d_n_recv_by_block, uint *d_spine_cnts)
{
int bid = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; // global thread id == target block id
if (bid < nr_total_blocks) {
d_spine_cnts[bid * 10 + CUDA_BND_S_NEW] = d_n_recv_by_block[bid];
}
} |
6de994e476cab448182f4c4e23a1c649544d5e0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgemm_batched_smallsq.cu, normal z -> c, Thu Oct 8 23:05:36 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? N : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
// Batched small-square (N <= 32) CGEMM kernel: each z-slice of a thread block
// handles one problem of the batch, and an N x N thread tile computes
// C = alpha * op(A) * op(B) + beta * C from shared memory.
// Shared memory layout: blockDim.z consecutive [slda x N] tiles for A,
// followed by blockDim.z [sldb x N] tiles for B; SLDA's padding avoids
// shared-memory bank conflicts for the listed sizes.
template<int N>
__global__ void
cgemm_batched_smallsq_kernel(
const magma_trans_t transA, magma_trans_t transB,
const magmaFloatComplex alpha, magmaFloatComplex const * const * dA_array, int ai, int aj, int ldda,
magmaFloatComplex const * const * dB_array, int bi, int bj, int lddb,
const magmaFloatComplex beta, magmaFloatComplex** dC_array, int ci, int cj, int lddc,
const int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tz = threadIdx.z;
const int bx = blockIdx.x;
// tz selects which problem of the batch this z-slice works on.
const int batchid = bx * blockDim.z + tz;
if(batchid >= batchCount) return;
// Offset each matrix to its (ai,aj) / (bi,bj) / (ci,cj) sub-block.
const magmaFloatComplex* __restrict__ dA = dA_array[batchid] + aj * ldda + ai;
const magmaFloatComplex* __restrict__ dB = dB_array[batchid] + bj * lddb + bi;
magmaFloatComplex* __restrict__ dC = dC_array[batchid] + cj * lddc + ci;
magmaFloatComplex rC = MAGMA_C_ZERO;
magmaFloatComplex rTmp = MAGMA_C_ZERO;
const int slda = SLDA(N);
const int sldb = SLDA(N);
// Carve this z-slice's A and B tiles out of the dynamic shared buffer.
magmaFloatComplex* sA = (magmaFloatComplex*)(zdata);
magmaFloatComplex* sB = (magmaFloatComplex*)(zdata + blockDim.z * slda * N);
sA += tz * slda * N;
sB += tz * sldb * N;
// read A & B, applying transpose / conjugate-transpose while loading
if(transA == MagmaNoTrans){
sA[ty * slda + tx] = dA[ty * ldda + tx];
}
else{
sA[tx * slda + ty] = (transA == MagmaTrans) ? dA[ty * ldda + tx] : MAGMA_C_CONJ( dA[ty * ldda + tx] );
}
if(transB == MagmaNoTrans){
sB[ty * sldb + tx] = dB[ty * lddb + tx];
}
else{
sB[tx * sldb + ty] = (transB == MagmaTrans) ? dB[ty * lddb + tx] : MAGMA_C_CONJ( dB[ty * lddb + tx] );
}
__syncthreads();
// Pre-scale by beta; skipped when beta == 0 so C may start uninitialized.
if(beta != MAGMA_C_ZERO){
rC = beta * dC[ty * lddc + tx];
}
// multiply: dot product of row tx of sA with column ty of sB
rTmp = MAGMA_C_ZERO;
#pragma unroll
for(int j = 0; j < N; j++){
rTmp += sA[j * slda + tx] * sB[ty * sldb + j];
}
rC += alpha * rTmp;
// write from rC
dC[ty * lddc + tx] = rC;
}
// Host driver for the batched small-square CGEMM. Validates arguments
// (MAGMA-style negative info codes), requires m == n == k <= 32, packs ntcol
// problems per thread block, sizes the dynamic shared memory for one A and
// one B tile per problem, and dispatches to the kernel instantiation that
// matches the (compile-time) matrix size.
extern "C" void
magmablas_cgemm_batched_smallsq(
magma_trans_t transA, magma_trans_t transB,
magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magmaFloatComplex const * const * dB_array, magma_int_t bi, magma_int_t bj, magma_int_t lddb,
magmaFloatComplex beta,
magmaFloatComplex **dC_array, magma_int_t ci, magma_int_t cj, magma_int_t lddc,
magma_int_t batchCount, magma_queue_t queue )
{
// Standard BLAS-style argument checks; info is the 1-based index of the
// first invalid argument, negated.
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
// This specialized path only handles square problems up to 32x32.
if( !(m == n && n == k) ){
printf("Only square sizes are supported\n");
info = -1;
}
if( m > 32){
printf("Only square sizes of up to 32 are supported\n");
info = -1;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
}
if ( m <= 0 || n <= 0 || k <= 0 ) return;
// ntcol = problems packed per thread block (tuned per size); shared memory
// holds one padded A tile and one padded B tile per problem.
magma_int_t ntcol = magma_get_cgemm_batched_ntcol( m );
magma_int_t shmem = ( SLDA(m)*m + SLDA(n)*n ) * sizeof(magmaFloatComplex);
shmem *= ntcol;
const int nblocks = magma_ceildiv(batchCount, ntcol);
dim3 grid(nblocks, 1, 1);
dim3 threads(m, m, ntcol);
// Dispatch on the runtime size to the matching compile-time instantiation.
switch(m){
case 1:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 2:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 3:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 4:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 5:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 6:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 7:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 8:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 9:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 10:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 11:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 12:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 13:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 14:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 15:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 16:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 17:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 18:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 19:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 20:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 21:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 22:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 23:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 24:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 25:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 26:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 27:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 28:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 29:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 30:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 31:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 32:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
default:;
}
}
| 6de994e476cab448182f4c4e23a1c649544d5e0a.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgemm_batched_smallsq.cu, normal z -> c, Thu Oct 8 23:05:36 2020
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? N : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
// Batched complex GEMM for small square matrices: C = alpha*op(A)*op(B) + beta*C,
// one N x N problem per z-slice of the thread block.
//
// Launch contract (see host wrapper below): block = (N, N, ntcol), so thread
// (tx, ty) owns element C(tx, ty) of its problem; dynamic shared memory holds
// ntcol pairs of tiles, each tile padded to leading dimension SLDA(N) to avoid
// shared-memory bank conflicts.
template<int N>
__global__ void
cgemm_batched_smallsq_kernel(
const magma_trans_t transA, magma_trans_t transB,
const magmaFloatComplex alpha, magmaFloatComplex const * const * dA_array, int ai, int aj, int ldda,
magmaFloatComplex const * const * dB_array, int bi, int bj, int lddb,
const magmaFloatComplex beta, magmaFloatComplex** dC_array, int ci, int cj, int lddc,
const int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tz = threadIdx.z;
const int bx = blockIdx.x;
// Each z-slice of the block handles one matrix of the batch.
const int batchid = bx * blockDim.z + tz;
// NOTE(review): threads of out-of-range batch slots return before the
// __syncthreads() below, leaving a barrier reached by only part of the
// block; the slices are independent, but this is formally a divergent
// barrier -- confirm it is benign on all target architectures.
if(batchid >= batchCount) return;
// (ai,aj)/(bi,bj)/(ci,cj) are sub-matrix offsets into each batch entry.
const magmaFloatComplex* __restrict__ dA = dA_array[batchid] + aj * ldda + ai;
const magmaFloatComplex* __restrict__ dB = dB_array[batchid] + bj * lddb + bi;
magmaFloatComplex* __restrict__ dC = dC_array[batchid] + cj * lddc + ci;
magmaFloatComplex rC = MAGMA_C_ZERO;
magmaFloatComplex rTmp = MAGMA_C_ZERO;
// Padded shared leading dimensions (SLDA adds +1 except where N is
// already conflict-free; see the SLDA macro above).
const int slda = SLDA(N);
const int sldb = SLDA(N);
// Carve this z-slice's A and B tiles out of the dynamic shared buffer:
// all ntcol A tiles first, then all ntcol B tiles.
magmaFloatComplex* sA = (magmaFloatComplex*)(zdata);
magmaFloatComplex* sB = (magmaFloatComplex*)(zdata + blockDim.z * slda * N);
sA += tz * slda * N;
sB += tz * sldb * N;
// read A & B into shared memory, applying the transpose/conjugate on the fly
// (transposed operands are stored already transposed so the multiply loop
// below is uniform).
if(transA == MagmaNoTrans){
sA[ty * slda + tx] = dA[ty * ldda + tx];
}
else{
sA[tx * slda + ty] = (transA == MagmaTrans) ? dA[ty * ldda + tx] : MAGMA_C_CONJ( dA[ty * ldda + tx] );
}
if(transB == MagmaNoTrans){
sB[ty * sldb + tx] = dB[ty * lddb + tx];
}
else{
sB[tx * sldb + ty] = (transB == MagmaTrans) ? dB[ty * lddb + tx] : MAGMA_C_CONJ( dB[ty * lddb + tx] );
}
__syncthreads();
// Skip the C load entirely when beta == 0 (also avoids reading
// uninitialized C).
if(beta != MAGMA_C_ZERO){
rC = beta * dC[ty * lddc + tx];
}
// multiply: dot product of row tx of op(A) with column ty of op(B)
rTmp = MAGMA_C_ZERO;
#pragma unroll
for(int j = 0; j < N; j++){
rTmp += sA[j * slda + tx] * sB[ty * sldb + j];
}
rC += alpha * rTmp;
// write from rC
dC[ty * lddc + tx] = rC;
}
// Host entry point for the batched small-square cgemm (m == n == k <= 32).
// Validates arguments, sizes the shared-memory tiles, packs ntcol problems
// per thread block, and dispatches the templated kernel on the runtime m.
extern "C" void
magmablas_cgemm_batched_smallsq(
magma_trans_t transA, magma_trans_t transB,
magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magmaFloatComplex const * const * dB_array, magma_int_t bi, magma_int_t bj, magma_int_t lddb,
magmaFloatComplex beta,
magmaFloatComplex **dC_array, magma_int_t ci, magma_int_t cj, magma_int_t lddc,
magma_int_t batchCount, magma_queue_t queue )
{
// Standard BLAS-style argument checking; info is the negated index of the
// first bad argument (pointer arguments -6/-7/-9/-11/-12 are not checked).
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
// This specialized path only supports square problems up to 32x32.
if( !(m == n && n == k) ){
printf("Only square sizes are supported\n");
info = -1;
}
if( m > 32){
printf("Only square sizes of up to 32 are supported\n");
info = -1;
}
// NOTE(review): execution falls through after reporting the error; for
// m > 32 the switch default makes that a no-op, but invalid leading
// dimensions would still launch a kernel -- confirm whether a return here
// was intended.
if (info != 0) {
magma_xerbla( __func__, -(info) );
}
if ( m <= 0 || n <= 0 || k <= 0 ) return;
// ntcol = number of problems stacked in the z-dimension of one block
// (tuning value chosen per matrix size).
magma_int_t ntcol = magma_get_cgemm_batched_ntcol( m );
// One padded A tile plus one padded B tile per problem, ntcol problems
// per block (must match the kernel's shared-memory layout).
magma_int_t shmem = ( SLDA(m)*m + SLDA(n)*n ) * sizeof(magmaFloatComplex);
shmem *= ntcol;
const int nblocks = magma_ceildiv(batchCount, ntcol);
dim3 grid(nblocks, 1, 1);
dim3 threads(m, m, ntcol);
// The tile size is a template parameter, so dispatch on the runtime m.
switch(m){
case 1: cgemm_batched_smallsq_kernel< 1><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 2: cgemm_batched_smallsq_kernel< 2><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 3: cgemm_batched_smallsq_kernel< 3><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 4: cgemm_batched_smallsq_kernel< 4><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 5: cgemm_batched_smallsq_kernel< 5><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 6: cgemm_batched_smallsq_kernel< 6><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 7: cgemm_batched_smallsq_kernel< 7><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 8: cgemm_batched_smallsq_kernel< 8><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 9: cgemm_batched_smallsq_kernel< 9><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 10: cgemm_batched_smallsq_kernel<10><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 11: cgemm_batched_smallsq_kernel<11><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 12: cgemm_batched_smallsq_kernel<12><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 13: cgemm_batched_smallsq_kernel<13><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 14: cgemm_batched_smallsq_kernel<14><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 15: cgemm_batched_smallsq_kernel<15><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 16: cgemm_batched_smallsq_kernel<16><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 17: cgemm_batched_smallsq_kernel<17><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 18: cgemm_batched_smallsq_kernel<18><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 19: cgemm_batched_smallsq_kernel<19><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 20: cgemm_batched_smallsq_kernel<20><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 21: cgemm_batched_smallsq_kernel<21><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 22: cgemm_batched_smallsq_kernel<22><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 23: cgemm_batched_smallsq_kernel<23><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 24: cgemm_batched_smallsq_kernel<24><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 25: cgemm_batched_smallsq_kernel<25><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 26: cgemm_batched_smallsq_kernel<26><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 27: cgemm_batched_smallsq_kernel<27><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 28: cgemm_batched_smallsq_kernel<28><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 29: cgemm_batched_smallsq_kernel<29><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 30: cgemm_batched_smallsq_kernel<30><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 31: cgemm_batched_smallsq_kernel<31><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 32: cgemm_batched_smallsq_kernel<32><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
default:;
}
}
|
7d1fc035c000be43f7693f8be57aa9b1768b251f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "parameters.h"
/*
spikedetect.cu: functions implementing the "detect" stage of SpikeDetect.
Function overview:
abs(): take the absolute value of the raw sampled waveform, since the threshold computation below requires positive samples
flit(): negate the raw sampled waveform; the subsequent operations work on the inverted waveform
even_sort() & odd_sort(): sort the (absolute) sampled waveform so the median can be found for computing the high/low thresholds
Crossing(): from the thresholds and the flit()'ed waveform, fill the crossing array marking each sample's range (above high threshold = 2, above low threshold = 1, below low threshold = 0)
*/
/****************************************************abs *************************************************************/
// In-place absolute value: sort_ary[i] = |sort_ary[i]| for i in [0, N).
// One thread per element; excess threads are idle.
__global__ void abs(float *sort_ary, size_t N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        const float v = sort_ary[idx];
        sort_ary[idx] = (v < 0) ? -v : v;
    }
}
/****************************************************flit*************************************************************/
// In-place negation: flips the waveform about zero so negative-going
// spikes become positive excursions.
__global__ void flit(float *sort_ary, size_t N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    sort_ary[idx] = -sort_ary[idx];
}
/**********************************************************************************************************/
// Exchange two floats in place (device helper for the sorting kernels).
__device__ void swap(float &a, float &b)
{
    const float tmp = a;
    a = b;
    b = tmp;
}
// One even phase of odd-even transposition sort: compare/exchange the pairs
// (ary[0],ary[1]), (ary[2],ary[3]), ...  Sets mark[0] = 1 whenever a pair was
// out of order, so the host loop knows the array is not yet sorted.
__global__ void even_sort(float *ary, int size, int *mark)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const bool ownsEvenPair = ((tid + 1) % 2 == 1) && (tid + 1 < size);
    if (ownsEvenPair && ary[tid] > ary[tid + 1])
    {
        swap(ary[tid], ary[tid + 1]);
        mark[0] = 1;
    }
    __syncthreads();
}
// One odd phase of odd-even transposition sort: compare/exchange the pairs
// (ary[1],ary[2]), (ary[3],ary[4]), ...  Sets mark[1] = 1 whenever a pair was
// out of order, so the host loop knows another pass is needed.
__global__ void odd_sort(float *ary, int size, int *mark)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const bool ownsOddPair = ((tid + 1) % 2 == 0) && (tid + 1 < size);
    if (ownsOddPair && ary[tid] > ary[tid + 1])
    {
        swap(ary[tid], ary[tid + 1]);
        mark[1] = 1;
    }
    __syncthreads();
}
/**************************************************************************************************************/
// Classify each sample of ary against two thresholds derived from the sorted
// absolute waveform sort_ary:
//   crossing[i] = 2 if ary[i] >= high, 1 if low <= ary[i] < high,
//   untouched (0 from the host-side memset) otherwise.
// mid is the median of |signal| divided by 0.6745, the usual scaling that
// turns a median into a robust estimate of the noise standard deviation for
// Gaussian data; high/low are 4.5x and 2x that estimate.
__global__ void Crossing(float *ary, float *sort_ary, size_t N, int *crossing)
{
// Every thread recomputes the same thresholds from the two middle elements
// of the sorted array (redundant but race-free: reads only).
float mid = (sort_ary[N / 2 - 1] + sort_ary[N / 2]) / 2.0/0.6745;
float high = mid * 4.5;
float low = mid * 2.0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Debug print from a single thread only.
if (tid == 1) printf("%f %f\n", high, low);
if (tid < N)
{
if (ary[tid] < high && ary[tid] >= low) crossing[tid] = 1;
else if (ary[tid] >= high) crossing[tid] = 2;
}
}
/****************************************************kernel*****************************************************************/
// Host driver for the detection pipeline:
//   1. copy the waveform to the device (three copies: raw, to-be-negated,
//      to-be-sorted),
//   2. take absolute values and odd-even-transposition-sort them on the GPU
//      (host-controlled loop over sort passes, using mark[] as the
//      "swapped something" flags),
//   3. negate the waveform (flit), compute thresholds from the sorted data
//      and classify each sample into crossing[].
// Outputs: crossing[] (classification) and ary_t[] (negated waveform).
// NOTE(review): the while loop below tests mark[] BEFORE the device ever
// writes it, so the caller must pass mark with mark[0]+mark[1] > 0 or the
// sort is skipped entirely -- confirm against callers.
// NOTE(review): no error checking on any hip* call; dev_ary is allocated
// and filled but never read by any kernel.
extern "C"
void mixGPU(int *crossing, float *ary_t, float *ary, int *mark, size_t N)
{
int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
float *dev_ary = 0;
float *dev_sort_ary = 0;
float *dev_ary_t = 0;
int *dev_mark = 0;
int *dev_crossing = 0;
//unsigned int sharedSize = numThreads * sizeof(float);
//hipMalloc((void**)&dev_prms, sizeof(spikedetekt_prm));
hipMalloc((void**)&dev_ary, N*sizeof(float));
hipMalloc((void**)&dev_sort_ary,N * sizeof(float));
hipMalloc((void**)&dev_ary_t, N*sizeof(float));
hipMalloc((void**)&dev_mark, 2 * sizeof(int));
hipMalloc((void**)&dev_crossing, N * sizeof(int));
// crossing defaults to 0 ("below low threshold"); the kernel only writes 1/2.
hipMemset(dev_crossing, 0, N * sizeof(int));
hipMemcpy(dev_ary, ary, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_ary_t, ary, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(dev_sort_ary, ary, sizeof(float)*N, hipMemcpyHostToDevice);
// kernel execution
abs << <numBlocks, numThreads >> >(dev_sort_ary, N);
// One even + one odd transposition pass per iteration; stop when a full
// round makes no swaps.  The blocking hipMemcpy calls double as
// synchronization points for the kernels above.
while (mark[0] + mark[1] > 0)
{
mark[0] = 0;
mark[1] = 0;
hipMemcpy(dev_mark, mark, sizeof(int) * 2, hipMemcpyHostToDevice);
even_sort << <numBlocks, numThreads >> >(dev_sort_ary, N, dev_mark);
odd_sort << <numBlocks, numThreads >> >(dev_sort_ary, N, dev_mark);
hipMemcpy(mark, dev_mark, 2 * sizeof(int), hipMemcpyDeviceToHost);
}
flit << <numBlocks, numThreads >> >(dev_ary_t, N);
hipMemcpy(ary_t, dev_ary_t, N * sizeof(float), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( Crossing) , dim3(numBlocks), dim3(numThreads) , 0, 0, dev_ary_t, dev_sort_ary, N, dev_crossing);
/************************************************************CPU test******************************************************************/
//hipMemcpy(answer, dev_answer, 2 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(crossing, dev_crossing, N * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_ary);
hipFree(dev_sort_ary);
hipFree(dev_mark);
hipFree(dev_crossing);
hipFree(dev_ary_t);
}
| 7d1fc035c000be43f7693f8be57aa9b1768b251f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "parameters.h"
/*
spikedetect.cu:包含的函数主要是对应SpikeDetect部分的detect的一些操作
函数作用如下:
abs():对于原始给定的采样波形数组,进行求绝对值操作,应为后面的阈值计算,需要采样信号为正值
flit():对于原始给定的采样波形数组,进行求相反数操作,后面的操作都基于原始波形的相反波形
even_sort()&odd_sort():对于原始给定的采样波形数组,进行排序操作,用于求原始波形的中位数,计算高低阈值
Crossing():通过高低阈值和flit()之后的波形,计算crossing数组,crossing数组表示在该点的电位值的范围(高于高阈值为2,高于低阈值为1,低于低阈值为0)
*/
/****************************************************abs 操作*************************************************************/
// In-place absolute value: sort_ary[i] = |sort_ary[i]| for i in [0, N).
// One thread per element; excess threads are idle.
__global__ void abs(float *sort_ary, size_t N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        const float v = sort_ary[idx];
        sort_ary[idx] = (v < 0) ? -v : v;
    }
}
/****************************************************flit操作*************************************************************/
// In-place negation: flips the waveform about zero so negative-going
// spikes become positive excursions.
__global__ void flit(float *sort_ary, size_t N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    sort_ary[idx] = -sort_ary[idx];
}
/*************************************************排序操作*********************************************************/
// Exchange two floats in place (device helper for the sorting kernels).
__device__ void swap(float &a, float &b)
{
    const float tmp = a;
    a = b;
    b = tmp;
}
// One even phase of odd-even transposition sort: compare/exchange the pairs
// (ary[0],ary[1]), (ary[2],ary[3]), ...  Sets mark[0] = 1 whenever a pair was
// out of order, so the host loop knows the array is not yet sorted.
__global__ void even_sort(float *ary, int size, int *mark)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const bool ownsEvenPair = ((tid + 1) % 2 == 1) && (tid + 1 < size);
    if (ownsEvenPair && ary[tid] > ary[tid + 1])
    {
        swap(ary[tid], ary[tid + 1]);
        mark[0] = 1;
    }
    __syncthreads();
}
// One odd phase of odd-even transposition sort: compare/exchange the pairs
// (ary[1],ary[2]), (ary[3],ary[4]), ...  Sets mark[1] = 1 whenever a pair was
// out of order, so the host loop knows another pass is needed.
__global__ void odd_sort(float *ary, int size, int *mark)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const bool ownsOddPair = ((tid + 1) % 2 == 0) && (tid + 1 < size);
    if (ownsOddPair && ary[tid] > ary[tid + 1])
    {
        swap(ary[tid], ary[tid + 1]);
        mark[1] = 1;
    }
    __syncthreads();
}
/************************************************阈值操作**************************************************************/
// Classify each sample of ary against two thresholds derived from the sorted
// absolute waveform sort_ary:
//   crossing[i] = 2 if ary[i] >= high, 1 if low <= ary[i] < high,
//   untouched (0 from the host-side memset) otherwise.
// mid is the median of |signal| divided by 0.6745, the usual scaling that
// turns a median into a robust estimate of the noise standard deviation for
// Gaussian data; high/low are 4.5x and 2x that estimate.
__global__ void Crossing(float *ary, float *sort_ary, size_t N, int *crossing)
{
// Every thread recomputes the same thresholds from the two middle elements
// of the sorted array (redundant but race-free: reads only).
float mid = (sort_ary[N / 2 - 1] + sort_ary[N / 2]) / 2.0/0.6745;
float high = mid * 4.5;
float low = mid * 2.0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Debug print from a single thread only.
if (tid == 1) printf("%f %f\n", high, low);
if (tid < N)
{
if (ary[tid] < high && ary[tid] >= low) crossing[tid] = 1;
else if (ary[tid] >= high) crossing[tid] = 2;
}
}
/****************************************************调用kernel*****************************************************************/
// Host driver for the detection pipeline:
//   1. copy the waveform to the device (three copies: raw, to-be-negated,
//      to-be-sorted),
//   2. take absolute values and odd-even-transposition-sort them on the GPU
//      (host-controlled loop over sort passes, using mark[] as the
//      "swapped something" flags),
//   3. negate the waveform (flit), compute thresholds from the sorted data
//      and classify each sample into crossing[].
// Outputs: crossing[] (classification) and ary_t[] (negated waveform).
// NOTE(review): the while loop below tests mark[] BEFORE the device ever
// writes it, so the caller must pass mark with mark[0]+mark[1] > 0 or the
// sort is skipped entirely -- confirm against callers.
// NOTE(review): no error checking on any cuda* call; dev_ary is allocated
// and filled but never read by any kernel.
extern "C"
void mixGPU(int *crossing, float *ary_t, float *ary, int *mark, size_t N)
{
int numThreads = 1024;
int numBlocks = (N + numThreads - 1) / numThreads;
float *dev_ary = 0;
float *dev_sort_ary = 0;
float *dev_ary_t = 0;
int *dev_mark = 0;
int *dev_crossing = 0;
//unsigned int sharedSize = numThreads * sizeof(float);
//cudaMalloc((void**)&dev_prms, sizeof(spikedetekt_prm));
cudaMalloc((void**)&dev_ary, N*sizeof(float));
cudaMalloc((void**)&dev_sort_ary,N * sizeof(float));
cudaMalloc((void**)&dev_ary_t, N*sizeof(float));
cudaMalloc((void**)&dev_mark, 2 * sizeof(int));
cudaMalloc((void**)&dev_crossing, N * sizeof(int));
// crossing defaults to 0 ("below low threshold"); the kernel only writes 1/2.
cudaMemset(dev_crossing, 0, N * sizeof(int));
cudaMemcpy(dev_ary, ary, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_ary_t, ary, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sort_ary, ary, sizeof(float)*N, cudaMemcpyHostToDevice);
// kernel execution
abs << <numBlocks, numThreads >> >(dev_sort_ary, N);
// One even + one odd transposition pass per iteration; stop when a full
// round makes no swaps.  The blocking cudaMemcpy calls double as
// synchronization points for the kernels above.
while (mark[0] + mark[1] > 0)
{
mark[0] = 0;
mark[1] = 0;
cudaMemcpy(dev_mark, mark, sizeof(int) * 2, cudaMemcpyHostToDevice);
even_sort << <numBlocks, numThreads >> >(dev_sort_ary, N, dev_mark);
odd_sort << <numBlocks, numThreads >> >(dev_sort_ary, N, dev_mark);
cudaMemcpy(mark, dev_mark, 2 * sizeof(int), cudaMemcpyDeviceToHost);
}
flit << <numBlocks, numThreads >> >(dev_ary_t, N);
cudaMemcpy(ary_t, dev_ary_t, N * sizeof(float), cudaMemcpyDeviceToHost);
Crossing <<<numBlocks, numThreads >>>(dev_ary_t, dev_sort_ary, N, dev_crossing);
/************************************************************CPU test******************************************************************/
//cudaMemcpy(answer, dev_answer, 2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(crossing, dev_crossing, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_ary);
cudaFree(dev_sort_ary);
cudaFree(dev_mark);
cudaFree(dev_crossing);
cudaFree(dev_ary_t);
}
|
5c7b2bd39b3d97b07ec0e2a4d454fb22ab723a75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mul_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] * b[i];
}
} | 5c7b2bd39b3d97b07ec0e2a4d454fb22ab723a75.cu | #include "includes.h"
__global__ void mul_float(int n, float *a, float *b, float *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
sum[i] = a[i] * b[i];
}
} |
24d911b4caddac327f76958c1267cc21c9817431.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc EigenValueViewerICTCM.cu -o temp -lglut -lm -lGLU -lGL
//#include <GL/glut.h>
//#include <math.h>
//#include <stdio.h>
//#include <device.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string.h>
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
using namespace std;
#define SCALE 50.0
#define X_WINDOW 1000
#define Y_WINDOW 1000
#define X_MAX SCALE
#define X_MIN -SCALE
#define X_SCALE 1.0
#define Y_MAX SCALE
#define Y_MIN -SCALE
#define Y_SCALE 1.0
FILE* ffmpeg;
// function prototypes
void KeyPressed(unsigned char key, int x, int y);
void Display(void);
//globalsgcc FunctionHit.c -o FunctionHit -lglut -lm -lGLU -lGL
double g_x;
double g_y;
static int g_win;
double A11, A12, A21, A22;
int* Buffer;
// Map a window pixel x coordinate (0..X_WINDOW) onto OpenGL screen space [-1, +1].
double x_machine_to_x_screen(int x)
{
    const double scaled = (2.0*x)/X_WINDOW;
    return scaled - 1.0;
}
// Map a window pixel y coordinate onto OpenGL screen space [-1, +1];
// pixel y grows downward while screen y grows upward, hence the sign flip.
double y_machine_to_y_screen(int y)
{
    const double scaled = -(2.0*y)/Y_WINDOW;
    return scaled + 1.0;
}
// Map a window pixel x coordinate into world coordinates [X_MIN, X_MAX].
double x_machine_to_x_world(int x)
{
    const double span = X_MAX - X_MIN;
    return (span/X_WINDOW)*x + X_MIN;
}
// Map a window pixel y coordinate into world coordinates [Y_MIN, Y_MAX];
// pixel row 0 is the top of the window (world Y_MAX), hence the negation.
// Fix: use Y_MAX rather than X_MAX for the vertical axis.  Both macros
// currently equal SCALE, so the numeric result is unchanged, but the
// vertical conversion must not silently depend on the horizontal range.
double y_machine_to_y_world(int y)
{
double range;
range = Y_MAX - Y_MIN;
return(-((range/Y_WINDOW)*y - Y_MAX));
}
// Map a world x coordinate [X_MIN, X_MAX] onto OpenGL screen space [-1, +1].
double x_world_to_x_screen(double x)
{
    const double span = X_MAX - X_MIN;
    return -1.0 + 2.0*(x - X_MIN)/span;
}
// Map a world y coordinate [Y_MIN, Y_MAX] onto OpenGL screen space [-1, +1].
double y_world_to_y_screen(double y)
{
    const double span = Y_MAX - Y_MIN;
    return -1.0 + 2.0*(y - Y_MIN)/span;
}
// Draw the white x and y axes across the full window and flush the GL pipeline.
void place_axis()
{
glColor3f(1.0,1.0,1.0);
// Horizontal axis: from the left edge to the right edge at screen y = 0.
glBegin(GL_LINE_LOOP);
glVertex2f(x_machine_to_x_screen(0),0);
glVertex2f(x_machine_to_x_screen(X_WINDOW),0);
glEnd();
// Vertical axis: from the top edge to the bottom edge at screen x = 0.
glBegin(GL_LINE_LOOP);
glVertex2f(0,y_machine_to_y_screen(0));
glVertex2f(0,y_machine_to_y_screen(Y_WINDOW));
glEnd();
glFlush();
}
// Draw a single 5-pixel point at world coordinates (x, y) using the
// current GL color, then flush so it appears immediately.
void placePoint(double x, double y)
{
glPointSize(5);
glBegin(GL_POINTS);
glVertex2f(x_world_to_x_screen(x),y_world_to_y_screen(y));
glEnd();
glFlush();
}
// Apply the 2x2 matrix A (globals A11..A22) to the point (x, y) and store
// the image in the global current point: [g_x; g_y] = A * [x; y].
void hitMatrix(double x, double y)
{
    const double px = x;
    const double py = y;
    g_x = A11 * px + A12 * py;
    g_y = A21 * px + A22 * py;
}
// Print the current point (g_x, g_y) to stdout.
void printPoint()
{
printf("\n x = %f\n",g_x);
printf(" y = %f\n",g_y);
}
// GLUT mouse callback.
// Left click: set the current point (g_x, g_y) from the clicked pixel and
// draw it in yellow (the "initial condition").
// Any other button: multiply the current point by the matrix A (hitMatrix)
// and draw the iterated point in green.
// Both paths print the resulting point to stdout.
void mymouse(int button, int state, int x, int y)
{
if(state == GLUT_DOWN)
{
if(button == GLUT_LEFT_BUTTON)
{
glColor3f(1.0,1.0,0.0);
g_x = x_machine_to_x_world(x);
g_y = y_machine_to_y_world(y);
placePoint(g_x,g_y);
printPoint();
}
else
{
glColor3f(0.0,1.0,0.0);
hitMatrix(g_x,g_y);
placePoint(g_x,g_y);
printPoint();
}
}
}
// Print the interactive usage instructions to stdout (key bindings handled
// by KeyPressed, mouse behavior handled by mymouse).
// Fix: corrected user-facing typos ("sellecting" -> "selecting",
// "screan" -> "screen"); the text content is otherwise unchanged.
void help()
{
printf("\n Click in the black x-y axis then hit one of the following options.");
printf("\n After selecting options (1,3,4,5,6) left click the mouse to set your");
printf("\n initial condition then right click the mouse to generate your next point.");
printf("\n 1: Sets up the matrix that generated figure 1 in the paper.");
printf("\n 3: Sets up the matrix that generated figure 3 in the paper.");
printf("\n 4: Sets up the matrix that generated figure 4 in the paper.");
printf("\n 5: Sets up the matrix that generated figure 5 in the paper.");
printf("\n 6: Sets up the matrix that generated figure 6 in the paper.");
printf("\n n: Allows the user to enter their own 2X2 matrix. Note you will enter these values in the linux terminal");
printf("\n s: Takes a screen shot.");
printf("\n c: Clears the screen.");
printf("\n h: Displays the help screen again.");
printf("\n q: Quits the program.");
}
// GLUT keyboard callback: dispatches on the pressed key.
//   q       quit; c clear and redraw axes; h reprint help;
//   s       capture the framebuffer and save it as screenShot.jpeg by piping
//           raw RGBA pixels through an external ffmpeg process;
//   1,3-6   load one of the preset 2x2 matrices (A11..A22 globals);
//   n       read a user-supplied matrix from the terminal.
// The x, y parameters (mouse position at keypress) are unused.
void KeyPressed(unsigned char key, int x, int y)
{
if(key == 'q')
{
glutDestroyWindow(g_win);
exit(0);
}
if(key == 'c')
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
}
if(key == 's')
{
// Screenshot path: write one raw 1000x1000 RGBA frame into ffmpeg's stdin,
// encode it to a temporary mp4, then re-invoke ffmpeg to extract a jpeg.
// NOTE(review): the hard-coded "-s 1000x1000" must match X_WINDOW/Y_WINDOW,
// and glReadPixels reads from offset (5,5) rather than (0,0) -- confirm
// both are intentional.
FILE* ScreenShotFile;
int* buffer;
const char* cmd = "ffmpeg -r 60 -f rawvideo -pix_fmt rgba -s 1000x1000 -i - "
"-threads 0 -preset fast -y -pix_fmt yuv420p -crf 21 -vf vflip output1.mp4";
ScreenShotFile = popen(cmd, "w");
buffer = (int*)malloc(X_WINDOW*Y_WINDOW*sizeof(int));
for(int i =0; i < 1; i++)
{
glReadPixels(5, 5, X_WINDOW, Y_WINDOW, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
fwrite(buffer, sizeof(int)*X_WINDOW*Y_WINDOW, 1, ScreenShotFile);
}
pclose(ScreenShotFile);
free(buffer);
system("ffmpeg -i output1.mp4 screenShot.jpeg");
system("rm output1.mp4");
}
if(key == 'h')
{
help();
}
// Preset matrices: each clears the canvas, redraws the axes, and loads A.
if(key == '1') // Produces figure 1
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0;
A12 = 1.0/2.0;
A21 = 1.0/4.0;
A22 = 3.0/4.0;
}
if(key == '3') // Produces figure 3
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0/2.0;
A12 = 3.0/4.0;
A21 = 1.0;
A22 = 1.0/4.0;
}
if(key == '4') // Produces figure
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.1;
A12 = 0.0;
A21 = 0.0;
A22 = 1.2;
}
if(key == '5') // Produces figure 5
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0;
A12 = 0.0;
A21 = 0.0;
A22 = 1.2;
}
if(key == '6') // Produces figure 6
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0/2.0;
A12 = 2.0;
A21 = 1.0/2.0;
A22 = -1.0/2.0;
}
if(key == 'n')
{
// User-defined matrix: entered on the terminal, one entry at a time.
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
printf("\n Enter A11\n");
scanf("%lf", &A11);
printf("\n Enter A12\n");
scanf("%lf", &A12);
printf("\n Enter A21\n");
scanf("%lf", &A21);
printf("\n Enter A22\n");
scanf("%lf", &A22);
printf("\n Done. Go back to the and click on the x-y screan.\n");
}
}
// GLUT display callback: clear the canvas, draw the axes, set the default
// point size, and register the mouse handler.
void display()
{
glPointSize(2.0);
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
glutMouseFunc(mymouse);
}
// Program entry point: create the GLUT window, register callbacks, print the
// usage help, and hand control to the GLUT event loop (never returns).
// NOTE(review): Buffer is allocated here but never used or freed in this
// file (the screenshot path allocates its own buffer) -- candidate for removal.
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitWindowSize(X_WINDOW,Y_WINDOW);
Buffer = new int[X_WINDOW*Y_WINDOW];
glutInitWindowPosition(0,0);
g_win = glutCreateWindow("Eigen Values and Eigen Vectors");
glutKeyboardFunc(KeyPressed);
glutDisplayFunc(display);
help();
glutMainLoop();
}
| 24d911b4caddac327f76958c1267cc21c9817431.cu | //nvcc EigenValueViewerICTCM.cu -o temp -lglut -lm -lGLU -lGL
//#include <GL/glut.h>
//#include <math.h>
//#include <stdio.h>
//#include <device.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string.h>
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
using namespace std;
#define SCALE 50.0
#define X_WINDOW 1000
#define Y_WINDOW 1000
#define X_MAX SCALE
#define X_MIN -SCALE
#define X_SCALE 1.0
#define Y_MAX SCALE
#define Y_MIN -SCALE
#define Y_SCALE 1.0
FILE* ffmpeg;
// function prototypes
void KeyPressed(unsigned char key, int x, int y);
void Display(void);
//globalsgcc FunctionHit.c -o FunctionHit -lglut -lm -lGLU -lGL
double g_x;
double g_y;
static int g_win;
double A11, A12, A21, A22;
int* Buffer;
double x_machine_to_x_screen(int x)
{
return( (2.0*x)/X_WINDOW-1.0 );
}
double y_machine_to_y_screen(int y)
{
return( -(2.0*y)/Y_WINDOW+1.0 );
}
double x_machine_to_x_world(int x)
{
double range;
range = X_MAX - X_MIN;
return( (range/X_WINDOW)*x + X_MIN);
}
// Map a window pixel y coordinate into world coordinates [Y_MIN, Y_MAX];
// pixel row 0 is the top of the window (world Y_MAX), hence the negation.
// Fix: use Y_MAX rather than X_MAX for the vertical axis.  Both macros
// currently equal SCALE, so the numeric result is unchanged, but the
// vertical conversion must not silently depend on the horizontal range.
double y_machine_to_y_world(int y)
{
double range;
range = Y_MAX - Y_MIN;
return(-((range/Y_WINDOW)*y - Y_MAX));
}
double x_world_to_x_screen(double x)
{
double range;
range = X_MAX - X_MIN;
return( -1.0 + 2.0*(x - X_MIN)/range );
}
double y_world_to_y_screen(double y)
{
double range;
range = Y_MAX - Y_MIN;
return( -1.0 + 2.0*(y - Y_MIN)/range );
}
void place_axis()
{
glColor3f(1.0,1.0,1.0);
glBegin(GL_LINE_LOOP);
glVertex2f(x_machine_to_x_screen(0),0);
glVertex2f(x_machine_to_x_screen(X_WINDOW),0);
glEnd();
glBegin(GL_LINE_LOOP);
glVertex2f(0,y_machine_to_y_screen(0));
glVertex2f(0,y_machine_to_y_screen(Y_WINDOW));
glEnd();
glFlush();
}
void placePoint(double x, double y)
{
glPointSize(5);
glBegin(GL_POINTS);
glVertex2f(x_world_to_x_screen(x),y_world_to_y_screen(y));
glEnd();
glFlush();
}
void hitMatrix(double x, double y)
{
double xOld = x;
double yOld = y;
g_x = A11*xOld + A12*yOld;
g_y = A21*xOld + A22*yOld;
}
void printPoint()
{
printf("\n x = %f\n",g_x);
printf(" y = %f\n",g_y);
}
void mymouse(int button, int state, int x, int y)
{
if(state == GLUT_DOWN)
{
if(button == GLUT_LEFT_BUTTON)
{
glColor3f(1.0,1.0,0.0);
g_x = x_machine_to_x_world(x);
g_y = y_machine_to_y_world(y);
placePoint(g_x,g_y);
printPoint();
}
else
{
glColor3f(0.0,1.0,0.0);
hitMatrix(g_x,g_y);
placePoint(g_x,g_y);
printPoint();
}
}
}
void help()
{
printf("\n Click in the black x-y axis then hit one of the following options.");
printf("\n After sellecting options (1,3,4,5,6) left click the mouse to set your");
printf("\n initial condition then right click the mouse to generate your next point.");
printf("\n 1: Sets up the matrix that generated figure 1 in the paper.");
printf("\n 3: Sets up the matrix that generated figure 3 in the paper.");
printf("\n 4: Sets up the matrix that generated figure 4 in the paper.");
printf("\n 5: Sets up the matrix that generated figure 5 in the paper.");
printf("\n 6: Sets up the matrix that generated figure 6 in the paper.");
printf("\n n: Allows the user to enter their own 2X2 matrix. Note you will enter these values in the linux terminal");
printf("\n s: Takes a screan shot.");
printf("\n c: Clears the screan.");
printf("\n h: Displays the help screan again.");
printf("\n q: Quits the program.");
}
void KeyPressed(unsigned char key, int x, int y)
{
if(key == 'q')
{
glutDestroyWindow(g_win);
exit(0);
}
if(key == 'c')
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
}
if(key == 's')
{
FILE* ScreenShotFile;
int* buffer;
const char* cmd = "ffmpeg -r 60 -f rawvideo -pix_fmt rgba -s 1000x1000 -i - "
"-threads 0 -preset fast -y -pix_fmt yuv420p -crf 21 -vf vflip output1.mp4";
ScreenShotFile = popen(cmd, "w");
buffer = (int*)malloc(X_WINDOW*Y_WINDOW*sizeof(int));
for(int i =0; i < 1; i++)
{
glReadPixels(5, 5, X_WINDOW, Y_WINDOW, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
fwrite(buffer, sizeof(int)*X_WINDOW*Y_WINDOW, 1, ScreenShotFile);
}
pclose(ScreenShotFile);
free(buffer);
system("ffmpeg -i output1.mp4 screenShot.jpeg");
system("rm output1.mp4");
}
if(key == 'h')
{
help();
}
if(key == '1') // Produces figure 1
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0;
A12 = 1.0/2.0;
A21 = 1.0/4.0;
A22 = 3.0/4.0;
}
if(key == '3') // Produces figure 3
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0/2.0;
A12 = 3.0/4.0;
A21 = 1.0;
A22 = 1.0/4.0;
}
if(key == '4') // Produces figure
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.1;
A12 = 0.0;
A21 = 0.0;
A22 = 1.2;
}
if(key == '5') // Produces figure 5
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0;
A12 = 0.0;
A21 = 0.0;
A22 = 1.2;
}
if(key == '6') // Produces figure 6
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
A11 = 1.0/2.0;
A12 = 2.0;
A21 = 1.0/2.0;
A22 = -1.0/2.0;
}
if(key == 'n')
{
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
printf("\n Enter A11\n");
scanf("%lf", &A11);
printf("\n Enter A12\n");
scanf("%lf", &A12);
printf("\n Enter A21\n");
scanf("%lf", &A21);
printf("\n Enter A22\n");
scanf("%lf", &A22);
printf("\n Done. Go back to the and click on the x-y screan.\n");
}
}
void display()
{
glPointSize(2.0);
glClear(GL_COLOR_BUFFER_BIT);
place_axis();
glutMouseFunc(mymouse);
}
int main(int argc, char** argv)
{
glutInit(&argc,argv);
glutInitWindowSize(X_WINDOW,Y_WINDOW);
Buffer = new int[X_WINDOW*Y_WINDOW];
glutInitWindowPosition(0,0);
g_win = glutCreateWindow("Eigen Values and Eigen Vectors");
glutKeyboardFunc(KeyPressed);
glutDisplayFunc(display);
help();
glutMainLoop();
}
|
d220e33ffd366535485db4d4d4ce217ab59f0097.hip | // !!! This is a file automatically generated by hipify!!!
#include "rroi.h"
#include <iostream>
#include <cmath>
#include <algorithm>
#include <hip/hip_runtime.h>
#include "rroi_helper.h"
#include "rotate_rect_ops.h"
#include "cuda_utils.h"
#if 1
// NOTE: only cache one roi_pool_pt in the shared memory
// Rotated-ROI average pooling: for each (roi, ph, pw) output cell and each
// channel c, integrates the input feature map over the rotated pooling cell,
// weighting every overlapped pixel by its intersection area with the cell
// divided by the cell's area, and writes the result to top_data.
//
// roi_pool_pts holds the 4 corner points (8 coords, component-major:
// component k of cell i lives at roi_pool_pts[k * roi_pool_pt_num + i]).
// rois is (num_rois, 6): batch_ind, xc, yc, w, h, angle.
//
// NOTE(review): the shared-memory cache below is indexed only by threadIdx.y,
// so the kernel appears to assume blockDim.x == 1 (one pooling cell per
// block in x, channels split over y) -- otherwise threads with different
// threadIdx.x would race on the same 8 shared slots.  Confirm the launch
// configuration.
// NOTE(review): __syncthreads() is called inside the divergent
// `if (c < channels)` branch, and there is no barrier before the shared
// slots are overwritten on the next grid-stride iteration; both are
// formally undefined if any thread in the block diverges.
template <typename T>
__global__ void compute_weight(
T* __restrict__ top_data,
const T* __restrict__ bottom_data,
const T* __restrict__ roi_pool_pts,
const T* __restrict__ rois,
const float spatial_scale,
const int num_rois,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width)
{
extern __shared__ T roi_pool_pts_shared[];
const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
// Grid-stride loop over pooling cells (x dimension); channels are mapped
// to the y dimension of the grid/block.
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) {
int c = blockIdx.y * blockDim.y + threadIdx.y;
if (c < channels) {
// Decompose the flat cell index into (roi n, pooled row ph, pooled col pw).
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int n = i / pooled_width / pooled_height;
const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
int roi_batch_ind = rois_offset[0];
// Area of one pooling cell of this (scaled) rotated box.
T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
// Threads with threadIdx.y < 8 cooperatively stage the cell's 8 corner
// coordinates into shared memory (one component each).
int roi_pool_idx_shared = threadIdx.y;
if (roi_pool_idx_shared < 8) {
roi_pool_pts_shared[roi_pool_idx_shared] = roi_pool_pts[roi_pool_idx_shared * roi_pool_pt_num + roi_pool_idx];
}
__syncthreads();
// Axis-aligned pixel bounding box of the rotated cell, clipped to the map.
int left, top, right, bottom;
get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_shared, width, height);
const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
// Accumulate the area-weighted average over every pixel the cell touches.
T output_val = 0.0;
for (int hh = top; hh < bottom+1; ++hh) {
for (int ww = left; ww < right+1; ++ww) {
// T pixel_rect_vertices[8] = {ww+0.0f, hh+0.0f, ww+1.0f, hh+0.0f, ww+1.0f, hh+1.0f, ww+0.0f, hh+1.0f};
// T inter_area = computeRectInterArea(pixel_rect_vertices, roi_pool_pts_shared);
T inter_area = itersect_area_rbox_aabox(
roi_pool_pts_shared,
rbox_area,
ww + 0.f,
ww + 1.f,
hh + 0.f,
hh + 1.f
);
T px_weight = inter_area / rbox_area;
output_val += px_weight * bottom_data_offset[hh * width + ww];
}
}
int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
top_data[top_data_idx] = output_val;
}
}
}
#endif
#if 0
// NOTE: cache multiple roi_pool_pts in the shared mem
// Alternative compute_weight that caches several bins' corner points in shared
// memory (8 floats per x-thread).  COMPILED OUT by the surrounding #if 0 —
// kept for reference only.
template <typename T>
__global__ void compute_weight(
    T* __restrict__ top_data,
    const T* __restrict__ bottom_data,
    const T* __restrict__ roi_pool_pts,
    const T* __restrict__ rois,
    const float spatial_scale,
    const int num_rois,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width)
{
  extern __shared__ T roi_pool_pts_shared[];
  const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_rois * pooled_height * pooled_width; i += blockDim.x * gridDim.x) {
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < channels) {
      int pw = i % pooled_width;
      int ph = (i / pooled_width) % pooled_height;
      int n = i / pooled_width / pooled_height;
      const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
      int roi_batch_ind = rois_offset[0];
      T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
      int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
      // Each x-thread owns an 8-float slot; the first 8 y-threads fill it.
      int roi_pool_offset_shared = 8 * threadIdx.x;
      if (threadIdx.y < 8) {
        roi_pool_pts_shared[roi_pool_offset_shared + threadIdx.y] = roi_pool_pts[threadIdx.y * roi_pool_pt_num + roi_pool_idx];
      }
      __syncthreads();
      int left, top, right, bottom;
      get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_shared + roi_pool_offset_shared, width, height);
      const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
      T output_val = 0.0;
      for (int hh = top; hh < bottom+1; ++hh) {
        for (int ww = left; ww < right+1; ++ww) {
          T pixel_rect_vertices[8] = {ww+0.0f, hh+0.0f, ww+1.0f, hh+0.0f, ww+1.0f, hh+1.0f, ww+0.0f, hh+1.0f};
          T inter_area = computeRectInterArea(pixel_rect_vertices, roi_pool_pts_shared + roi_pool_offset_shared);
          T px_weight = inter_area / rbox_area;
          output_val += px_weight * bottom_data_offset[hh * width + ww];
        }
      }
      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      top_data[top_data_idx] = output_val;
    }
  }
}
#endif
// local memory version
// Rotated-RoI-Align pooling kernel, local-memory variant: each thread copies
// its bin's 8 corner coordinates into a private array instead of shared memory,
// so no inter-thread cooperation is needed.
//
// FIX: removed the __syncthreads() that followed the private-array load.  The
// array lives in per-thread registers/local memory, so no barrier is required,
// and the barrier sat inside divergent control flow (`if (c < channels)`),
// which is undefined behavior when channels is not a multiple of blockDim.y.
template <typename T>
__global__ void compute_weight_local(
    T* __restrict__ top_data,           // out: [num_rois, channels, pooled_h, pooled_w]
    const T* __restrict__ bottom_data,  // in:  [batch, channels, height, width]
    const T* __restrict__ roi_pool_pts, // in:  [8, num_rois*pooled_h*pooled_w] bin corners (SoA)
    const T* __restrict__ rois,         // in:  [num_rois, 6] = batch_ind, xc, yc, w, h, angle
    const float spatial_scale,
    const int num_rois,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width)
{
  const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  // Grid-stride over pooling cells in x; channel index comes from the y dimension.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) {
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < channels) {
      int pw = i % pooled_width;
      int ph = (i / pooled_width) % pooled_height;
      int n = i / pooled_width / pooled_height;
      const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
      int roi_batch_ind = rois_offset[0];
      T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
      int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
      // Private copy of this bin's 8 corner coordinates (no barrier needed).
      T roi_pool_pts_local[8];
      for (int k = 0; k < 8; k++) {
        roi_pool_pts_local[k] = roi_pool_pts[k * roi_pool_pt_num + roi_pool_idx];
      }
      // Axis-aligned integer bounding box of the rotated bin, clipped to the image.
      int left, top, right, bottom;
      get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_local, width, height);
      const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
      T output_val = 0.0;
      // Weighted sum over all pixels intersecting the bin's bounding box.
      for (int hh = top; hh < bottom+1; ++hh) {
        for (int ww = left; ww < right+1; ++ww) {
          T inter_area = itersect_area_rbox_aabox(
              roi_pool_pts_local,
              rbox_area,
              ww + 0.f,
              ww + 1.f,
              hh + 0.f,
              hh + 1.f
          );
          T px_weight = inter_area / rbox_area; // fraction of the bin covered by this pixel
          output_val += px_weight * bottom_data_offset[hh * width + ww];
        }
      }
      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      top_data[top_data_idx] = output_val;
    }
  }
}
// Host driver for rotated RoI-Align forward (HIP build).
// Pipeline: (1) compute each pooling bin's corner points on the device,
// (2) launch compute_weight to produce the pooled output.
// Disabled #if 0 branches keep earlier pipeline variants for reference.
//
// NOTE(review): batch_size is unused here — confirm it is kept only for API symmetry.
// NOTE(review): hipMalloc is given the address of the unique_ptr_device object
// itself; this relies on the wrapper's layout — confirm it is designed for this.
// NOTE(review): hipDeviceSynchronize after every launch serializes the pipeline;
// the kernels already run on `stream`, so these syncs look debug-only.
void RROIAlign_forward(
    int batch_size,
    int num_rois,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    float spatial_scale,
    float* bottom_data_d,   // device ptr: input feature map
    float* rois_d,          // device ptr: [num_rois, 6] rotated boxes
    float* top_data_d,      // device ptr: pooled output
    hipStream_t stream
    )
{
#if 0
  // Variant A (disabled): precompute per-roi transform matrices, then bin points.
  unique_ptr_device<float> transfrom_matrix_d(nullptr);
  CUDA_CHECK(hipMalloc((void **) &transfrom_matrix_d, 6 * num_rois * sizeof(float)));
  {
    int thread_num = ::min(num_rois, 1024);
    int block_num = static_cast<int>(::ceil(num_rois * 1.0 / thread_num));
    hipLaunchKernelGGL(( compute_all_transform_matrix<float>), dim3(block_num), dim3(thread_num), 0, stream,
        transfrom_matrix_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(hipMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    int thread_num = ::min(roi_pool_pt_num, 1024);
    int block_num = static_cast<int>(::ceil(roi_pool_pt_num * 1.0 / thread_num));
    hipLaunchKernelGGL(( compute_roi_pool_pts_coalesced<float>), dim3(block_num), dim3(thread_num), 0, stream,
        roi_pool_pts_d.get(),
        transfrom_matrix_d.get(),
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
#endif
#if 0
  // Variant B (disabled): shared-memory bin-point computation.
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(hipMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    dim3 block(pooled_height, pooled_width);
    dim3 grid(num_rois);
    hipLaunchKernelGGL(( compute_roi_pool_pts_shared<float>), dim3(grid), dim3(block), 0, stream,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
#else
  // Active: one block per roi, one thread per pooling bin, local-memory math.
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(hipMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    dim3 block(pooled_height, pooled_width);
    dim3 grid(num_rois);
    hipLaunchKernelGGL(( compute_roi_pool_pts_local<float>), dim3(grid), dim3(block), 0, stream,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
#endif
#if 0
  // Variant C (disabled): pooled-output kernel using per-thread local memory.
  {
    hipDeviceProp_t deviceProperties;
    int gpu_id = 0;
    CUDA_CHECK(hipGetDeviceProperties(&deviceProperties, gpu_id));
    // int thread_num_x = ::min(pooled_width * pooled_height, 1024);
    int max_thread_num = 512;
    int thread_num_y = ::min(channels, max_thread_num);
    int thread_num_x = max_thread_num / thread_num_y;
    // int block_num_x = static_cast<int>(::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x));
    int block_num_x = ::min(static_cast<int>(::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), deviceProperties.maxGridSize[0]);
    int block_num_y = static_cast<int>(::ceil(channels * 1.0 / thread_num_y));
    dim3 block(thread_num_x, thread_num_y);
    dim3 grid(block_num_x, block_num_y);
    hipLaunchKernelGGL(( compute_weight_local<float>), dim3(grid), dim3(block), 0, stream,
        top_data_d,
        bottom_data_d,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        channels,
        height,
        width,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
#else
  // Active: shared-memory compute_weight; blockDim.x is fixed at 1 so the
  // dynamic shared buffer (8 floats per x-thread) caches a single bin.
  {
    hipDeviceProp_t deviceProperties;
    int gpu_id = 0;
    CUDA_CHECK(hipGetDeviceProperties(&deviceProperties, gpu_id));
    int max_thread_num = 512;
    // int thread_num_x = ::min(max_thread_num / 8, pooled_width);
    // int thread_num_y = ::min(max_thread_num / thread_num_x, channels);
    int thread_num_y = ::min(channels, max_thread_num);
    // int thread_num_x = max_thread_num / thread_num_y;
    int thread_num_x = 1;
    int block_num_x = ::min(static_cast<int>(::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), deviceProperties.maxGridSize[0]);
    int block_num_y = static_cast<int>(::ceil(channels * 1.0 / thread_num_y));
    dim3 block(thread_num_x, thread_num_y);
    dim3 grid(block_num_x, block_num_y);
    size_t shared_mem_size = 8 * thread_num_x * sizeof(float); // matches kernel's per-bin cache
    hipLaunchKernelGGL(( compute_weight<float>), dim3(grid), dim3(block), shared_mem_size, stream,
        top_data_d,
        bottom_data_d,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        channels,
        height,
        width,
        pooled_height,
        pooled_width);
    CUDA_CHECK(hipDeviceSynchronize());
  }
#endif
}
| d220e33ffd366535485db4d4d4ce217ab59f0097.cu | #include "rroi.h"
#include <iostream>
#include <cmath>
#include <algorithm>
#include <cuda.h>
#include "rroi_helper.h"
#include "rotate_rect_ops.h"
#include "cuda_utils.h"
#if 1
// NOTE: only cache one roi_pool_pt in the shared memory
// Rotated-RoI-Align pooling kernel (active variant, selected by the #if 1 above).
// CUDA twin of the HIP version in this dataset row.  Per (roi, bin, channel),
// accumulates input pixels weighted by their overlap with the rotated bin;
// the bin's 8 corner coordinates are staged in dynamic shared memory.
// Launch contract (see RROIAlign_forward): blockDim.x == 1, shared size 8 floats.
// NOTE(review): __syncthreads() below is inside `if (c < channels)` — divergent
// barrier is UB if the last y-block has threads with c >= channels; confirm.
// NOTE(review): no barrier between the shared-memory reads and the next
// grid-stride iteration's overwrite — confirm gridDim covers all cells.
template <typename T>
__global__ void compute_weight(
    T* __restrict__ top_data,           // out: [num_rois, channels, pooled_h, pooled_w]
    const T* __restrict__ bottom_data,  // in:  [batch, channels, height, width]
    const T* __restrict__ roi_pool_pts, // in:  [8, num_rois*pooled_h*pooled_w] (SoA)
    const T* __restrict__ rois,         // in:  [num_rois, 6] = batch_ind, xc, yc, w, h, angle
    const float spatial_scale,
    const int num_rois,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width)
{
  extern __shared__ T roi_pool_pts_shared[];
  const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) {
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < channels) {
      int pw = i % pooled_width;
      int ph = (i / pooled_width) % pooled_height;
      int n = i / pooled_width / pooled_height;
      const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
      int roi_batch_ind = rois_offset[0];
      T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
      int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
      // First 8 y-threads cooperatively load this bin's corner coordinates.
      int roi_pool_idx_shared = threadIdx.y;
      if (roi_pool_idx_shared < 8) {
        roi_pool_pts_shared[roi_pool_idx_shared] = roi_pool_pts[roi_pool_idx_shared * roi_pool_pt_num + roi_pool_idx];
      }
      __syncthreads();
      int left, top, right, bottom;
      get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_shared, width, height);
      const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
      T output_val = 0.0;
      for (int hh = top; hh < bottom+1; ++hh) {
        for (int ww = left; ww < right+1; ++ww) {
          // T pixel_rect_vertices[8] = {ww+0.0f, hh+0.0f, ww+1.0f, hh+0.0f, ww+1.0f, hh+1.0f, ww+0.0f, hh+1.0f};
          // T inter_area = computeRectInterArea(pixel_rect_vertices, roi_pool_pts_shared);
          T inter_area = itersect_area_rbox_aabox(
              roi_pool_pts_shared,
              rbox_area,
              ww + 0.f,
              ww + 1.f,
              hh + 0.f,
              hh + 1.f
          );
          T px_weight = inter_area / rbox_area; // fraction of the bin covered by this pixel
          output_val += px_weight * bottom_data_offset[hh * width + ww];
        }
      }
      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      top_data[top_data_idx] = output_val;
    }
  }
}
#endif
#if 0
// NOTE: cache multiple roi_pool_pts in the shared mem
// Alternative compute_weight caching several bins' corner points in shared
// memory (8 floats per x-thread).  COMPILED OUT by the surrounding #if 0 —
// kept for reference only.
template <typename T>
__global__ void compute_weight(
    T* __restrict__ top_data,
    const T* __restrict__ bottom_data,
    const T* __restrict__ roi_pool_pts,
    const T* __restrict__ rois,
    const float spatial_scale,
    const int num_rois,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width)
{
  extern __shared__ T roi_pool_pts_shared[];
  const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_rois * pooled_height * pooled_width; i += blockDim.x * gridDim.x) {
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < channels) {
      int pw = i % pooled_width;
      int ph = (i / pooled_width) % pooled_height;
      int n = i / pooled_width / pooled_height;
      const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
      int roi_batch_ind = rois_offset[0];
      T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
      int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
      // Each x-thread owns an 8-float slot; the first 8 y-threads fill it.
      int roi_pool_offset_shared = 8 * threadIdx.x;
      if (threadIdx.y < 8) {
        roi_pool_pts_shared[roi_pool_offset_shared + threadIdx.y] = roi_pool_pts[threadIdx.y * roi_pool_pt_num + roi_pool_idx];
      }
      __syncthreads();
      int left, top, right, bottom;
      get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_shared + roi_pool_offset_shared, width, height);
      const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
      T output_val = 0.0;
      for (int hh = top; hh < bottom+1; ++hh) {
        for (int ww = left; ww < right+1; ++ww) {
          T pixel_rect_vertices[8] = {ww+0.0f, hh+0.0f, ww+1.0f, hh+0.0f, ww+1.0f, hh+1.0f, ww+0.0f, hh+1.0f};
          T inter_area = computeRectInterArea(pixel_rect_vertices, roi_pool_pts_shared + roi_pool_offset_shared);
          T px_weight = inter_area / rbox_area;
          output_val += px_weight * bottom_data_offset[hh * width + ww];
        }
      }
      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      top_data[top_data_idx] = output_val;
    }
  }
}
#endif
// local memory version
// Rotated-RoI-Align pooling kernel, local-memory variant (CUDA twin of the HIP
// version): each thread copies its bin's 8 corner coordinates into a private
// array, so no inter-thread cooperation is needed.
//
// FIX: removed the __syncthreads() that followed the private-array load.  The
// array is thread-private, so no barrier is required, and the barrier sat
// inside divergent control flow (`if (c < channels)`), which is undefined
// behavior when channels is not a multiple of blockDim.y.
template <typename T>
__global__ void compute_weight_local(
    T* __restrict__ top_data,           // out: [num_rois, channels, pooled_h, pooled_w]
    const T* __restrict__ bottom_data,  // in:  [batch, channels, height, width]
    const T* __restrict__ roi_pool_pts, // in:  [8, num_rois*pooled_h*pooled_w] (SoA)
    const T* __restrict__ rois,         // in:  [num_rois, 6] = batch_ind, xc, yc, w, h, angle
    const float spatial_scale,
    const int num_rois,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width)
{
  const int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  // Grid-stride over pooling cells in x; channel index comes from the y dimension.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < roi_pool_pt_num; i += blockDim.x * gridDim.x) {
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < channels) {
      int pw = i % pooled_width;
      int ph = (i / pooled_width) % pooled_height;
      int n = i / pooled_width / pooled_height;
      const T* rois_offset = rois + n * 6; // batch_ind, xc, yc, w, h, angle
      int roi_batch_ind = rois_offset[0];
      T rbox_area = get_rotated_bounding_box_area(spatial_scale, rois_offset[4], rois_offset[3], pooled_height, pooled_width);
      int roi_pool_idx = n * pooled_height * pooled_width + ph * pooled_width + pw;
      // Private copy of this bin's 8 corner coordinates (no barrier needed).
      T roi_pool_pts_local[8];
      for (int k = 0; k < 8; k++) {
        roi_pool_pts_local[k] = roi_pool_pts[k * roi_pool_pt_num + roi_pool_idx];
      }
      int left, top, right, bottom;
      get_rotated_bounding_box(left, top, right, bottom, roi_pool_pts_local, width, height);
      const T* bottom_data_offset = bottom_data + (roi_batch_ind * channels + c) * height * width;
      T output_val = 0.0;
      // Weighted sum over all pixels intersecting the bin's bounding box.
      for (int hh = top; hh < bottom+1; ++hh) {
        for (int ww = left; ww < right+1; ++ww) {
          T inter_area = itersect_area_rbox_aabox(
              roi_pool_pts_local,
              rbox_area,
              ww + 0.f,
              ww + 1.f,
              hh + 0.f,
              hh + 1.f
          );
          T px_weight = inter_area / rbox_area; // fraction of the bin covered by this pixel
          output_val += px_weight * bottom_data_offset[hh * width + ww];
        }
      }
      int top_data_idx = (n * channels + c) * pooled_width * pooled_height + ph * pooled_width + pw;
      top_data[top_data_idx] = output_val;
    }
  }
}
// Host driver for rotated RoI-Align forward (CUDA build; twin of the HIP row).
// Pipeline: (1) compute each pooling bin's corner points on the device,
// (2) launch compute_weight to produce the pooled output.
// Disabled #if 0 branches keep earlier pipeline variants for reference.
//
// NOTE(review): batch_size is unused here — confirm it is kept only for API symmetry.
// NOTE(review): cudaMalloc is given the address of the unique_ptr_device object
// itself; this relies on the wrapper's layout — confirm it is designed for this.
// NOTE(review): cudaDeviceSynchronize after every launch serializes the pipeline;
// the kernels already run on `stream`, so these syncs look debug-only.
void RROIAlign_forward(
    int batch_size,
    int num_rois,
    int channels,
    int height,
    int width,
    int pooled_height,
    int pooled_width,
    float spatial_scale,
    float* bottom_data_d,   // device ptr: input feature map
    float* rois_d,          // device ptr: [num_rois, 6] rotated boxes
    float* top_data_d,      // device ptr: pooled output
    cudaStream_t stream
    )
{
#if 0
  // Variant A (disabled): precompute per-roi transform matrices, then bin points.
  unique_ptr_device<float> transfrom_matrix_d(nullptr);
  CUDA_CHECK(cudaMalloc((void **) &transfrom_matrix_d, 6 * num_rois * sizeof(float)));
  {
    int thread_num = std::min(num_rois, 1024);
    int block_num = static_cast<int>(std::ceil(num_rois * 1.0 / thread_num));
    compute_all_transform_matrix<float><<<block_num, thread_num, 0, stream>>>(
        transfrom_matrix_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(cudaMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    int thread_num = std::min(roi_pool_pt_num, 1024);
    int block_num = static_cast<int>(std::ceil(roi_pool_pt_num * 1.0 / thread_num));
    compute_roi_pool_pts_coalesced<float><<<block_num, thread_num, 0, stream>>>(
        roi_pool_pts_d.get(),
        transfrom_matrix_d.get(),
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
#endif
#if 0
  // Variant B (disabled): shared-memory bin-point computation.
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(cudaMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    dim3 block(pooled_height, pooled_width);
    dim3 grid(num_rois);
    compute_roi_pool_pts_shared<float><<<grid, block, 0, stream>>>(
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
#else
  // Active: one block per roi, one thread per pooling bin, local-memory math.
  unique_ptr_device<float> roi_pool_pts_d(nullptr);
  int roi_pool_pt_num = num_rois * pooled_height * pooled_width;
  CUDA_CHECK(cudaMalloc((void **) &roi_pool_pts_d, 8 * roi_pool_pt_num * sizeof(float)));
  {
    dim3 block(pooled_height, pooled_width);
    dim3 grid(num_rois);
    compute_roi_pool_pts_local<float><<<grid, block, 0, stream>>>(
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        roi_pool_pt_num,
        num_rois,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
#endif
#if 0
  // Variant C (disabled): pooled-output kernel using per-thread local memory.
  {
    cudaDeviceProp deviceProperties;
    int gpu_id = 0;
    CUDA_CHECK(cudaGetDeviceProperties(&deviceProperties, gpu_id));
    // int thread_num_x = std::min(pooled_width * pooled_height, 1024);
    int max_thread_num = 512;
    int thread_num_y = std::min(channels, max_thread_num);
    int thread_num_x = max_thread_num / thread_num_y;
    // int block_num_x = static_cast<int>(std::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x));
    int block_num_x = std::min(static_cast<int>(std::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), deviceProperties.maxGridSize[0]);
    int block_num_y = static_cast<int>(std::ceil(channels * 1.0 / thread_num_y));
    dim3 block(thread_num_x, thread_num_y);
    dim3 grid(block_num_x, block_num_y);
    compute_weight_local<float><<<grid, block, 0, stream>>>(
        top_data_d,
        bottom_data_d,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        channels,
        height,
        width,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
#else
  // Active: shared-memory compute_weight; blockDim.x is fixed at 1 so the
  // dynamic shared buffer (8 floats per x-thread) caches a single bin.
  {
    cudaDeviceProp deviceProperties;
    int gpu_id = 0;
    CUDA_CHECK(cudaGetDeviceProperties(&deviceProperties, gpu_id));
    int max_thread_num = 512;
    // int thread_num_x = std::min(max_thread_num / 8, pooled_width);
    // int thread_num_y = std::min(max_thread_num / thread_num_x, channels);
    int thread_num_y = std::min(channels, max_thread_num);
    // int thread_num_x = max_thread_num / thread_num_y;
    int thread_num_x = 1;
    int block_num_x = std::min(static_cast<int>(std::ceil(pooled_width * pooled_height * num_rois * 1.0 / thread_num_x)), deviceProperties.maxGridSize[0]);
    int block_num_y = static_cast<int>(std::ceil(channels * 1.0 / thread_num_y));
    dim3 block(thread_num_x, thread_num_y);
    dim3 grid(block_num_x, block_num_y);
    size_t shared_mem_size = 8 * thread_num_x * sizeof(float); // matches kernel's per-bin cache
    compute_weight<float><<<grid, block, shared_mem_size, stream>>>(
        top_data_d,
        bottom_data_d,
        roi_pool_pts_d.get(),
        rois_d,
        spatial_scale,
        num_rois,
        channels,
        height,
        width,
        pooled_height,
        pooled_width);
    CUDA_CHECK(cudaDeviceSynchronize());
  }
#endif
}
|
c7ec8dca84c6e0b64dc58b6c33e3755ffcd6b21b.hip | // !!! This is a file automatically generated by hipify!!!
#include "deltaCV/gpu/cudaImg.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
// Sobel edge-magnitude kernel: one thread per pixel of a single-channel
// imgRows x imgCols image.  Writes (|Gx| + |Gy|) / 2; border pixels
// (first/last row/column) are left untouched.
//
// BUG FIX: the original wrote (Gx+Gx)/2, discarding the vertical gradient Gy
// it had just computed; the output now combines both gradient components.
// NOTE(review): (|Gx|+|Gy|)/2 can exceed 255 and will wrap when stored in the
// unsigned char output — consider clamping if saturation is desired.
__global__ void sobel(unsigned char* dataIn,
                      unsigned char* dataOut,
                      short int imgRows,
                      short int imgCols)
{
    int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
    int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
    short int Gx=0;
    short int Gy=0;
    if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
    {
        // Horizontal gradient (3x3 Sobel, weights 1/2/1).
        Gx = dataIn[(xdx-1)+(ydx-1)*imgCols] - dataIn[(xdx+1)+(ydx-1)*imgCols] +
             dataIn[(xdx-1)+ydx*imgCols]*2 - dataIn[(xdx+1)+ydx*imgCols]*2 +
             dataIn[(xdx-1)+(ydx+1)*imgCols] - dataIn[(xdx+1)+(ydx+1)*imgCols];
        // Vertical gradient.
        Gy = dataIn[(xdx-1)+(ydx-1)*imgCols] + dataIn[xdx+(ydx-1)*imgCols]*2 + dataIn[(xdx+1)+(ydx-1)*imgCols] -
             dataIn[(xdx-1)+(ydx+1)*imgCols] - dataIn[xdx+(ydx+1)*imgCols]*2 - dataIn[(xdx+1)+(ydx+1)*imgCols];
        if(Gx<0)
            Gx = -Gx;
        if(Gy<0)
            Gy = -Gy;
        dataOut[xdx + ydx*imgCols] = (Gx+Gy)/2;
    }
}
// Scharr edge-magnitude kernel: one thread per pixel; 3x3 Scharr weights
// (3/10/3).  Writes (|Gx| + |Gy|) / 2; border pixels are left untouched.
//
// BUG FIX: the original wrote (Gx+Gx)/2, discarding the vertical gradient Gy
// it had just computed; the output now combines both gradient components.
// NOTE(review): the combined magnitude can exceed 255 and will wrap in the
// unsigned char output — consider clamping if saturation is desired.
__global__ void scharr(unsigned char* dataIn,
                       unsigned char* dataOut,
                       short int imgRows,
                       short int imgCols)
{
    int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
    int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
    short int Gx=0;
    short int Gy=0;
    if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
    {
        // Horizontal gradient (Scharr weights 3/10/3).
        Gx = dataIn[(xdx-1)+(ydx-1)*imgCols]*3 - dataIn[(xdx+1)+(ydx-1)*imgCols]*3 +
             dataIn[(xdx-1)+ydx*imgCols]*10 - dataIn[(xdx+1)+ydx*imgCols]*10 +
             dataIn[(xdx-1)+(ydx+1)*imgCols]*3 - dataIn[(xdx+1)+(ydx+1)*imgCols]*3;
        // Vertical gradient.
        Gy = dataIn[(xdx-1)+(ydx-1)*imgCols]*3 + dataIn[xdx+(ydx-1)*imgCols]*10 + dataIn[(xdx+1)+(ydx-1)*imgCols]*3 -
             dataIn[(xdx-1)+(ydx+1)*imgCols]*3 - dataIn[xdx+(ydx+1)*imgCols]*10 - dataIn[(xdx+1)+(ydx+1)*imgCols]*3;
        if(Gx<0)
            Gx = -Gx;
        if(Gy<0)
            Gy = -Gy;
        dataOut[xdx + ydx*imgCols] = (Gx+Gy)/2;
    }
}
// Host wrapper: launches the sobel kernel with the caller-supplied
// block/grid decomposition on the default stream (no explicit stream, no
// shared memory).
// NOTE(review): no error check after the launch — consider hipGetLastError().
void sobel_gpu(unsigned char* dataIn,
               unsigned char* dataOut,
               short int imgRows,
               short int imgCols,
               dim3 tPerBlock,
               dim3 bPerGrid)
{
    hipLaunchKernelGGL(( sobel), dim3(bPerGrid),dim3(tPerBlock), 0, 0, dataIn,dataOut,imgRows,imgCols);
}
// Host wrapper: launches the scharr kernel with the caller-supplied
// block/grid decomposition on the default stream.
// NOTE(review): no error check after the launch — consider hipGetLastError().
void scharr_gpu(unsigned char* dataIn,
                unsigned char* dataOut,
                short int imgRows,
                short int imgCols,
                dim3 tPerBlock,
                dim3 bPerGrid)
{
    hipLaunchKernelGGL(( scharr), dim3(bPerGrid),dim3(tPerBlock), 0, 0, dataIn,dataOut,imgRows,imgCols);
}
| c7ec8dca84c6e0b64dc58b6c33e3755ffcd6b21b.cu | #include "deltaCV/gpu/cudaImg.cuh"
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
// Sobel edge-magnitude kernel (CUDA twin of the HIP row): one thread per
// pixel; writes (|Gx| + |Gy|) / 2, leaving border pixels untouched.
//
// BUG FIX: the original wrote (Gx+Gx)/2, discarding the vertical gradient Gy
// it had just computed; the output now combines both gradient components.
// NOTE(review): (|Gx|+|Gy|)/2 can exceed 255 and will wrap in the unsigned
// char output — consider clamping if saturation is desired.
__global__ void sobel(unsigned char* dataIn,
                      unsigned char* dataOut,
                      short int imgRows,
                      short int imgCols)
{
    int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
    int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
    short int Gx=0;
    short int Gy=0;
    if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
    {
        // Horizontal gradient (3x3 Sobel, weights 1/2/1).
        Gx = dataIn[(xdx-1)+(ydx-1)*imgCols] - dataIn[(xdx+1)+(ydx-1)*imgCols] +
             dataIn[(xdx-1)+ydx*imgCols]*2 - dataIn[(xdx+1)+ydx*imgCols]*2 +
             dataIn[(xdx-1)+(ydx+1)*imgCols] - dataIn[(xdx+1)+(ydx+1)*imgCols];
        // Vertical gradient.
        Gy = dataIn[(xdx-1)+(ydx-1)*imgCols] + dataIn[xdx+(ydx-1)*imgCols]*2 + dataIn[(xdx+1)+(ydx-1)*imgCols] -
             dataIn[(xdx-1)+(ydx+1)*imgCols] - dataIn[xdx+(ydx+1)*imgCols]*2 - dataIn[(xdx+1)+(ydx+1)*imgCols];
        if(Gx<0)
            Gx = -Gx;
        if(Gy<0)
            Gy = -Gy;
        dataOut[xdx + ydx*imgCols] = (Gx+Gy)/2;
    }
}
// Scharr edge-magnitude kernel (CUDA twin of the HIP row): one thread per
// pixel; 3x3 Scharr weights (3/10/3); writes (|Gx| + |Gy|) / 2.
//
// BUG FIX: the original wrote (Gx+Gx)/2, discarding the vertical gradient Gy
// it had just computed; the output now combines both gradient components.
// NOTE(review): the combined magnitude can exceed 255 and will wrap in the
// unsigned char output — consider clamping if saturation is desired.
__global__ void scharr(unsigned char* dataIn,
                       unsigned char* dataOut,
                       short int imgRows,
                       short int imgCols)
{
    int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
    int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
    short int Gx=0;
    short int Gy=0;
    if(xdx>0 && xdx<imgCols-1 && ydx>0 && ydx<imgRows-1)
    {
        // Horizontal gradient (Scharr weights 3/10/3).
        Gx = dataIn[(xdx-1)+(ydx-1)*imgCols]*3 - dataIn[(xdx+1)+(ydx-1)*imgCols]*3 +
             dataIn[(xdx-1)+ydx*imgCols]*10 - dataIn[(xdx+1)+ydx*imgCols]*10 +
             dataIn[(xdx-1)+(ydx+1)*imgCols]*3 - dataIn[(xdx+1)+(ydx+1)*imgCols]*3;
        // Vertical gradient.
        Gy = dataIn[(xdx-1)+(ydx-1)*imgCols]*3 + dataIn[xdx+(ydx-1)*imgCols]*10 + dataIn[(xdx+1)+(ydx-1)*imgCols]*3 -
             dataIn[(xdx-1)+(ydx+1)*imgCols]*3 - dataIn[xdx+(ydx+1)*imgCols]*10 - dataIn[(xdx+1)+(ydx+1)*imgCols]*3;
        if(Gx<0)
            Gx = -Gx;
        if(Gy<0)
            Gy = -Gy;
        dataOut[xdx + ydx*imgCols] = (Gx+Gy)/2;
    }
}
// Host wrapper: launches the sobel kernel with the caller-supplied
// block/grid decomposition on the default stream.
// NOTE(review): no error check after the launch — consider cudaGetLastError().
void sobel_gpu(unsigned char* dataIn,
               unsigned char* dataOut,
               short int imgRows,
               short int imgCols,
               dim3 tPerBlock,
               dim3 bPerGrid)
{
    sobel<<<bPerGrid,tPerBlock>>>(dataIn,dataOut,imgRows,imgCols);
}
// Host wrapper: launches the scharr kernel with the caller-supplied
// block/grid decomposition on the default stream.
// NOTE(review): no error check after the launch — consider cudaGetLastError().
void scharr_gpu(unsigned char* dataIn,
                unsigned char* dataOut,
                short int imgRows,
                short int imgCols,
                dim3 tPerBlock,
                dim3 bPerGrid)
{
    scharr<<<bPerGrid,tPerBlock>>>(dataIn,dataOut,imgRows,imgCols);
}
|
6527d70852f758735f899829df890093e3152238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/relu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
#ifdef __HIPCC__
typedef __half2 half2;
#endif
// Elementwise ReLU: Y[i] = max(X[i], 0).  On SM35+ the read goes through
// __ldg (read-only data cache).
template <typename T>
__global__ void ReluCUDAKernel(const int N, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(X + i) > 0 ? __ldg(X + i) : T(0);
#else
    Y[i] = X[i] > 0 ? X[i] : T(0);
#endif
  }
}
// fp16 scalar ReLU.  SM53+ uses native half comparison (__hgt); older
// architectures compare after converting to float.
__global__ void ReluHalfCUDAKernel(const int N, const half* X, half* Y) {
  const half kZero = __float2half(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    Y[i] = __hgt(__ldg(X + i), kZero) ? __ldg(X + i) : kZero;
#else
    Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
  }
}
// fp16x2 vectorized ReLU (two elements per iteration).  On SM53+,
// __hgt2 yields 1.0/0.0 per lane, so multiplying by X zeroes negatives.
__global__ void ReluHalf2CUDAKernel(const int N, const half2* X, half2* Y) {
  const half2 kZero = __float2half2_rn(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    Y[i] = __hmul2(__hgt2(__ldg(X + i), kZero), __ldg(X + i));
#else
    const float2 xx = __half22float2(X[i]);
    Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f, xx.y > 0 ? xx.y : 0.f);
#endif
  }
}
// ReLU backward: dX[i] = dY[i] where the forward output Y[i] > 0, else 0.
template <typename T>
__global__ void
ReluGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = __ldg(Y + i) > 0 ? __ldg(dY + i) : 0;
#else
    dX[i] = Y[i] > 0 ? dY[i] : 0;
#endif
  }
}
// fp16 scalar ReLU backward: pass dY through where Y > 0, else 0.
__global__ void ReluGradientHalfCUDAKernel(
    const int N,
    const half* dY,
    const half* Y,
    half* dX) {
  const half kZero = __float2half(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    dX[i] = __hgt(__ldg(Y + i), kZero) ? __ldg(dY + i) : kZero;
#else
    dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
  }
}
// fp16x2 vectorized ReLU backward: per lane, dX = (Y > 0) * dY.
__global__ void ReluGradientHalf2CUDAKernel(
    const int N,
    const half2* dY,
    const half2* Y,
    half2* dX) {
  const half2 kZero = __float2half2_rn(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    dX[i] = __hmul2(__hgt2(__ldg(Y + i), kZero), __ldg(dY + i));
#else
    const float2 dy = __half22float2(dY[i]);
    const float2 yy = __half22float2(Y[i]);
    dX[i] = __floats2half2_rn(yy.x > 0 ? dy.x : 0.f, yy.y > 0 ? dy.y : 0.f);
#endif
  }
}
} // namespace
// Generic ReLU forward dispatch (float and other scalar T): launches
// ReluCUDAKernel on the context's stream.  Always returns true.
template <>
template <typename T>
bool ReluFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
  hipLaunchKernelGGL(( ReluCUDAKernel<T>)
      , dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), N, X, Y);
  return true;
}
// fp16 ReLU forward dispatch: uses the vectorized half2 kernel when N is
// even (half the iterations), otherwise the scalar half kernel.
template <>
template <>
bool ReluFunctor<CUDAContext>::operator()<at::Half>(
    const int N,
    const at::Half* X,
    at::Half* Y,
    CUDAContext* context) const {
  if ((N & 1) == 0) {
    // Even N: reinterpret as N/2 half2 elements.
    hipLaunchKernelGGL(( ReluHalf2CUDAKernel),
        dim3(CAFFE_GET_BLOCKS((N >> 1))),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        (N >> 1),
        reinterpret_cast<const half2*>(X),
        reinterpret_cast<half2*>(Y));
  } else {
    hipLaunchKernelGGL(( ReluHalfCUDAKernel),
        dim3(CAFFE_GET_BLOCKS(N)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        N, reinterpret_cast<const half*>(X), reinterpret_cast<half*>(Y));
  }
  return true;
}
// Generic ReLU backward dispatch: size is the element count of Y; launches
// ReluGradientCUDAKernel on the context's stream.  Always returns true.
template <>
template <typename T>
bool ReluGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const T* Y,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  hipLaunchKernelGGL(( ReluGradientCUDAKernel<T>)
      , dim3(CAFFE_GET_BLOCKS(size)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(), size, dY, Y, dX);
  return true;
}
// fp16 ReLU backward dispatch: vectorized half2 kernel for even sizes,
// scalar half kernel otherwise.
template <>
template <>
bool ReluGradientFunctor<CUDAContext>::Forward<at::Half>(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const at::Half* Y,
    const at::Half* dY,
    at::Half* dX,
    CUDAContext* context) const {
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  if ((size & 1) == 0) {
    // Even size: reinterpret as size/2 half2 elements.
    hipLaunchKernelGGL(( ReluGradientHalf2CUDAKernel),
        dim3(CAFFE_GET_BLOCKS((size >> 1))),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        (size >> 1),
        reinterpret_cast<const half2*>(dY),
        reinterpret_cast<const half2*>(Y),
        reinterpret_cast<half2*>(dX));
  } else {
    hipLaunchKernelGGL(( ReluGradientHalfCUDAKernel),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(),
        size,
        reinterpret_cast<const half*>(dY),
        reinterpret_cast<const half*>(Y),
        reinterpret_cast<half*>(dX));
  }
  return true;
}
// Register the float/fp16 Relu forward and backward operators for the CUDA
// backend.
REGISTER_CUDA_OPERATOR(
    Relu,
    UnaryElementwiseOp<
        TensorTypes<float, at::Half>,
        CUDAContext,
        ReluFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    ReluGradient,
    BinaryElementwiseOp<
        TensorTypes<float, at::Half>,
        CUDAContext,
        ReluGradientFunctor<CUDAContext>>);
} // namespace caffe2
| 6527d70852f758735f899829df890093e3152238.cu | #include "caffe2/operators/relu_op.h"
#include <algorithm>
#include <functional>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
#ifdef __HIPCC__
typedef __half2 half2;
#endif
// Elementwise ReLU: Y[i] = max(X[i], 0).  On SM35+ the read goes through
// __ldg (read-only data cache).
template <typename T>
__global__ void ReluCUDAKernel(const int N, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    Y[i] = __ldg(X + i) > 0 ? __ldg(X + i) : T(0);
#else
    Y[i] = X[i] > 0 ? X[i] : T(0);
#endif
  }
}
// fp16 scalar ReLU.  SM53+ uses native half comparison (__hgt); older
// architectures compare after converting to float.
__global__ void ReluHalfCUDAKernel(const int N, const half* X, half* Y) {
  const half kZero = __float2half(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    Y[i] = __hgt(__ldg(X + i), kZero) ? __ldg(X + i) : kZero;
#else
    Y[i] = (__half2float(X[i]) > 0) ? X[i] : kZero;
#endif
  }
}
// fp16x2 vectorized ReLU (two elements per iteration).  On SM53+,
// __hgt2 yields 1.0/0.0 per lane, so multiplying by X zeroes negatives.
__global__ void ReluHalf2CUDAKernel(const int N, const half2* X, half2* Y) {
  const half2 kZero = __float2half2_rn(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    Y[i] = __hmul2(__hgt2(__ldg(X + i), kZero), __ldg(X + i));
#else
    const float2 xx = __half22float2(X[i]);
    Y[i] = __floats2half2_rn(xx.x > 0 ? xx.x : 0.f, xx.y > 0 ? xx.y : 0.f);
#endif
  }
}
// ReLU backward: dX[i] = dY[i] where the forward output Y[i] > 0, else 0.
template <typename T>
__global__ void
ReluGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = __ldg(Y + i) > 0 ? __ldg(dY + i) : 0;
#else
    dX[i] = Y[i] > 0 ? dY[i] : 0;
#endif
  }
}
// fp16 scalar ReLU backward: pass dY through where Y > 0, else 0.
__global__ void ReluGradientHalfCUDAKernel(
    const int N,
    const half* dY,
    const half* Y,
    half* dX) {
  const half kZero = __float2half(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    dX[i] = __hgt(__ldg(Y + i), kZero) ? __ldg(dY + i) : kZero;
#else
    dX[i] = (__half2float(Y[i]) > 0) ? dY[i] : kZero;
#endif
  }
}
// fp16x2 vectorized ReLU backward: per lane, dX = (Y > 0) * dY.
__global__ void ReluGradientHalf2CUDAKernel(
    const int N,
    const half2* dY,
    const half2* Y,
    half2* dX) {
  const half2 kZero = __float2half2_rn(0.0f);
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 530
    dX[i] = __hmul2(__hgt2(__ldg(Y + i), kZero), __ldg(dY + i));
#else
    const float2 dy = __half22float2(dY[i]);
    const float2 yy = __half22float2(Y[i]);
    dX[i] = __floats2half2_rn(yy.x > 0 ? dy.x : 0.f, yy.y > 0 ? dy.y : 0.f);
#endif
  }
}
} // namespace
// Generic ReLU forward dispatch (float and other scalar T): launches
// ReluCUDAKernel on the context's stream.  Always returns true.
template <>
template <typename T>
bool ReluFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
  ReluCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, X, Y);
  return true;
}
// at::Half specialization: when N is even, the buffers are reinterpreted as
// half2 and processed two elements per thread; odd N falls back to scalar
// half. (NOTE(review): the half2 path presumably relies on the tensor
// allocator providing 4-byte alignment — verify for sub-view inputs.)
template <>
template <>
bool ReluFunctor<CUDAContext>::operator()<at::Half>(
    const int N,
    const at::Half* X,
    at::Half* Y,
    CUDAContext* context) const {
  if ((N & 1) == 0) {
    // Even count: vectorized half2 kernel over N/2 pairs.
    ReluHalf2CUDAKernel<<<
        CAFFE_GET_BLOCKS((N >> 1)),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context->cuda_stream()>>>(
        (N >> 1),
        reinterpret_cast<const half2*>(X),
        reinterpret_cast<half2*>(Y));
  } else {
    // Odd count: scalar half kernel over all N elements.
    ReluHalfCUDAKernel<<<
        CAFFE_GET_BLOCKS(N),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context->cuda_stream()>>>(
        N, reinterpret_cast<const half*>(X), reinterpret_cast<half*>(Y));
  }
  return true;
}
// Generic ReLU backward launcher. The total element count is the product of
// Y_dims (dY is assumed to have the same size, so its dims are ignored).
template <>
template <typename T>
bool ReluGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const T* Y,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  // Flattened element count = product of all dimensions.
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  ReluGradientCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(size, dY, Y, dX);
  return true;
}
// at::Half specialization of the backward pass: vectorized half2 path for an
// even element count, scalar half path otherwise (mirrors the forward op).
template <>
template <>
bool ReluGradientFunctor<CUDAContext>::Forward<at::Half>(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const at::Half* Y,
    const at::Half* dY,
    at::Half* dX,
    CUDAContext* context) const {
  // Flattened element count = product of all dimensions.
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  if ((size & 1) == 0) {
    // Even count: process two halves at a time as half2.
    ReluGradientHalf2CUDAKernel<<<
        CAFFE_GET_BLOCKS((size >> 1)),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context->cuda_stream()>>>(
        (size >> 1),
        reinterpret_cast<const half2*>(dY),
        reinterpret_cast<const half2*>(Y),
        reinterpret_cast<half2*>(dX));
  } else {
    // Odd count: scalar half kernel.
    ReluGradientHalfCUDAKernel<<<
        CAFFE_GET_BLOCKS(size),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context->cuda_stream()>>>(
        size,
        reinterpret_cast<const half*>(dY),
        reinterpret_cast<const half*>(Y),
        reinterpret_cast<half*>(dX));
  }
  return true;
}
// Register the CUDA implementations of Relu / ReluGradient for float and
// half tensors with the caffe2 operator registry.
REGISTER_CUDA_OPERATOR(
    Relu,
    UnaryElementwiseOp<
        TensorTypes<float, at::Half>,
        CUDAContext,
        ReluFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    ReluGradient,
    BinaryElementwiseOp<
        TensorTypes<float, at::Half>,
        CUDAContext,
        ReluGradientFunctor<CUDAContext>>);
} // namespace caffe2
|
e11daab2c8181c6bf1171cd7a17fed213b35c3fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "headers/headers_mains.h"
#include "device_dedispersion_kernel.cu"
#include "device_corner_turn_kernel.cu"
#include "device_binning_kernel.cu"
#include "device_SPS_inplace_kernel.cu" //Added by KA
#include "device_MSD_grid_kernel.cu" //Added by KA
#include "device_MSD_plane_kernel.cu" //Added by KA
#include "device_MSD_limited_kernel.cu" //Added by KA
#include "device_SNR_limited_kernel.cu" //Added by KA
#include "device_threshold_kernel.cu" //Added by KA
#include "device_single_FIR_kernel.cu" //Added by KA
#include "device_harmonic_summing_kernel.cu" //Added by KA
#include "device_SPS_inplace.cu" //Added by KA
#include "device_SPS_long.cu" //Added by KA
#include "device_MSD_BLN_grid.cu" //Added by KA
#include "device_MSD_BLN_grid_kernel.hip" //Added by KA
#include "device_MSD_BLN_pw.cu" //Added by KA
//#include "device_MSD_BLN_pw_dp.cu" //Added by KA
#include "device_MSD_grid.cu" //Added by KA
#include "device_MSD_plane.cu" //Added by KA
#include "device_MSD_limited.cu" //Added by KA
#include "device_SNR_limited.cu" //Added by KA
#include "device_threshold.cu" //Added by KA
#include "device_single_FIR.cu" //Added by KA
#include "device_harmonic_summing.cu"
#include "device_peak_find.hip"
#include "device_peak_find_kernel.cu"
#include "device_bin.cu"
#include "device_dedisperse.cu"
#include "device_corner_turn.cu"
#include "device_set_stretch.cu"
#include "device_stats.cu"
#include "device_stretch.cu"
#include "device_power.cu"
#include "device_init.cu"
#include "device_inference.cu"
#include "device_load_data.cu"
#include "device_save_data.cu"
#include "device_zero_dm.cu"
#include "device_zero_dm_outliers.cu"
#include "device_rfi.cu"
#include "device_analysis.cu" //Added by KA
#include "device_periods.cu" //Added by KA
// fdas
#include "device_acceleration_fdas.cu"
| e11daab2c8181c6bf1171cd7a17fed213b35c3fe.cu | #include "headers/headers_mains.h"
#include "device_dedispersion_kernel.cu"
#include "device_corner_turn_kernel.cu"
#include "device_binning_kernel.cu"
#include "device_SPS_inplace_kernel.cu" //Added by KA
#include "device_MSD_grid_kernel.cu" //Added by KA
#include "device_MSD_plane_kernel.cu" //Added by KA
#include "device_MSD_limited_kernel.cu" //Added by KA
#include "device_SNR_limited_kernel.cu" //Added by KA
#include "device_threshold_kernel.cu" //Added by KA
#include "device_single_FIR_kernel.cu" //Added by KA
#include "device_harmonic_summing_kernel.cu" //Added by KA
#include "device_SPS_inplace.cu" //Added by KA
#include "device_SPS_long.cu" //Added by KA
#include "device_MSD_BLN_grid.cu" //Added by KA
#include "device_MSD_BLN_grid_kernel.cu" //Added by KA
#include "device_MSD_BLN_pw.cu" //Added by KA
//#include "device_MSD_BLN_pw_dp.cu" //Added by KA
#include "device_MSD_grid.cu" //Added by KA
#include "device_MSD_plane.cu" //Added by KA
#include "device_MSD_limited.cu" //Added by KA
#include "device_SNR_limited.cu" //Added by KA
#include "device_threshold.cu" //Added by KA
#include "device_single_FIR.cu" //Added by KA
#include "device_harmonic_summing.cu"
#include "device_peak_find.cu"
#include "device_peak_find_kernel.cu"
#include "device_bin.cu"
#include "device_dedisperse.cu"
#include "device_corner_turn.cu"
#include "device_set_stretch.cu"
#include "device_stats.cu"
#include "device_stretch.cu"
#include "device_power.cu"
#include "device_init.cu"
#include "device_inference.cu"
#include "device_load_data.cu"
#include "device_save_data.cu"
#include "device_zero_dm.cu"
#include "device_zero_dm_outliers.cu"
#include "device_rfi.cu"
#include "device_analysis.cu" //Added by KA
#include "device_periods.cu" //Added by KA
// fdas
#include "device_acceleration_fdas.cu"
|
8753c6f88ec67db16bd65db09f5a379fc7edc921.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Fixes vs. original: a __global__ kernel must declare a 'void' return type
// (the original omitted it and would not compile); added a bounds guard so
// launches with more threads than elements are safe; removed the '#if 0'
// dead code.
__global__ void addArray(int *a, int *b, int *c, int count) {
    int i = threadIdx.x;
    if (i < count)
        c[i] = a[i] + b[i];
}
// Host driver: adds two 5-element vectors on the GPU and prints the result.
// Fixes vs. original:
//  * 'int main' instead of non-standard 'void main';
//  * missing ';' after the third hipMalloc;
//  * both input copies used the wrong hipMemcpyKind (HostToHost /
//    DeviceToHost) — they must be HostToDevice;
//  * the kernel launch dropped the 'count' argument required by addArray;
//  * device buffers are now freed before exit.
int main() {
    const int count = 5;
    const int size = count * sizeof(int);
    int ha[] = { 1, 2, 3, 4, 5 };
    int hb[] = { 10, 20, 30, 40, 50 };
    int hc[count];
    // device buffers
    int *da, *db, *dc;
    hipMalloc(&da, size);
    hipMalloc(&db, size);
    hipMalloc(&dc, size);
    hipMemcpy(da, ha, size, hipMemcpyHostToDevice);
    hipMemcpy(db, hb, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(addArray, dim3(1), dim3(count), 0, 0, da, db, dc, count);
    hipMemcpy(hc, dc, size, hipMemcpyDeviceToHost);
    for (int i = 0; i < count; ++i)
    {
        printf("%d", hc[i]);
    }
    hipFree(da);
    hipFree(db);
    hipFree(dc);
    getchar();
    return 0;
}
#include <device_launch_parameters.h>
#include <stdio.h>
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Fixes vs. original: a __global__ kernel must declare a 'void' return type
// (the original omitted it and would not compile); added a bounds guard so
// launches with more threads than elements are safe; removed the '#if 0'
// dead code.
__global__ void addArray(int *a, int *b, int *c, int count) {
    int i = threadIdx.x;
    if (i < count)
        c[i] = a[i] + b[i];
}
// Host driver: adds two 5-element vectors on the GPU and prints the result.
// Fixes vs. original:
//  * 'int main' instead of non-standard 'void main';
//  * missing ';' after the third cudaMalloc;
//  * both input copies used the wrong cudaMemcpyKind (HostToHost /
//    DeviceToHost) — they must be HostToDevice;
//  * the kernel launch dropped the 'count' argument required by addArray;
//  * device buffers are now freed before exit.
int main() {
    const int count = 5;
    const int size = count * sizeof(int);
    int ha[] = { 1, 2, 3, 4, 5 };
    int hb[] = { 10, 20, 30, 40, 50 };
    int hc[count];
    // device buffers
    int *da, *db, *dc;
    cudaMalloc(&da, size);
    cudaMalloc(&db, size);
    cudaMalloc(&dc, size);
    cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
    addArray<<<1, count>>>(da, db, dc, count);
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; ++i)
    {
        printf("%d", hc[i]);
    }
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    getchar();
    return 0;
}
Toa_gpu_3dvti_fd_1orderfunciton.hip | // !!! This is a file automatically generated by hipify!!!
//a#########################################################
//a## 3D Acoustic VTI Medium Forward
//a##
//a## Ps :GPU(CUDA)
//a##
//a##/*a***************************
//a##Function for VTI medium modeling,
//a##
//a## Ps: the function of modeling following:
//a##
//a## du/dt=1/rho*dp/dx ,
//a## dv/dt=1/rho*dp/dy ,
//a## dw/dt=1/rho*dq/dz ,
//a## dp/dt=rho*vpx^2*(du/dx+dv/dy)+rho*vp*vpn*dw/dz ,
//a## dq/dt=rho*vp*vpn*(du/dx+dv/dy)+rho*vp^2*dw/dz ,
//a## vpx^2=vp^2*(1+2*epsilon);
//a## vpn^2=vp^2*(1+2*delta);
//a##
//a##*********a*******************/
//a##
//a## code by Rong Tao
//a##
//a#########################################################
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#define pi 3.141592653
#define BlockSize1 16// tile size in 1st-axis
#define BlockSize2 16// tile size in 2nd-axis
#define mm 4
__device__ float d0;
__constant__ float c[mm] = {1.196289, -0.0797526, 0.009570313, -0.0006975447};
//a################################################################################
void check_gpu_error(const char *msg)
/*< check GPU errors: print the last HIP error (with caller context msg) and
    terminate; no-op when no error is pending >*/
{
    const hipError_t err = hipGetLastError();
    if (err == hipSuccess)
        return;
    printf("Cuda error: %s: %s\n", msg, hipGetErrorString(err));
    exit(0);
}
//a################################################################################
__global__ void
add_source(float pfac, int fsx, int fsy, int sz, int nx, int ny, int nz, int nnx, int nny, int nnz, float dt, float t,
           float favg, int wtype, int npml, int is, int dsx, int dsy, float *P, float *Q, int nsx)
/*< generate source wavelet with time delay and inject it (scaled by pfac)
    into P and Q at the shot position for time t; launched with <<<1,1>>>.
    wtype: 1 = Ricker, 2 = derivative of Gaussian, 3 = Gaussian.
    Injection stops once t exceeds two wavelet delays (2/favg). >*/
{
    int ixs, iys, izs;
    float x_, xx_, tdelay, ts, source = 0.0, sx, sy;
    tdelay = 1.0 / favg;   // time delay so the wavelet starts near zero
    ts = t - tdelay;
    // shot coordinates: 'is' enumerates shots row-major over nsx columns
    sx = fsx + is % nsx * dsx;
    sy = fsy + is / nsx * dsy;
    if (wtype == 1)//ricker wavelet
    {
        x_ = favg * ts;
        xx_ = x_ * x_;
        source = (1 - 2 * pi * pi * (xx_)) * exp(-(pi * pi * xx_));
    } else if (wtype == 2) {//derivative of gaussian
        x_ = (-4) * favg * favg * pi * pi / log(0.1);
        source = (-2) * pi * pi * ts * exp(-x_ * ts * ts);
    } else if (wtype == 3) {//gaussian
        x_ = (-1) * favg * favg * pi * pi / log(0.1);
        source = exp(-x_ * ts * ts);
    }
    if (t <= 2 * tdelay) {
        // convert model coordinates to padded-grid indices (npml halo)
        ixs = sx + npml - 1;
        iys = sy + npml - 1;
        izs = sz + npml - 1;
        P[izs + ixs * nnz + iys * nnz * nnx] += pfac * source;
        Q[izs + ixs * nnz + iys * nnz * nnx] += pfac * source;
    }
}
/*******************func*********************/
/*******************func*********************/
// One FD time step for the particle-velocity fields (u,v,w) with PML damping.
// Thread layout: one thread per (iz,ix) pair from a 2D grid; each thread
// loops over all iy planes. Spatial derivatives use the mm-point staggered
// coefficients in constant array c[]; coff*1/coff*2 are the per-axis PML
// coefficient pairs produced by initial_coffe().
__global__ void
update_vel(int nx, int ny, int nz, int nnx, int nny, int nnz, int npml, float dt, float dx, float dy, float dz,
           float *u0, float *v0, float *w0, float *u1, float *v1, float *w1, float *P, float *Q,
           float *coffx1, float *coffx2, float *coffy1, float *coffy2, float *coffz1, float *coffz2, float *rho) {
    const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
    const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
    int id, iy, im;
    float dtx, dty, dtz, xx, yy, zz;
    dtx = dt / dx;
    dty = dt / dy;
    dtz = dt / dz;
    for (iy = 0; iy < nny; iy++) {
        id = iz + ix * nnz + iy * nnz * nnx;   // flattened index, z fastest
        if (id >= mm && id < nnx * nny * nnz - mm) {
            // keep mm points away from every face so the stencil fits
            if (ix >= mm && ix < (nnx - mm) && iy >= mm && iy < (nny - mm) && iz >= mm && iz < (nnz - mm)) {
                xx = 0.0;
                yy = 0.0;
                zz = 0.0;
                for (im = 0; im < mm; im++) {
                    // staggered-grid derivatives: P drives the horizontal
                    // components, Q the vertical one
                    yy += c[im] * (P[id + (im + 1) * nnz * nnx] - P[id - im * nnz * nnx]);
                    xx += c[im] * (P[id + (im + 1) * nnz] - P[id - im * nnz]);
                    zz += c[im] * (Q[id + im + 1] - Q[id - im]);
                }
                xx /= rho[id];
                yy /= rho[id];
                zz /= rho[id];
                // PML-damped leapfrog update: new = coff2*old - coff1*(dt/dh)*deriv
                u1[id] = coffx2[ix] * u0[id] - coffx1[ix] * dtx * xx;
                v1[id] = coffy2[iy] * v0[id] - coffy1[iy] * dty * yy;
                w1[id] = coffz2[iz] * w0[id] - coffz1[iz] * dtz * zz;
            }
        }
    }
}
/*******************func***********************/
// One FD time step for the split stress fields (px,py,pz,qx,qy,qz) of the
// VTI first-order system; P and Q are recombined at the end. When SV is
// true, epsilon/delta are tapered to zero inside a sphere of radius r and
// cosine-ramped up to full anisotropy at radius R around the source, which
// suppresses the spurious SV wave at the injection point.
// Thread layout matches update_vel: one thread per (iz,ix), looping over iy.
__global__ void update_stress(int nx, int ny, int nz, int nnx, int nny, int nnz, float dt, float dx, float dy, float dz,
                              float *u1, float *v1, float *w1, float *P, float *Q, float *vp, float *rho, int npml,
                              float *px1, float *px0, float *py1, float *py0, float *pz1, float *pz0,
                              float *qx1, float *qx0, float *qy1, float *qy0, float *qz1, float *qz0,
                              float *acoffx1, float *acoffx2, float *acoffy1, float *acoffy2, float *acoffz1,
                              float *acoffz2,
                              float *delta, float *epsilon, int fsx, int dsx, int fsy, int dsy, int zs, int is, int nsx,
                              bool SV) {
    const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
    const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
    int id, iy, im, rx, ry, rz, R = 15, r = 4;   // taper radii in grid points
    float dtx, dty, dtz, xx, yy, zz, ee, dd;
    dtx = dt / dx;
    dty = dt / dy;
    dtz = dt / dz;
    for (iy = 0; iy < nny; iy++) {
        id = iz + ix * nnz + iy * nnz * nnx;
        if (id >= mm && id < nnx * nnz * nny - mm) {
            /************************i****************************************/
            /************************iso circle start*************************/
            // offset of this cell from the current shot position
            rx = ix - (fsx + is % nsx * dsx + npml - 1);
            ry = iy - (fsy + is / nsx * dsy + npml - 1);
            rz = iz - (zs + npml - 1);
            if (SV) {
                if ((rx * rx + ry * ry + rz * rz) <= R * R) {
                    if ((rx * rx + ry * ry + rz * rz) <= r * r) {
                        // fully isotropic core around the source
                        ee = 0.0;
                        dd = 0.0;
                    } else {
                        // cosine ramp from isotropic (at r) toward full anisotropy
                        ee = 0.5 * (1 - cos(pi * ((sqrtf(rx * rx + ry * ry + rz * rz) - r) * 4.0 / (R * 3.0 - 1)))) *
                             epsilon[id];
                        dd = 0.5 * (1 - cos(pi * ((sqrtf(rx * rx + ry * ry + rz * rz) - r) * 4.0 / (R * 3.0 - 1)))) *
                             delta[id];
                    }
                } else {
                    ee = epsilon[id];
                    dd = delta[id];
                }
            } else {
                ee = epsilon[id];
                dd = delta[id];
            }
            /************************ iso circle end *************************/
            /************************i****************************************/
            if (ix >= mm && ix < (nnx - mm) && iy >= mm && iy < (nny - mm) && iz >= mm && iz < (nnz - mm)) {
                xx = 0.0;
                yy = 0.0;
                zz = 0.0;
                for (im = 0; im < mm; im++) {
                    // backward-staggered velocity divergence terms
                    yy += c[im] * (v1[id + im * nnz * nnx] - v1[id - (im + 1) * nnz * nnx]);
                    xx += c[im] * (u1[id + im * nnz] - u1[id - (im + 1) * nnz]);
                    zz += c[im] * (w1[id + im] - w1[id - im - 1]);
                }
                // VTI moduli: horizontal vpx^2 = vp^2(1+2*eps),
                // coupling vp*vpn = vp^2*sqrt(1+2*delta)
                px1[id] = acoffx2[ix] * px0[id] - acoffx1[ix] * rho[id] * vp[id] * vp[id] * (1 + 2 * ee) * dtx * xx;
                py1[id] = acoffy2[iy] * py0[id] - acoffy1[iy] * rho[id] * vp[id] * vp[id] * (1 + 2 * ee) * dty * yy;
                pz1[id] =
                    acoffz2[iz] * pz0[id] - acoffz1[iz] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dtz * zz;
                qx1[id] =
                    acoffx2[ix] * qx0[id] - acoffx1[ix] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dtx * xx;
                qy1[id] =
                    acoffy2[iy] * qy0[id] - acoffy1[iy] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dty * yy;
                qz1[id] = acoffz2[iz] * qz0[id] - acoffz1[iz] * rho[id] * vp[id] * vp[id] * dtz * zz;
                // recombine split fields into the physical stresses
                P[id] = px1[id] + py1[id] + pz1[id];
                Q[id] = qx1[id] + qy1[id] + qz1[id];
            }
        }
    }
}
/********************func**********************/
// Computes the PML damping base coefficient d0 (a __device__ global) from
// the velocity at the grid's center cell and the mean grid spacing.
// Launched with <<<1,1>>>.
__global__ void get_d0(float dx, float dy, float dz, int nnx, int nny, int nnz, int npml, float *vp) {
    const double mean_h = (dx + dy + dz) / 3.0;
    d0 = 10.0 * vp[nny * nnx * nnz / 2] * log(100000.0) / (2.0 * npml * mean_h);
}
/*************func*******************/
// Fills the npml-wide halo of ee by edge replication: every halo cell takes
// the nearest interior value. A single clamped-index pass is equivalent to
// the original's three sequential passes (x, then y, then z), because each
// halo cell ultimately receives the interior value at its per-axis clamped
// coordinates, and the interior region is never overwritten.
void pad_vv(int nx, int ny, int nz, int nnx, int nny, int nnz, int npml, float *ee) {
    int ix, iy, iz;
    for (iy = 0; iy < nny; iy++) {
        const int cy = iy < npml ? npml : (iy >= nny - npml ? nny - npml - 1 : iy);
        for (ix = 0; ix < nnx; ix++) {
            const int cx = ix < npml ? npml : (ix >= nnx - npml ? nnx - npml - 1 : ix);
            for (iz = 0; iz < nnz; iz++) {
                const int cz = iz < npml ? npml : (iz >= nnz - npml ? nnz - npml - 1 : iz);
                // source cell is always interior, hence never rewritten here
                ee[iz + ix * nnz + iy * nnz * nnx] = ee[cz + cx * nnz + cy * nnz * nnx];
            }
        }
    }
}
/*************func*******************/
/*************func*******************/
// Reads vp, epsilon, delta and rho for the nx*ny*nz interior of the padded
// (nnz fastest, then nnx, then nny) grids from four raw float32 files.
// Halo cells are left untouched (they are filled later by pad_vv).
// Fix vs. original: a failed fopen was reported but the NULL stream was then
// passed to fread anyway (undefined behavior / crash); we now abort instead.
void
read_file(char FN1[], char FN2[], char FN3[], char FN4[], int nx, int ny, int nz, int nnx, int nny, int nnz, float *vv,
          float *epsilon, float *delta, float *rho, int npml) {
    int ix, iy, iz, id;
    FILE *fp1, *fp2, *fp3, *fp4;
    int fail = 0;
    if ((fp1 = fopen(FN1, "rb")) == NULL) { printf("error open <%s>!\n", FN1); fail = 1; }
    if ((fp2 = fopen(FN2, "rb")) == NULL) { printf("error open <%s>!\n", FN2); fail = 1; }
    if ((fp3 = fopen(FN3, "rb")) == NULL) { printf("error open <%s>!\n", FN3); fail = 1; }
    if ((fp4 = fopen(FN4, "rb")) == NULL) { printf("error open <%s>!\n", FN4); fail = 1; }
    if (fail) exit(1);   // do not read from NULL streams
    for (iy = npml; iy < ny + npml; iy++) {
        for (ix = npml; ix < nx + npml; ix++) {
            for (iz = npml; iz < nz + npml; iz++) {
                id = iz + ix * nnz + iy * nnz * nnx;
                fread(&vv[id], 4L, 1, fp1);
                fread(&epsilon[id], 4L, 1, fp2);
                fread(&delta[id], 4L, 1, fp3);
                fread(&rho[id], 4L, 1, fp4);
            }
        }
    }
    fclose(fp1);
    fclose(fp2);
    fclose(fp3);
    fclose(fp4);
}
/*************func*******************/
/*************func*******************/
// Precomputes one axis of PML damping coefficient pairs over the padded
// length nn + 2*npml. coff1/coff2 are evaluated at half-node (staggered)
// positions for the velocity update; acoff1/acoff2 at whole-node positions
// for the stress update. Interior points get the identity pair (1,1).
// Depends on the __device__ global d0 already set by get_d0().
__global__ void initial_coffe(float dt, int nn, float *coff1, float *coff2, float *acoff1, float *acoff2, int npml) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < nn + 2 * npml) {
        if (id < npml) {
            // left/front/top PML region: damping grows toward the boundary
            coff1[id] = 1.0 / (1.0 + (dt * d0 * pow((npml - 0.5 - id) / npml, 2.0)) / 2.0);
            coff2[id] = coff1[id] * (1.0 - (dt * d0 * pow((npml - 0.5 - id) / npml, 2.0)) / 2.0);
            acoff1[id] = 1.0 / (1.0 + (dt * d0 * pow(((npml - id) * 1.0) / npml, 2.0)) / 2.0);
            acoff2[id] = acoff1[id] * (1.0 - (dt * d0 * pow(((npml - id) * 1.0) / npml, 2.0)) / 2.0);
        } else if (id >= npml && id < npml + nn) {
            // interior: no damping
            coff1[id] = 1.0;
            coff2[id] = 1.0;
            acoff1[id] = 1.0;
            acoff2[id] = 1.0;
        } else {
            // right/back/bottom PML region
            coff1[id] = 1.0 / (1.0 + (dt * d0 * pow((0.5 + id - nn - npml) / npml, 2.0)) / 2.0);
            coff2[id] = coff1[id] * (1.0 - (dt * d0 * pow((0.5 + id - nn - npml) / npml, 2.0)) / 2.0);
            acoff1[id] = 1.0 / (1.0 + (dt * d0 * pow(((id - nn - npml) * 1.0) / npml, 2.0)) / 2.0);
            acoff2[id] = acoff1[id] * (1.0 - (dt * d0 * pow(((id - nn - npml) * 1.0) / npml, 2.0)) / 2.0);
        }
    }
}
/*************func*******************/
/*************func*******************/
// Records one time sample of the shot gather: copies the first interior
// depth level (z index npml) of P into shot[it + nt*ix + nt*nx*iy].
// One thread per surface receiver (nx*ny total).
__global__ void
shot_record(int nnx, int nny, int nnz, int nx, int ny, int nz, int npml, int it, int nt, float *P, float *shot) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= nx * ny)
        return;
    const int ix = tid % nx;
    const int iy = tid / nx;
    shot[it + nt * (ix + nx * iy)] = P[npml + nnz * ((ix + npml) + nnx * (iy + npml))];
}
/*************func**************/
/*************func**************/
void window3d(float *a, float *b, int nz, int nx, int ny, int nnz, int nnx, int npml)
/*< window a 3d subvolume: copy the nz*nx*ny interior of the padded volume b
    (halo width npml, z fastest) into the compact volume a >*/
{
    int iz, ix, iy;
    for (iy = 0; iy < ny; iy++) {
        for (ix = 0; ix < nx; ix++) {
            // start of this (ix,iy) column in the padded source and compact target
            const float *src = b + npml + nnz * (ix + npml) + nnz * nnx * (iy + npml);
            float *dst = a + nz * (ix + nx * iy);
            for (iz = 0; iz < nz; iz++)
                dst[iz] = src[iz];
        }
    }
}
/*************func**************/
/*************func**************/
// Zeroes the direct-wave window of the shot gather: for each receiver the
// arrival time of the direct wave is estimated from the source-receiver
// distance and the horizontal anisotropic velocity vp*sqrt(1+2*epsilon),
// and everything up to that time (plus two wavelet periods and tt extra
// samples) is muted. One thread per receiver (ix,iy), looping over time.
// NOTE(review): vp[1] and epsilon[1] are taken as representative of the
// near-surface medium — verify this holds for laterally varying models.
__global__ void
mute_directwave(int nx, int ny, int nt, float dt, float favg, float dx, float dy, float dz, int fsx, int fsy, int dsx,
                int dsy,
                int zs, int is, float *vp, float *epsilon, float *shot, int tt, int nsx) {
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int id, it;
    int mu_t, mu_nt;
    float mu_x, mu_y, mu_z, mu_t0;
    for (it = 0; it < nt; it++) {
        id = it + ix * nt + iy * nx * nt;
        if (ix < nx && iy < ny && it < nt) {
            // receiver offset from the current shot position
            mu_x = dx * abs(ix - fsx - (is % nsx) * dsx);
            mu_y = dy * abs(iy - fsy - (is / nsx) * dsy);
            mu_z = dz * zs;
            // straight-ray travel time at the horizontal VTI velocity
            mu_t0 = sqrtf(pow(mu_x, 2) + pow(mu_y, 2) + pow(mu_z, 2)) / (vp[1] * sqrtf(1 + 2 * epsilon[1]));
            mu_t = (int) (2.0 / (dt * favg));          // two wavelet periods
            mu_nt = (int) (mu_t0 / dt) + mu_t + tt;    // last muted sample
            if (it < mu_nt)
                shot[id] = 0.0;
        }
    }
    // earlier 1-thread-per-sample variant, kept for reference:
    /*  int id=threadIdx.x+blockDim.x*blockIdx.x;
        int mu_t,mu_nt;
        float mu_x,mu_y,mu_z,mu_t0;
        int ix=(id/nt)%nx;
        int iy=(id/nt)/nx;
        int it=id%nt;
        if(id<nx*ny*nt)
        {
            mu_x=dx*abs(ix-fsx-(is%nsx)*dsx);
            mu_y=dy*abs(iy-fsy-(is/nsx)*dsy);
            mu_z=dz*zs;
            mu_t0=sqrtf(pow(mu_x,2)+pow(mu_y,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilon[1]));
            mu_t=(int)(2.0/(dt*favg));
            mu_nt=(int)(mu_t0/dt)+mu_t+tt;
            if(it<mu_nt)
                shot[id]=0.0;
        } */
}
//a########################################################################
//a########################################################################
// Host driver for the 3D VTI acoustic forward modeling. Reads the vp / rho /
// epsilon / delta models, time-steps the split-PML first-order system on the
// selected GPU for all ns shots, optionally writes snapshots of P, optionally
// mutes the direct wave, and writes the decimated shot gathers to FNshot.
// NOTE(review): the 'is' argument is overwritten by the internal shot loop,
// which always runs shots 0..ns-1; the decimation steps int(dgx/dx) etc.
// must be >= 1 or the output loops never advance — verify at call sites.
// Fixes vs. original:
//  * coffy1/coffy2/acoffy1/acoffy2 were allocated with nnx elements but are
//    written/read up to index nny-1 (out-of-bounds whenever ny > nx);
//    they are now sized nny.
//  * those four y-axis coefficient arrays were never freed (device memory
//    leak); they are now released with the others.
extern "C" void
cuda_3dfd_vti(char *FNvel, char *FNrho, char *FNepsilon, char *FNdelta, char *FNsnap, char *FNshot, int is, int ns,
              int nx, int ny, int nz, float dx, float dy, float dz,
              int sxbeg, int sybeg, int szbeg, int jsx, int jsy, int jsz,
              float dgx, float dgy, float dgt,
              int nt, float dt, float fm, bool show_snapshot, bool cut_directwave,
              int snap_interval, int cudaDevicei) {
    int it, nnx, nny, nnz, wtype, ix, iy;
    int nsx, dsx, fsx, dsy, fsy, zs, npml;
    float t, pfac, favg;
    float *v, *e, *d, *rho;                                    // host models
    float *vp, *epsilon, *delta, *density;                     // device models
    float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1;        // x-axis fields
    float *s_v0, *s_v1, *s_py0, *s_qy0, *s_py1, *s_qy1;        // y-axis fields
    float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1;        // z-axis fields
    float *s_P, *s_Q, *shot_Dev, *shot_Hos;
    float *coffx1, *coffx2, *coffy1, *coffy2, *coffz1, *coffz2;
    float *acoffx1, *acoffx2, *acoffy1, *acoffy2, *acoffz1, *acoffz2;
    float *ptr;
    char snapname[300], snapid[300];
    /*************wavelet\boundary**************/
    wtype = 1;     // Ricker source wavelet
    npml = 20;     // PML halo width in grid points
    /********aaa************/
    FILE *fpsnap, *fpshot;
    fpshot = fopen(FNshot, "wb");
    /********* parameters *************/
    favg = fm;
    pfac = 10.0;
    nsx = ns;
    fsx = sxbeg;
    dsx = jsx;
    fsy = sybeg;
    dsy = jsy;
    zs = szbeg;
    /*************v***************/
    nnx = nx + 2 * npml;
    nny = ny + 2 * npml;
    nnz = nz + 2 * npml;
    /************a*************/
    v = (float *) malloc(nnz * nnx * nny * sizeof(float));
    rho = (float *) malloc(nnz * nnx * nny * sizeof(float));
    e = (float *) malloc(nnz * nnx * nny * sizeof(float));
    d = (float *) malloc(nnz * nnx * nny * sizeof(float));
    shot_Hos = (float *) malloc(nt * nx * ny * sizeof(float));
    read_file(FNvel, FNepsilon, FNdelta, FNrho, nx, ny, nz, nnx, nny, nnz, v, e, d, rho, npml);
    /**************************** pad halos by edge replication */
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, e);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, d);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, v);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, rho);
    hipSetDevice(cudaDevicei);// initialize device, default device=0;
    check_gpu_error("Failed to initialize device!");
    // Xdimg: (nnx,nny) launch for muting; dimg: (nnz,nnx) launch for FD kernels
    dim3 Xdimg, dimg, dimb;
    Xdimg.x = (nnx + BlockSize1 - 1) / BlockSize1;
    Xdimg.y = (nny + BlockSize2 - 1) / BlockSize2;
    dimg.x = (nnz + BlockSize1 - 1) / BlockSize1;
    dimg.y = (nnx + BlockSize2 - 1) / BlockSize2;
    dimb.x = BlockSize1;
    dimb.y = BlockSize2;
    /**************************** device copies of the models */
    hipMalloc(&vp, nnz * nnx * nny * sizeof(float));
    hipMalloc(&epsilon, nnz * nnx * nny * sizeof(float));
    hipMalloc(&delta, nnz * nnx * nny * sizeof(float));
    hipMalloc(&density, nnz * nnx * nny * sizeof(float));
    hipMemcpy(vp, v, nnz * nnx * nny * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(epsilon, e, nnz * nnx * nny * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(delta, d, nnz * nnx * nny * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(density, rho, nnz * nnx * nny * sizeof(float), hipMemcpyHostToDevice);
    /**************************** wavefield and PML work arrays */
    hipMalloc(&s_u0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_u1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_v0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_v1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_w0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_w1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_P, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_Q, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_px0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_px1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_py0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_py1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_pz0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_pz1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qx0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qx1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qy0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qy1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qz0, nnz * nnx * nny * sizeof(float));
    hipMalloc(&s_qz1, nnz * nnx * nny * sizeof(float));
    hipMalloc(&coffx1, nnx * sizeof(float));
    hipMalloc(&coffx2, nnx * sizeof(float));
    hipMalloc(&coffy1, nny * sizeof(float));   // FIX: was nnx (OOB when ny > nx)
    hipMalloc(&coffy2, nny * sizeof(float));   // FIX: was nnx
    hipMalloc(&coffz1, nnz * sizeof(float));
    hipMalloc(&coffz2, nnz * sizeof(float));
    hipMalloc(&acoffx1, nnx * sizeof(float));
    hipMalloc(&acoffx2, nnx * sizeof(float));
    hipMalloc(&acoffy1, nny * sizeof(float));  // FIX: was nnx
    hipMalloc(&acoffy2, nny * sizeof(float));  // FIX: was nnx
    hipMalloc(&acoffz1, nnz * sizeof(float));
    hipMalloc(&acoffz2, nnz * sizeof(float));
    hipMalloc(&shot_Dev, nx * ny * nt * sizeof(float));
    /******************************/
    check_gpu_error("Failed to allocate memory for variables!");
    // PML base coefficient, then one coefficient table per axis
    get_d0<<<1, 1>>>(dx, dy, dz, nnx, nny, nnz, npml, vp);
    initial_coffe<<<(nnx + 511) / 512, 512>>>(dt, nx, coffx1, coffx2, acoffx1, acoffx2, npml);
    initial_coffe<<<(nny + 511) / 512, 512>>>(dt, ny, coffy1, coffy2, acoffy1, acoffy2, npml);
    initial_coffe<<<(nnz + 511) / 512, 512>>>(dt, nz, coffz1, coffz2, acoffz1, acoffz2, npml);
    /**********IS Loop start*******/
    for (is = 0; is < ns; is++) {
        // reset all wavefields for the new shot
        hipMemset(s_u0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_u1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_v0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_v1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_w0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_w1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_P, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_Q, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_px0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_px1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_py0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_py1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_pz0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_pz1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qx0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qx1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qy0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qy1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qz0, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(s_qz1, 0, nnz * nnx * nny * sizeof(float));
        hipMemset(shot_Dev, 0, nt * nx * ny * sizeof(float));
        for (it = 0, t = dt; it < nt; it++, t += dt) {
            // inject source, advance velocities, advance stresses
            add_source<<<1, 1>>>
                (pfac, fsx, fsy, zs, nx, ny, nz, nnx, nny, nnz, dt, t, favg, wtype, npml, is, dsx, dsy, s_P, s_Q, nsx);
            hipDeviceSynchronize();
            update_vel<<<dimg, dimb>>>(nx, ny, nz, nnx, nny, nnz, npml, dt, dx, dy, dz,
                s_u0, s_v0, s_w0, s_u1, s_v1, s_w1, s_P, s_Q, coffx1, coffx2, coffy1, coffy2, coffz1, coffz2, density);
            hipDeviceSynchronize();
            update_stress<<<dimg, dimb>>>
                (nx, ny, nz, nnx, nny, nnz, dt, dx, dy, dz, s_u1, s_v1, s_w1, s_P, s_Q, vp, density, npml,
                 s_px1, s_px0, s_py1, s_py0, s_pz1, s_pz0, s_qx1, s_qx0, s_qy1, s_qy0, s_qz1, s_qz0,
                 acoffx1, acoffx2, acoffy1, acoffy2, acoffz1, acoffz2, delta, epsilon,
                 fsx, dsx, fsy, dsy, zs, is, nsx, true);
            hipDeviceSynchronize();
            // ping-pong the old/new field buffers for the next step
            ptr = s_u0;  s_u0 = s_u1;   s_u1 = ptr;
            ptr = s_v0;  s_v0 = s_v1;   s_v1 = ptr;
            ptr = s_w0;  s_w0 = s_w1;   s_w1 = ptr;
            ptr = s_px0; s_px0 = s_px1; s_px1 = ptr;
            ptr = s_py0; s_py0 = s_py1; s_py1 = ptr;
            ptr = s_pz0; s_pz0 = s_pz1; s_pz1 = ptr;
            ptr = s_qx0; s_qx0 = s_qx1; s_qx1 = ptr;
            ptr = s_qy0; s_qy0 = s_qy1; s_qy1 = ptr;
            ptr = s_qz0; s_qz0 = s_qz1; s_qz1 = ptr;
            shot_record<<<(nx * ny + 511) / 512, 512>>>(nnx, nny, nnz, nx, ny, nz, npml, it, nt, s_P, shot_Dev);
            hipDeviceSynchronize();
            if (show_snapshot) {
                if (it % snap_interval == 0) {
                    // dump the interior of P to FNsnap + "ishot_<is>_it_<it>.bin"
                    hipMemcpy(e, s_P, nnz * nnx * nny * sizeof(float), hipMemcpyDeviceToHost);
                    strcpy(snapname, FNsnap);
                    sprintf(snapid, "ishot_%d_", is);
                    strcat(snapname, snapid);
                    sprintf(snapid, "it_%d", it);
                    strcat(snapname, snapid);
                    strcat(snapname, ".bin");
                    if ((fpsnap = fopen(snapname, "wb")) == NULL) {
                        printf("cannot write snapfile\n");
                    }
                    window3d(v, e, nz, nx, ny, nnz, nnx, npml);
                    fwrite(v, sizeof(float), nx * nz * ny, fpsnap);
                    fclose(fpsnap);
                }
            }
        }//it loop end
        if (cut_directwave) {
            mute_directwave<<<Xdimg, dimb>>>
                (nx, ny, nt, dt, favg, dx, dy, dz, fsx, fsy, dsx, dsy, zs, is, vp, epsilon, shot_Dev, 60, nsx);
        }
        // write this shot's (decimated) gather at its slot in the output file
        hipMemcpy(shot_Hos, shot_Dev, nt * nx * ny * sizeof(float), hipMemcpyDeviceToHost);
        fseek(fpshot, is * nt * nx * ny * sizeof(float), 0);
        for (iy = 0; iy < ny; iy += int(dgy / dy)) {
            for (ix = 0; ix < nx; ix += int(dgx / dx)) {
                for (it = 0; it < nt; it += int(dgt / dt)) {
                    fwrite(&shot_Hos[iy * nx * nt + ix * nt + it], sizeof(float), 1, fpshot);
                }
            }
        }
    }//is loop end
    /*********IS Loop end*********/
    /***********close************/
    fclose(fpshot);
    /***********free device*************/
    hipFree(coffx1);
    hipFree(coffx2);
    hipFree(coffy1);   // FIX: was leaked
    hipFree(coffy2);   // FIX: was leaked
    hipFree(coffz1);
    hipFree(coffz2);
    hipFree(acoffx1);
    hipFree(acoffx2);
    hipFree(acoffy1);  // FIX: was leaked
    hipFree(acoffy2);  // FIX: was leaked
    hipFree(acoffz1);
    hipFree(acoffz2);
    hipFree(s_u0);
    hipFree(s_u1);
    hipFree(s_v0);
    hipFree(s_v1);
    hipFree(s_w0);
    hipFree(s_w1);
    hipFree(s_P);
    hipFree(s_Q);
    hipFree(s_px0);
    hipFree(s_px1);
    hipFree(s_py0);
    hipFree(s_py1);
    hipFree(s_pz0);
    hipFree(s_pz1);
    hipFree(s_qx0);
    hipFree(s_qx1);
    hipFree(s_qy0);
    hipFree(s_qy1);
    hipFree(s_qz0);
    hipFree(s_qz1);
    hipFree(shot_Dev);
    hipFree(vp);
    hipFree(epsilon);
    hipFree(delta);
    hipFree(density);
    /***************host free*****************/
    free(v);
    free(e);
    free(d);
    free(rho);
    free(shot_Hos);
}
| Toa_gpu_3dvti_fd_1orderfunciton.cu | //a#########################################################
//a## 3D Acoustic VTI Medium Forward
//a##
//a## Ps :GPU(CUDA)
//a##
//a##/*a***************************
//a##Function for VTI medium modeling,
//a##
//a## Ps: the function of modeling following:
//a##
//a## du/dt=1/rho*dp/dx ,
//a## dv/dt=1/rho*dp/dy ,
//a## dw/dt=1/rho*dq/dz ,
//a## dp/dt=rho*vpx^2*(du/dx+dv/dy)+rho*vp*vpn*dw/dz ,
//a## dq/dt=rho*vp*vpn*(du/dx+dv/dy)+rho*vp^2*dw/dz ,
//a## vpx^2=vp^2*(1+2*epsilon);
//a## vpn^2=vp^2*(1+2*delta);
//a##
//a##*********a*******************/
//a##
//a## code by Rong Tao
//a##
//a#########################################################
#include<stdio.h>
#include<malloc.h>
#include<math.h>
#include<stdlib.h>
#include <string.h>
#include <cuda_runtime.h>
#define pi 3.141592653
#define BlockSize1 16// tile size in 1st-axis
#define BlockSize2 16// tile size in 2nd-axis
#define mm 4
__device__ float d0;
__constant__ float c[mm] = {1.196289, -0.0797526, 0.009570313, -0.0006975447};
//a################################################################################
void check_gpu_error(const char *msg)
/*< check GPU errors: print the last CUDA error (with caller context msg) and
    terminate; no-op when no error is pending >*/
{
    const cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    printf("Cuda error: %s: %s\n", msg, cudaGetErrorString(err));
    exit(0);
}
//a################################################################################
__global__ void
add_source(float pfac, int fsx, int fsy, int sz, int nx, int ny, int nz, int nnx, int nny, int nnz, float dt, float t,
           float favg, int wtype, int npml, int is, int dsx, int dsy, float *P, float *Q, int nsx)
/*< generate source wavelet with time delay and inject it (scaled by pfac)
    into P and Q at the shot position for time t; launched with <<<1,1>>>.
    wtype: 1 = Ricker, 2 = derivative of Gaussian, 3 = Gaussian.
    Injection stops once t exceeds two wavelet delays (2/favg). >*/
{
    int ixs, iys, izs;
    float x_, xx_, tdelay, ts, source = 0.0, sx, sy;
    tdelay = 1.0 / favg;   // time delay so the wavelet starts near zero
    ts = t - tdelay;
    // shot coordinates: 'is' enumerates shots row-major over nsx columns
    sx = fsx + is % nsx * dsx;
    sy = fsy + is / nsx * dsy;
    if (wtype == 1)//ricker wavelet
    {
        x_ = favg * ts;
        xx_ = x_ * x_;
        source = (1 - 2 * pi * pi * (xx_)) * exp(-(pi * pi * xx_));
    } else if (wtype == 2) {//derivative of gaussian
        x_ = (-4) * favg * favg * pi * pi / log(0.1);
        source = (-2) * pi * pi * ts * exp(-x_ * ts * ts);
    } else if (wtype == 3) {//gaussian
        x_ = (-1) * favg * favg * pi * pi / log(0.1);
        source = exp(-x_ * ts * ts);
    }
    if (t <= 2 * tdelay) {
        // convert model coordinates to padded-grid indices (npml halo)
        ixs = sx + npml - 1;
        iys = sy + npml - 1;
        izs = sz + npml - 1;
        P[izs + ixs * nnz + iys * nnz * nnx] += pfac * source;
        Q[izs + ixs * nnz + iys * nnz * nnx] += pfac * source;
    }
}
/*******************func*********************/
/*******************func*********************/
// One FD time step for the particle-velocity fields (u,v,w) with PML damping.
// Thread layout: one thread per (iz,ix) pair from a 2D grid; each thread
// loops over all iy planes. Spatial derivatives use the mm-point staggered
// coefficients in constant array c[]; coff*1/coff*2 are the per-axis PML
// coefficient pairs produced by initial_coffe().
__global__ void
update_vel(int nx, int ny, int nz, int nnx, int nny, int nnz, int npml, float dt, float dx, float dy, float dz,
           float *u0, float *v0, float *w0, float *u1, float *v1, float *w1, float *P, float *Q,
           float *coffx1, float *coffx2, float *coffy1, float *coffy2, float *coffz1, float *coffz2, float *rho) {
    const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
    const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
    int id, iy, im;
    float dtx, dty, dtz, xx, yy, zz;
    dtx = dt / dx;
    dty = dt / dy;
    dtz = dt / dz;
    for (iy = 0; iy < nny; iy++) {
        id = iz + ix * nnz + iy * nnz * nnx;   // flattened index, z fastest
        if (id >= mm && id < nnx * nny * nnz - mm) {
            // keep mm points away from every face so the stencil fits
            if (ix >= mm && ix < (nnx - mm) && iy >= mm && iy < (nny - mm) && iz >= mm && iz < (nnz - mm)) {
                xx = 0.0;
                yy = 0.0;
                zz = 0.0;
                for (im = 0; im < mm; im++) {
                    // staggered-grid derivatives: P drives the horizontal
                    // components, Q the vertical one
                    yy += c[im] * (P[id + (im + 1) * nnz * nnx] - P[id - im * nnz * nnx]);
                    xx += c[im] * (P[id + (im + 1) * nnz] - P[id - im * nnz]);
                    zz += c[im] * (Q[id + im + 1] - Q[id - im]);
                }
                xx /= rho[id];
                yy /= rho[id];
                zz /= rho[id];
                // PML-damped leapfrog update: new = coff2*old - coff1*(dt/dh)*deriv
                u1[id] = coffx2[ix] * u0[id] - coffx1[ix] * dtx * xx;
                v1[id] = coffy2[iy] * v0[id] - coffy1[iy] * dty * yy;
                w1[id] = coffz2[iz] * w0[id] - coffz1[iz] * dtz * zz;
            }
        }
    }
}
/*******************func***********************/
/* Updates the split pressure fields (px,py,pz / qx,qy,qz) from the velocity
 * field using Thomsen parameters (epsilon, delta) for VTI anisotropy, then
 * recombines them into P and Q. When SV is true, anisotropy is smoothly
 * tapered to zero inside radius r of the source (cosine ramp out to a larger
 * radius) to suppress the SV-wave artifact at the source.
 * Launch: 2D grid over (iz, ix); serial loop over iy. */
__global__ void update_stress(int nx, int ny, int nz, int nnx, int nny, int nnz, float dt, float dx, float dy, float dz,
                              float *u1, float *v1, float *w1, float *P, float *Q, float *vp, float *rho, int npml,
                              float *px1, float *px0, float *py1, float *py0, float *pz1, float *pz0,
                              float *qx1, float *qx0, float *qy1, float *qy0, float *qz1, float *qz0,
                              float *acoffx1, float *acoffx2, float *acoffy1, float *acoffy2, float *acoffz1,
                              float *acoffz2,
                              float *delta, float *epsilon, int fsx, int dsx, int fsy, int dsy, int zs, int is, int nsx,
                              bool SV) {
    const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
    const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
    int id, iy, im, rx, ry, rz, R = 15, r = 4;   // R: taper radius, r: fully-isotropic radius (cells)
    float dtx, dty, dtz, xx, yy, zz, ee, dd;
    dtx = dt / dx;
    dty = dt / dy;
    dtz = dt / dz;
    for (iy = 0; iy < nny; iy++) {
        id = iz + ix * nnz + iy * nnz * nnx;
        if (id >= mm && id < nnx * nnz * nny - mm) {
            /************************i****************************************/
            /************************iso circle start*************************/
            // offset of this cell from the current shot position (PML-shifted)
            rx = ix - (fsx + is % nsx * dsx + npml - 1);
            ry = iy - (fsy + is / nsx * dsy + npml - 1);
            rz = iz - (zs + npml - 1);
            if (SV) {
                if ((rx * rx + ry * ry + rz * rz) <= R * R) {
                    if ((rx * rx + ry * ry + rz * rz) <= r * r) {
                        // fully isotropic right at the source
                        ee = 0.0;
                        dd = 0.0;
                    } else {
                        // cosine taper from isotropic (at r) toward full anisotropy
                        ee = 0.5 * (1 - cos(pi * ((sqrtf(rx * rx + ry * ry + rz * rz) - r) * 4.0 / (R * 3.0 - 1)))) *
                             epsilon[id];
                        dd = 0.5 * (1 - cos(pi * ((sqrtf(rx * rx + ry * ry + rz * rz) - r) * 4.0 / (R * 3.0 - 1)))) *
                             delta[id];
                    }
                } else {
                    ee = epsilon[id];
                    dd = delta[id];
                }
            } else {
                ee = epsilon[id];
                dd = delta[id];
            }
            /************************ iso circle end *************************/
            /************************i****************************************/
            if (ix >= mm && ix < (nnx - mm) && iy >= mm && iy < (nny - mm) && iz >= mm && iz < (nnz - mm)) {
                xx = 0.0;
                yy = 0.0;
                zz = 0.0;
                // backward-staggered velocity divergence terms
                for (im = 0; im < mm; im++) {
                    yy += c[im] * (v1[id + im * nnz * nnx] - v1[id - (im + 1) * nnz * nnx]);
                    xx += c[im] * (u1[id + im * nnz] - u1[id - (im + 1) * nnz]);
                    zz += c[im] * (w1[id + im] - w1[id - im - 1]);
                }
                // VTI scaling: horizontal P terms get (1+2*epsilon), coupling
                // terms get sqrt(1+2*delta), vertical Q term is unscaled
                px1[id] = acoffx2[ix] * px0[id] - acoffx1[ix] * rho[id] * vp[id] * vp[id] * (1 + 2 * ee) * dtx * xx;
                py1[id] = acoffy2[iy] * py0[id] - acoffy1[iy] * rho[id] * vp[id] * vp[id] * (1 + 2 * ee) * dty * yy;
                pz1[id] =
                        acoffz2[iz] * pz0[id] - acoffz1[iz] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dtz * zz;
                qx1[id] =
                        acoffx2[ix] * qx0[id] - acoffx1[ix] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dtx * xx;
                qy1[id] =
                        acoffy2[iy] * qy0[id] - acoffy1[iy] * rho[id] * vp[id] * vp[id] * sqrtf(1 + 2 * dd) * dty * yy;
                qz1[id] = acoffz2[iz] * qz0[id] - acoffz1[iz] * rho[id] * vp[id] * vp[id] * dtz * zz;
                // recombine split-PML components into the two wavefields
                P[id] = px1[id] + py1[id] + pz1[id];
                Q[id] = qx1[id] + qy1[id] + qz1[id];
            }
        }
    }
}
/********************func**********************/
/* Single-thread kernel (<<<1,1>>>): computes the PML damping magnitude d0
 * (device-global) from the velocity at the center of the padded volume and
 * the average grid spacing. Must run before initial_coffe, which reads d0. */
__global__ void get_d0(float dx, float dy, float dz, int nnx, int nny, int nnz, int npml, float *vp) {
    d0 = 10.0 * vp[nny * nnx * nnz / 2] * log(100000.0) / (2.0 * npml * ((dx + dy + dz) / 3.0));
}
/*************func*******************/
/* Fill the npml-wide absorbing border of the padded volume `ee` by
 * replicating the nearest interior edge value, one axis at a time:
 * x faces first, then y faces, then z faces. Layout:
 * id = iz + ix*nnz + iy*nnz*nnx. Only border cells are written. */
void pad_vv(int nx, int ny, int nz, int nnx, int nny, int nnz, int npml, float *ee) {
    int jx, jy, jz;

    /* pass 1: x-axis — clamp ix into [npml, nnx-npml-1] */
    for (jy = 0; jy < nny; jy++)
        for (jx = 0; jx < nnx; jx++)
            for (jz = 0; jz < nnz; jz++) {
                int sx = jx < npml ? npml : (jx >= nnx - npml ? nnx - npml - 1 : jx);
                if (sx != jx)
                    ee[jz + jx * nnz + jy * nnz * nnx] = ee[jz + sx * nnz + jy * nnz * nnx];
            }

    /* pass 2: y-axis — clamp iy into [npml, nny-npml-1] */
    for (jy = 0; jy < nny; jy++)
        for (jx = 0; jx < nnx; jx++)
            for (jz = 0; jz < nnz; jz++) {
                int sy = jy < npml ? npml : (jy >= nny - npml ? nny - npml - 1 : jy);
                if (sy != jy)
                    ee[jz + jx * nnz + jy * nnz * nnx] = ee[jz + jx * nnz + sy * nnz * nnx];
            }

    /* pass 3: z-axis — clamp iz into [npml, nnz-npml-1] */
    for (jy = 0; jy < nny; jy++)
        for (jx = 0; jx < nnx; jx++)
            for (jz = 0; jz < nnz; jz++) {
                int sz = jz < npml ? npml : (jz >= nnz - npml ? nnz - npml - 1 : jz);
                if (sz != jz)
                    ee[jz + jx * nnz + jy * nnz * nnx] = ee[sz + jx * nnz + jy * nnz * nnx];
            }
}
/*************func*******************/
/* Reads the velocity, epsilon, delta and density models (one float per grid
 * point, raw binary) into the interior of the PML-padded volumes.
 * Aborts with exit(1) if any file cannot be opened or is truncated — the
 * original continued with a NULL FILE* and crashed inside fread. */
void
read_file(char FN1[], char FN2[], char FN3[], char FN4[], int nx, int ny, int nz, int nnx, int nny, int nnz, float *vv,
          float *epsilon, float *delta, float *rho, int npml) {
    int ix, iy, iz, id;
    FILE *fp1, *fp2, *fp3, *fp4;
    if ((fp1 = fopen(FN1, "rb")) == NULL) { printf("error open <%s>!\n", FN1); exit(1); }
    if ((fp2 = fopen(FN2, "rb")) == NULL) { printf("error open <%s>!\n", FN2); exit(1); }
    if ((fp3 = fopen(FN3, "rb")) == NULL) { printf("error open <%s>!\n", FN3); exit(1); }
    if ((fp4 = fopen(FN4, "rb")) == NULL) { printf("error open <%s>!\n", FN4); exit(1); }
    for (iy = npml; iy < ny + npml; iy++)
        for (ix = npml; ix < nx + npml; ix++) {
            for (iz = npml; iz < nz + npml; iz++) {
                id = iz + ix * nnz + iy * nnz * nnx;
                /* detect truncated model files instead of reading garbage */
                if (fread(&vv[id], sizeof(float), 1, fp1) != 1 ||
                    fread(&epsilon[id], sizeof(float), 1, fp2) != 1 ||
                    fread(&delta[id], sizeof(float), 1, fp3) != 1 ||
                    fread(&rho[id], sizeof(float), 1, fp4) != 1) {
                    printf("error: short read in model files!\n");
                    exit(1);
                }
            }
        }
    fclose(fp1);
    fclose(fp2);
    fclose(fp3);
    fclose(fp4);
}
/*************func*******************/
/* Builds the 1D PML damping coefficient profiles for one axis of physical
 * size nn (padded size nn + 2*npml). Interior cells get coefficient 1
 * (no damping); the npml-wide edges get quadratically graded damping based
 * on the device-global d0 (must be set by get_d0 first).
 * coff* are the velocity-update profiles (half-cell staggered), acoff* the
 * stress-update profiles. Launch with >= nn + 2*npml threads total. */
__global__ void initial_coffe(float dt, int nn, float *coff1, float *coff2, float *acoff1, float *acoff2, int npml) {
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < nn + 2 * npml) {
        if (id < npml) {
            // low-side PML: damping decreases toward the interior
            coff1[id] = 1.0 / (1.0 + (dt * d0 * pow((npml - 0.5 - id) / npml, 2.0)) / 2.0);
            coff2[id] = coff1[id] * (1.0 - (dt * d0 * pow((npml - 0.5 - id) / npml, 2.0)) / 2.0);
            acoff1[id] = 1.0 / (1.0 + (dt * d0 * pow(((npml - id) * 1.0) / npml, 2.0)) / 2.0);
            acoff2[id] = acoff1[id] * (1.0 - (dt * d0 * pow(((npml - id) * 1.0) / npml, 2.0)) / 2.0);
        } else if (id >= npml && id < npml + nn) {
            // interior: no damping
            coff1[id] = 1.0;
            coff2[id] = 1.0;
            acoff1[id] = 1.0;
            acoff2[id] = 1.0;
        } else {
            // high-side PML: damping increases toward the outer boundary
            coff1[id] = 1.0 / (1.0 + (dt * d0 * pow((0.5 + id - nn - npml) / npml, 2.0)) / 2.0);
            coff2[id] = coff1[id] * (1.0 - (dt * d0 * pow((0.5 + id - nn - npml) / npml, 2.0)) / 2.0);
            acoff1[id] = 1.0 / (1.0 + (dt * d0 * pow(((id - nn - npml) * 1.0) / npml, 2.0)) / 2.0);
            acoff2[id] = acoff1[id] * (1.0 - (dt * d0 * pow(((id - nn - npml) * 1.0) / npml, 2.0)) / 2.0);
        }
    }
}
/*************func*******************/
/* One thread per surface receiver (nx*ny total): copies the pressure value
 * just below the top PML into time sample `it` of trace (ix, iy).
 * Shot gather layout: shot[it + nt*ix + nt*nx*iy]. */
__global__ void
shot_record(int nnx, int nny, int nnz, int nx, int ny, int nz, int npml, int it, int nt, float *P, float *shot) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= nx * ny)
        return;
    const int rx = tid % nx;
    const int ry = tid / nx;
    const int src = npml + nnz * (rx + npml) + nnz * nnx * (ry + npml);
    shot[it + nt * (rx + nx * ry)] = P[src];
}
/*************func**************/
void window3d(float *a, float *b, int nz, int nx, int ny, int nnz, int nnx, int npml)
/*< window a 3d subvolume: copy the nz*nx*ny interior of the PML-padded
    volume b into the compact volume a >*/
{
    int jx, jy;
    for (jy = 0; jy < ny; jy++)
        for (jx = 0; jx < nx; jx++) {
            /* each z-column is contiguous in both layouts, so copy it whole */
            const float *src = b + npml + nnz * (jx + npml) + nnz * nnx * (jy + npml);
            memcpy(a + nz * (jx + nx * jy), src, nz * sizeof(float));
        }
}
/*************func**************/
/* Zeroes every shot-gather sample that arrives before the (anisotropic)
 * direct wave plus a safety margin of `tt` samples, per receiver (ix, iy).
 * Travel time uses straight-line distance from the source of shot `is` and
 * the horizontal velocity vp*sqrt(1+2*epsilon) sampled at index 1.
 * Launch: 2D grid over (ix, iy); serial loop over it. */
__global__ void
mute_directwave(int nx, int ny, int nt, float dt, float favg, float dx, float dy, float dz, int fsx, int fsy, int dsx,
                int dsy,
                int zs, int is, float *vp, float *epsilon, float *shot, int tt, int nsx) {
    const int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int id, it;
    int mu_t, mu_nt;
    float mu_x, mu_y, mu_z, mu_t0;
    for (it = 0; it < nt; it++) {
        id = it + ix * nt + iy * nx * nt;
        if (ix < nx && iy < ny && it < nt) {
            // offset from the current shot position, in physical units
            mu_x = dx * abs(ix - fsx - (is % nsx) * dsx);
            mu_y = dy * abs(iy - fsy - (is / nsx) * dsy);
            mu_z = dz * zs;
            // direct-wave arrival time; vp[1]/epsilon[1] assumed representative
            // of the near-surface model (constant-velocity assumption)
            mu_t0 = sqrtf(pow(mu_x, 2) + pow(mu_y, 2) + pow(mu_z, 2)) / (vp[1] * sqrtf(1 + 2 * epsilon[1]));
            mu_t = (int) (2.0 / (dt * favg));   // two wavelet periods of margin
            mu_nt = (int) (mu_t0 / dt) + mu_t + tt;
            if (it < mu_nt)
                shot[id] = 0.0;
        }
    }
    /* int id=threadIdx.x+blockDim.x*blockIdx.x;
    int mu_t,mu_nt;
    float mu_x,mu_y,mu_z,mu_t0;
    int ix=(id/nt)%nx;
    int iy=(id/nt)/nx;
    int it=id%nt;
    if(id<nx*ny*nt)
    {
    mu_x=dx*abs(ix-fsx-(is%nsx)*dsx);
    mu_y=dy*abs(iy-fsy-(is/nsx)*dsy);
    mu_z=dz*zs;
    mu_t0=sqrtf(pow(mu_x,2)+pow(mu_y,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilon[1]));
    mu_t=(int)(2.0/(dt*favg));
    mu_nt=(int)(mu_t0/dt)+mu_t+tt;
    if(it<mu_nt)
    shot[id]=0.0;
    } */
}
//a########################################################################
/* 3D VTI acoustic forward modeling driver: for each of `ns` shots, runs the
 * staggered-grid FD time loop (add_source -> update_vel -> update_stress),
 * records the surface shot gather, optionally writes snapshots and mutes the
 * direct wave, and writes the decimated gather to FNshot.
 * NOTE: the incoming `is` argument is overwritten by the internal shot loop
 * (kept for interface compatibility).
 * Fixes vs. original: y-axis PML coefficient buffers are sized with nny
 * (they were sized with nnx but indexed up to nny -> out-of-bounds when
 * ny != nx), the y-axis buffers are now freed, fopen failures on the shot
 * and snapshot files no longer lead to writes through NULL FILE*. */
extern "C" void
cuda_3dfd_vti(char *FNvel, char *FNrho, char *FNepsilon, char *FNdelta, char *FNsnap, char *FNshot, int is, int ns,
              int nx, int ny, int nz, float dx, float dy, float dz,
              int sxbeg, int sybeg, int szbeg, int jsx, int jsy, int jsz,
              float dgx, float dgy, float dgt,
              int nt, float dt, float fm, bool show_snapshot, bool cut_directwave,
              int snap_interval, int cudaDevicei) {
    int it, nnx, nny, nnz, wtype, ix, iy;
    int nsx, dsx, fsx, dsy, fsy, zs, npml;
    float t, pfac, favg;
    float *v, *e, *d, *rho;
    float *vp, *epsilon, *delta, *density;
    float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1;
    float *s_v0, *s_v1, *s_py0, *s_qy0, *s_py1, *s_qy1;
    float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1;
    float *s_P, *s_Q, *shot_Dev, *shot_Hos;
    float *coffx1, *coffx2, *coffy1, *coffy2, *coffz1, *coffz2;
    float *acoffx1, *acoffx2, *acoffy1, *acoffy2, *acoffz1, *acoffz2;
    float *ptr;
    char snapname[300], snapid[300];
    /*************wavelet\boundary**************/
    wtype = 1;      // Ricker wavelet
    npml = 20;      // PML thickness (cells) on every face
    /********aaa************/
    FILE *fpsnap, *fpshot;
    fpshot = fopen(FNshot, "wb");
    if (fpshot == NULL) {
        /* bail out before any allocation: writing through a NULL FILE* is UB */
        printf("error open <%s>!\n", FNshot);
        return;
    }
    /********* parameters *************/
    favg = fm;
    pfac = 10.0;
    nsx = ns;
    fsx = sxbeg;
    dsx = jsx;
    fsy = sybeg;
    dsy = jsy;
    zs = szbeg;
    /*************v***************/
    nnx = nx + 2 * npml;
    nny = ny + 2 * npml;
    nnz = nz + 2 * npml;
    /************a*************/
    v = (float *) malloc(nnz * nnx * nny * sizeof(float));
    rho = (float *) malloc(nnz * nnx * nny * sizeof(float));
    e = (float *) malloc(nnz * nnx * nny * sizeof(float));
    d = (float *) malloc(nnz * nnx * nny * sizeof(float));
    shot_Hos = (float *) malloc(nt * nx * ny * sizeof(float));
    read_file(FNvel, FNepsilon, FNdelta, FNrho, nx, ny, nz, nnx, nny, nnz, v, e, d, rho, npml);
    /****************************/
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, e);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, d);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, v);
    pad_vv(nx, ny, nz, nnx, nny, nnz, npml, rho);
    cudaSetDevice(cudaDevicei);// initialize device, default device=0;
    check_gpu_error("Failed to initialize device!");
    dim3 Xdimg, dimg, dimb;
    Xdimg.x = (nnx + BlockSize1 - 1) / BlockSize1;   // grid over (ix, iy) for muting
    Xdimg.y = (nny + BlockSize2 - 1) / BlockSize2;
    dimg.x = (nnz + BlockSize1 - 1) / BlockSize1;    // grid over (iz, ix) for FD kernels
    dimg.y = (nnx + BlockSize2 - 1) / BlockSize2;
    dimb.x = BlockSize1;
    dimb.y = BlockSize2;
    /****************************/
    cudaMalloc(&vp, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&epsilon, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&delta, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&density, nnz * nnx * nny * sizeof(float));
    cudaMemcpy(vp, v, nnz * nnx * nny * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(epsilon, e, nnz * nnx * nny * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(delta, d, nnz * nnx * nny * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(density, rho, nnz * nnx * nny * sizeof(float), cudaMemcpyHostToDevice);
    /****************************/
    cudaMalloc(&s_u0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_u1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_v0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_v1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_w0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_w1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_P, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_Q, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_px0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_px1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_py0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_py1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_pz0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_pz1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qx0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qx1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qy0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qy1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qz0, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&s_qz1, nnz * nnx * nny * sizeof(float));
    cudaMalloc(&coffx1, nnx * sizeof(float));
    cudaMalloc(&coffx2, nnx * sizeof(float));
    /* BUGFIX: y profiles are indexed by iy in [0, nny) — size with nny, not nnx */
    cudaMalloc(&coffy1, nny * sizeof(float));
    cudaMalloc(&coffy2, nny * sizeof(float));
    cudaMalloc(&coffz1, nnz * sizeof(float));
    cudaMalloc(&coffz2, nnz * sizeof(float));
    cudaMalloc(&acoffx1, nnx * sizeof(float));
    cudaMalloc(&acoffx2, nnx * sizeof(float));
    cudaMalloc(&acoffy1, nny * sizeof(float));
    cudaMalloc(&acoffy2, nny * sizeof(float));
    cudaMalloc(&acoffz1, nnz * sizeof(float));
    cudaMalloc(&acoffz2, nnz * sizeof(float));
    cudaMalloc(&shot_Dev, nx * ny * nt * sizeof(float));
    /******************************/
    check_gpu_error("Failed to allocate memory for variables!");
    get_d0 << < 1, 1 >> > (dx, dy, dz, nnx, nny, nnz, npml, vp);
    initial_coffe << < (nnx + 511) / 512, 512 >> > (dt, nx, coffx1, coffx2, acoffx1, acoffx2, npml);
    initial_coffe << < (nny + 511) / 512, 512 >> > (dt, ny, coffy1, coffy2, acoffy1, acoffy2, npml);
    initial_coffe << < (nnz + 511) / 512, 512 >> > (dt, nz, coffz1, coffz2, acoffz1, acoffz2, npml);
    /**********IS Loop start*******/
    for (is = 0; is < ns; is++) {
        /* zero all wavefields for this shot */
        cudaMemset(s_u0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_u1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_v0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_v1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_w0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_w1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_P, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_Q, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_px0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_px1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_py0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_py1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_pz0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_pz1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qx0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qx1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qy0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qy1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qz0, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(s_qz1, 0, nnz * nnx * nny * sizeof(float));
        cudaMemset(shot_Dev, 0, nt * nx * ny * sizeof(float));
        for (it = 0, t = dt; it < nt; it++, t += dt) {
            add_source << < 1, 1 >> >
            (pfac, fsx, fsy, zs, nx, ny, nz, nnx, nny, nnz, dt, t, favg, wtype, npml, is, dsx, dsy, s_P, s_Q, nsx);
            cudaDeviceSynchronize();
            update_vel << < dimg, dimb >> > (nx, ny, nz, nnx, nny, nnz, npml, dt, dx, dy, dz,
                    s_u0, s_v0, s_w0, s_u1, s_v1, s_w1, s_P, s_Q, coffx1, coffx2, coffy1, coffy2, coffz1, coffz2, density);
            cudaDeviceSynchronize();
            update_stress << < dimg, dimb >> >
            (nx, ny, nz, nnx, nny, nnz, dt, dx, dy, dz, s_u1, s_v1, s_w1, s_P, s_Q, vp, density, npml,
                    s_px1, s_px0, s_py1, s_py0, s_pz1, s_pz0, s_qx1, s_qx0, s_qy1, s_qy0, s_qz1, s_qz0,
                    acoffx1, acoffx2, acoffy1, acoffy2, acoffz1, acoffz2, delta, epsilon,
                    fsx, dsx, fsy, dsy, zs, is, nsx, true);
            cudaDeviceSynchronize();
            /* ping-pong the time levels */
            ptr = s_u0;
            s_u0 = s_u1;
            s_u1 = ptr;
            ptr = s_v0;
            s_v0 = s_v1;
            s_v1 = ptr;
            ptr = s_w0;
            s_w0 = s_w1;
            s_w1 = ptr;
            ptr = s_px0;
            s_px0 = s_px1;
            s_px1 = ptr;
            ptr = s_py0;
            s_py0 = s_py1;
            s_py1 = ptr;
            ptr = s_pz0;
            s_pz0 = s_pz1;
            s_pz1 = ptr;
            ptr = s_qx0;
            s_qx0 = s_qx1;
            s_qx1 = ptr;
            ptr = s_qy0;
            s_qy0 = s_qy1;
            s_qy1 = ptr;
            ptr = s_qz0;
            s_qz0 = s_qz1;
            s_qz1 = ptr;
            shot_record << < (nx * ny + 511) / 512, 512 >> > (nnx, nny, nnz, nx, ny, nz, npml, it, nt, s_P, shot_Dev);
            cudaDeviceSynchronize();
            if (show_snapshot) {
                if (it % snap_interval == 0) {
                    cudaMemcpy(e, s_P, nnz * nnx * nny * sizeof(float), cudaMemcpyDeviceToHost);
                    strcpy(snapname, FNsnap);
                    sprintf(snapid, "ishot_%d_", is);
                    strcat(snapname, snapid);
                    sprintf(snapid, "it_%d", it);
                    strcat(snapname, snapid);
                    strcat(snapname, ".bin");
                    if ((fpsnap = fopen(snapname, "wb")) == NULL) {
                        printf("cannot write snapfile\n");
                    } else {
                        /* only write when the file actually opened (the original
                           called fwrite/fclose on a NULL FILE*) */
                        window3d(v, e, nz, nx, ny, nnz, nnx, npml);
                        fwrite(v, sizeof(float), nx * nz * ny, fpsnap);
                        fclose(fpsnap);
                    }
                }
            }
        }//it loop end
        if (cut_directwave) {
            mute_directwave << < Xdimg, dimb >> >
            (nx, ny, nt, dt, favg, dx, dy, dz, fsx, fsy, dsx, dsy, zs, is, vp, epsilon, shot_Dev, 60, nsx);
        }
        cudaMemcpy(shot_Hos, shot_Dev, nt * nx * ny * sizeof(float), cudaMemcpyDeviceToHost);
        fseek(fpshot, is * nt * nx * ny * sizeof(float), 0);
        /* decimate receivers/time to the requested output sampling */
        for (iy = 0; iy < ny; iy += int(dgy / dy)) {
            for (ix = 0; ix < nx; ix += int(dgx / dx)) {
                for (it = 0; it < nt; it += int(dgt / dt)) {
                    fwrite(&shot_Hos[iy * nx * nt + ix * nt + it], sizeof(float), 1, fpshot);
                }
            }
        }
    }//is loop end
    /*********IS Loop end*********/
    /***********close************/
    fclose(fpshot);
    /***********free*************/
    cudaFree(coffx1);
    cudaFree(coffx2);
    cudaFree(coffy1);   /* BUGFIX: the four y-axis profiles were never freed */
    cudaFree(coffy2);
    cudaFree(coffz1);
    cudaFree(coffz2);
    cudaFree(acoffx1);
    cudaFree(acoffx2);
    cudaFree(acoffy1);
    cudaFree(acoffy2);
    cudaFree(acoffz1);
    cudaFree(acoffz2);
    cudaFree(s_u0);
    cudaFree(s_u1);
    cudaFree(s_v0);
    cudaFree(s_v1);
    cudaFree(s_w0);
    cudaFree(s_w1);
    cudaFree(s_P);
    cudaFree(s_Q);
    cudaFree(s_px0);
    cudaFree(s_px1);
    cudaFree(s_py0);
    cudaFree(s_py1);
    cudaFree(s_pz0);
    cudaFree(s_pz1);
    cudaFree(s_qx0);
    cudaFree(s_qx1);
    cudaFree(s_qy0);
    cudaFree(s_qy1);
    cudaFree(s_qz0);
    cudaFree(s_qz1);
    cudaFree(shot_Dev);
    cudaFree(vp);
    cudaFree(epsilon);
    cudaFree(delta);
    cudaFree(density);
    /***************host free*****************/
    free(v);
    free(e);
    free(d);
    free(rho);
    free(shot_Hos);
}
|
62577e6a7587f22fb0cb01469a4e2905089f0fcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include<stdlib.h>
/* Toy 2-layer neural network trained in-kernel for T iterations.
 * Launch: grid of N blocks, block dims (m, n) = (hidden size, in/out size).
 * W1 is m x n, W2 is n x m, X/Y/Z are N x n, h is N x m, loss has T entries.
 * NOTE(review): the per-iteration writes h[...] = b1[mx] and Z[...] = b2[nx]
 * race with the atomicAdds from other threads of the same block (no
 * __syncthreads() between the initialization and the accumulation), and
 * loss[t] = 0 in block 0 races with atomicAdds from other blocks (no grid
 * sync). Results are therefore nondeterministic — debug/demo code only. */
__global__ void fxn(double *W1, double *W2, double *X, double *Y, double *b1, double *b2, double *h, double *Z, double *loss){
    int m = blockDim.x, n = blockDim.y, T = 10;
    int mx = threadIdx.x, Nx = blockIdx.x, nx = threadIdx.y;
    double lambda = 0.01;   // L2 regularization weight
    // Initialization: block 0 fills the shared parameters with constants
    // (the random init via hiprand is kept below, commented out)
    if(Nx == 0){
        /*	hiprandState_t state;
        hiprand_init(clock64(), nx, 0, &state);
        W1[m*nx + mx] = hiprand_uniform(&state);
        hiprand_init(clock64(), nx, 0, &state);
        W2[n*mx + nx] = hiprand_uniform(&state);
        printf("%lf %lf\n", W1[m*nx + mx], W2[n*mx + nx]);
        */
        W1[m*nx + mx] = 0.1;
        W2[n*mx + nx] = 0.1;
        if(nx == 0){
            //b1[mx] = hiprand_uniform(&state);
            b1[mx] = 0.1;
        }
        if(mx == 0){
            //b2[nx] = hiprand_uniform(&state);
            b2[nx] = 0.1;
        }
    }
    __syncthreads();
    // PUT CUDA BARRIER
    for(int t = 0; t < T; t++){
        // Initialize loss
        if(Nx == 0 && mx == 0 && nx == 0){
            printf("***********\n\nTHIS IS ITERATION NUMBER %d\n\n****************\n\n", t);
            loss[t] = 0;
        }
        // Initializing h (hidden pre-activation) with the bias, then
        // accumulating the W1*x contributions atomically across nx
        if(nx == 0){
            h[Nx*m + mx] = b1[mx];
        }
        atomicAdd(&h[Nx*m + mx], X[Nx*n + nx] * W1[m*nx + mx]);
        __syncthreads();
        if(nx == 0)
            printf("H values: %lf\n", h[Nx*m + mx]);
        double e;
        // Sigmoid activation on the hidden layer
        if(nx == 0){
            e = exp(h[Nx*m + mx]);
            h[Nx*m + mx] = e/(1 + e);
        }
        __syncthreads();
        // calculating Z (output pre-activation), accumulated across mx
        if(mx == 0){
            Z[Nx*n + nx] = b2[nx];
        }
        atomicAdd(&Z[Nx*n + nx], h[Nx*m + mx] * W2[n*mx + nx]); // CHECK SWAP
        __syncthreads();
        // Sigmoid on the output layer
        if(mx == 0){
            e = exp(Z[Nx*n + nx]);
            Z[Nx*n + nx] = e/(1 + e);
            printf("Z values: %lf\n", Z[Nx*n + nx]);
        }
        __syncthreads();
        printf("%d %d %d\n", Nx, mx, nx);
        // per-thread loss contribution: squared error (mx==0 lanes) plus
        // L2 penalty on the weights (block 0 lanes)
        double d = 0;
        if(mx == 0){
            //printf("%d %lf %lf\n", bd*n + td, Z[bd*n + td], Y[bd*n + td]);
            d = Z[Nx*n + nx] - Y[Nx*n + nx];
            d = d * d;
        }
        if(Nx == 0){
            d += lambda * (W1[m*nx + mx] * W1[m*nx + mx] + W2[n*mx + nx] * W2[n*mx + nx]);
            //atomicAdd(&d, dx);
            printf("aya");
        }
        if(mx == 0 || Nx == 0)
            printf("d value here: %d %d %lf\n", mx, Nx, d);
        // ATOMIC OPERATION REQUIRED HERE
        if(d != 0){
            atomicAdd(&loss[t], d);
        }
        printf("down: %d %d %d\n", Nx, mx, nx);
        if(Nx + mx + nx == 0)
            printf("loss: %lf\n", loss[t]);
        // eta needs to be declared and grad needs to be found here
        /*	if(bd == 0){
        for(int i = 0;i < n; i++){
        W[m*i + td] -= eta * grad
        }
        b1[td] -= eta * grad;
        if(td < n)
        b2[td] -= eta * grad;
        }*/
        // ITERATION COMPLETE (backpropagation is not implemented)
    }
}
/* Host driver for the toy network kernel: reads X and Y (N*n doubles each)
 * from stdin, launches fxn with an (m, n) thread block per sample, and
 * prints h, Z and the per-iteration loss.
 * NOTE(review): scanf return values are unchecked (short input leaves X/Y
 * uninitialized), and neither the host mallocs nor the hipMallocs are freed
 * before return — acceptable only for a throwaway demo. */
int main(){
    int N, m, n, T;
    N = 3, m = 4, n = m/2, T = 10;   // 3 samples, 4 hidden units, 2 in/out units
    double *X, *Y, *h, *Z, *loss;
    X = (double*)malloc(N*n*sizeof(double));
    Y = (double*)malloc(N*n*sizeof(double));
    h = (double*)malloc(N*m*sizeof(double));
    Z = (double*)malloc(N*n*sizeof(double));
    loss = (double*)malloc(T * sizeof(double));
    // device memory
    double *d_X, *d_Y, *d_W1, *d_W2, *d_b1, *d_b2, *d_h, *d_loss, *d_Z;
    hipMalloc((void**)&d_W1, sizeof(double) * n * m);
    hipMalloc((void**)&d_W2, sizeof(double) * m * n);
    hipMalloc((void**)&d_X, sizeof(double) * N * n);
    hipMalloc((void**)&d_Y, sizeof(double) * N * n);
    hipMalloc((void**)&d_Z, sizeof(double) * N * n);
    hipMalloc((void**)&d_b1, sizeof(double) * m);
    hipMalloc((void**)&d_b2, sizeof(double) * n);
    hipMalloc((void**)&d_h, sizeof(double) * N * m);
    hipMalloc((void**)&d_loss, sizeof(double) * T);
    // read inputs and targets from stdin, echoing each value
    for(int i = 0; i < N*n; i++){
        scanf("%lf", &X[i]);
        printf("%lf\n", X[i]);
    }
    for(int i = 0; i < N*n; i++){
        scanf("%lf", &Y[i]);
        printf("%lf\n", Y[i]);
    }
    hipMemcpy(d_X, X, sizeof(double) * N * n, hipMemcpyHostToDevice);
    hipMemcpy(d_Y, Y, sizeof(double) * N * n, hipMemcpyHostToDevice);
    dim3 threads(m, n);
    hipLaunchKernelGGL(( fxn), dim3(N), dim3(threads), 0, 0, d_W1, d_W2, d_X, d_Y, d_b1, d_b2, d_h, d_Z, d_loss);
    // blocking copies: these also synchronize with the kernel
    hipMemcpy(h, d_h, sizeof(double) * N * m, hipMemcpyDeviceToHost);
    hipMemcpy(Z, d_Z, sizeof(double) * N * n, hipMemcpyDeviceToHost);
    hipMemcpy(loss, d_loss, sizeof(double) * T, hipMemcpyDeviceToHost);
    printf("h\n");
    for(int i = 0;i < N*m; i++)
        printf("%lf ", h[i]);
    printf("\nZ");
    for(int i = 0;i < N*n; i++)
        printf("%lf ", Z[i]);
    printf("\n");
    printf("LOSS\n");
    for(int i = 0;i < T; i++)
        printf("%lf ", loss[i]);
    printf("\n");
    return 0;
}
| 62577e6a7587f22fb0cb01469a4e2905089f0fcf.cu | #include<stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include<stdlib.h>
/* Toy 2-layer neural network trained in-kernel for T iterations.
 * Launch: grid of N blocks, block dims (m, n) = (hidden size, in/out size).
 * W1 is m x n, W2 is n x m, X/Y/Z are N x n, h is N x m, loss has T entries.
 * NOTE(review): the per-iteration writes h[...] = b1[mx] and Z[...] = b2[nx]
 * race with the atomicAdds from other threads of the same block (no
 * __syncthreads() between the initialization and the accumulation), and
 * loss[t] = 0 in block 0 races with atomicAdds from other blocks (no grid
 * sync). Results are therefore nondeterministic — debug/demo code only. */
__global__ void fxn(double *W1, double *W2, double *X, double *Y, double *b1, double *b2, double *h, double *Z, double *loss){
    int m = blockDim.x, n = blockDim.y, T = 10;
    int mx = threadIdx.x, Nx = blockIdx.x, nx = threadIdx.y;
    double lambda = 0.01;   // L2 regularization weight
    // Initialization: block 0 fills the shared parameters with constants
    // (the random init via curand is kept below, commented out)
    if(Nx == 0){
        /*	curandState state;
        curand_init(clock64(), nx, 0, &state);
        W1[m*nx + mx] = curand_uniform(&state);
        curand_init(clock64(), nx, 0, &state);
        W2[n*mx + nx] = curand_uniform(&state);
        printf("%lf %lf\n", W1[m*nx + mx], W2[n*mx + nx]);
        */
        W1[m*nx + mx] = 0.1;
        W2[n*mx + nx] = 0.1;
        if(nx == 0){
            //b1[mx] = curand_uniform(&state);
            b1[mx] = 0.1;
        }
        if(mx == 0){
            //b2[nx] = curand_uniform(&state);
            b2[nx] = 0.1;
        }
    }
    __syncthreads();
    // PUT CUDA BARRIER
    for(int t = 0; t < T; t++){
        // Initialize loss
        if(Nx == 0 && mx == 0 && nx == 0){
            printf("***********\n\nTHIS IS ITERATION NUMBER %d\n\n****************\n\n", t);
            loss[t] = 0;
        }
        // Initializing h (hidden pre-activation) with the bias, then
        // accumulating the W1*x contributions atomically across nx
        if(nx == 0){
            h[Nx*m + mx] = b1[mx];
        }
        atomicAdd(&h[Nx*m + mx], X[Nx*n + nx] * W1[m*nx + mx]);
        __syncthreads();
        if(nx == 0)
            printf("H values: %lf\n", h[Nx*m + mx]);
        double e;
        // Sigmoid activation on the hidden layer
        if(nx == 0){
            e = exp(h[Nx*m + mx]);
            h[Nx*m + mx] = e/(1 + e);
        }
        __syncthreads();
        // calculating Z (output pre-activation), accumulated across mx
        if(mx == 0){
            Z[Nx*n + nx] = b2[nx];
        }
        atomicAdd(&Z[Nx*n + nx], h[Nx*m + mx] * W2[n*mx + nx]); // CHECK SWAP
        __syncthreads();
        // Sigmoid on the output layer
        if(mx == 0){
            e = exp(Z[Nx*n + nx]);
            Z[Nx*n + nx] = e/(1 + e);
            printf("Z values: %lf\n", Z[Nx*n + nx]);
        }
        __syncthreads();
        printf("%d %d %d\n", Nx, mx, nx);
        // per-thread loss contribution: squared error (mx==0 lanes) plus
        // L2 penalty on the weights (block 0 lanes)
        double d = 0;
        if(mx == 0){
            //printf("%d %lf %lf\n", bd*n + td, Z[bd*n + td], Y[bd*n + td]);
            d = Z[Nx*n + nx] - Y[Nx*n + nx];
            d = d * d;
        }
        if(Nx == 0){
            d += lambda * (W1[m*nx + mx] * W1[m*nx + mx] + W2[n*mx + nx] * W2[n*mx + nx]);
            //atomicAdd(&d, dx);
            printf("aya");
        }
        if(mx == 0 || Nx == 0)
            printf("d value here: %d %d %lf\n", mx, Nx, d);
        // ATOMIC OPERATION REQUIRED HERE
        if(d != 0){
            atomicAdd(&loss[t], d);
        }
        printf("down: %d %d %d\n", Nx, mx, nx);
        if(Nx + mx + nx == 0)
            printf("loss: %lf\n", loss[t]);
        // eta needs to be declared and grad needs to be found here
        /*	if(bd == 0){
        for(int i = 0;i < n; i++){
        W[m*i + td] -= eta * grad
        }
        b1[td] -= eta * grad;
        if(td < n)
        b2[td] -= eta * grad;
        }*/
        // ITERATION COMPLETE (backpropagation is not implemented)
    }
}
/* Host driver for the toy network kernel: reads X and Y (N*n doubles each)
 * from stdin, launches fxn with an (m, n) thread block per sample, and
 * prints h, Z and the per-iteration loss.
 * Fixes vs. original: scanf results are checked (short input used to leave
 * X/Y uninitialized), kernel launch errors are reported, and all host and
 * device allocations are released before return. */
int main(){
    int N, m, n, T;
    N = 3, m = 4, n = m/2, T = 10;   // 3 samples, 4 hidden units, 2 in/out units
    double *X, *Y, *h, *Z, *loss;
    X = (double*)malloc(N*n*sizeof(double));
    Y = (double*)malloc(N*n*sizeof(double));
    h = (double*)malloc(N*m*sizeof(double));
    Z = (double*)malloc(N*n*sizeof(double));
    loss = (double*)malloc(T * sizeof(double));
    // device memory
    double *d_X, *d_Y, *d_W1, *d_W2, *d_b1, *d_b2, *d_h, *d_loss, *d_Z;
    cudaMalloc((void**)&d_W1, sizeof(double) * n * m);
    cudaMalloc((void**)&d_W2, sizeof(double) * m * n);
    cudaMalloc((void**)&d_X, sizeof(double) * N * n);
    cudaMalloc((void**)&d_Y, sizeof(double) * N * n);
    cudaMalloc((void**)&d_Z, sizeof(double) * N * n);
    cudaMalloc((void**)&d_b1, sizeof(double) * m);
    cudaMalloc((void**)&d_b2, sizeof(double) * n);
    cudaMalloc((void**)&d_h, sizeof(double) * N * m);
    cudaMalloc((void**)&d_loss, sizeof(double) * T);
    // read inputs and targets from stdin, echoing each value;
    // abort on malformed/short input instead of using uninitialized memory
    for(int i = 0; i < N*n; i++){
        if(scanf("%lf", &X[i]) != 1){
            fprintf(stderr, "error: expected %d values for X\n", N*n);
            return 1;
        }
        printf("%lf\n", X[i]);
    }
    for(int i = 0; i < N*n; i++){
        if(scanf("%lf", &Y[i]) != 1){
            fprintf(stderr, "error: expected %d values for Y\n", N*n);
            return 1;
        }
        printf("%lf\n", Y[i]);
    }
    cudaMemcpy(d_X, X, sizeof(double) * N * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, sizeof(double) * N * n, cudaMemcpyHostToDevice);
    dim3 threads(m, n);
    fxn<<<N, threads>>>(d_W1, d_W2, d_X, d_Y, d_b1, d_b2, d_h, d_Z, d_loss);
    // surface launch-configuration errors (kernel launches do not return them)
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // blocking copies: these also synchronize with the kernel
    cudaMemcpy(h, d_h, sizeof(double) * N * m, cudaMemcpyDeviceToHost);
    cudaMemcpy(Z, d_Z, sizeof(double) * N * n, cudaMemcpyDeviceToHost);
    cudaMemcpy(loss, d_loss, sizeof(double) * T, cudaMemcpyDeviceToHost);
    printf("h\n");
    for(int i = 0;i < N*m; i++)
        printf("%lf ", h[i]);
    printf("\nZ");
    for(int i = 0;i < N*n; i++)
        printf("%lf ", Z[i]);
    printf("\n");
    printf("LOSS\n");
    for(int i = 0;i < T; i++)
        printf("%lf ", loss[i]);
    printf("\n");
    // release device and host memory (the original leaked everything)
    cudaFree(d_W1); cudaFree(d_W2); cudaFree(d_X); cudaFree(d_Y);
    cudaFree(d_Z); cudaFree(d_b1); cudaFree(d_b2); cudaFree(d_h); cudaFree(d_loss);
    free(X); free(Y); free(h); free(Z); free(loss);
    return 0;
}
|
a210176cf3bf5fd811e11fee98555c2d1cc3f361.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "serialReduction.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/* Auto-generated benchmark harness: sweeps a fixed set of matrix sizes and
 * block shapes and times 1000 launches of serialReduction for each combo.
 * argv[1] selects how many matrix sizes to sweep.
 * NOTE(review): hipMalloc(&d_array, XSIZE*YSIZE) allocates XSIZE*YSIZE
 * *bytes* (no sizeof(int)) — presumably harmless here because the kernel is
 * invoked with numberOfElements = 1, but verify against serialReduction.
 * NOTE(review): the timed loop has no hipDeviceSynchronize before `end`,
 * so it largely measures asynchronous launch overhead, not kernel runtime.
 * d_array is also never freed across iterations. */
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *d_array = NULL;
            hipMalloc(&d_array, XSIZE*YSIZE);
            int numberOfElements = 1;
            // round the problem size up to a multiple of the block shape
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);hipLaunchKernelGGL((
                    serialReduction), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,numberOfElements);
            hipDeviceSynchronize();
            // warm-up launches before timing
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
                    serialReduction), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,numberOfElements);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
                    serialReduction), dim3(gridBlock),dim3(threadBlock), 0, 0, d_array,numberOfElements);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }} | a210176cf3bf5fd811e11fee98555c2d1cc3f361.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "serialReduction.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/* Auto-generated benchmark harness: sweeps a fixed set of matrix sizes and
 * block shapes and times 1000 launches of serialReduction for each combo.
 * argv[1] selects how many matrix sizes to sweep.
 * NOTE(review): cudaMalloc(&d_array, XSIZE*YSIZE) allocates XSIZE*YSIZE
 * *bytes* (no sizeof(int)) — presumably harmless here because the kernel is
 * invoked with numberOfElements = 1, but verify against serialReduction.
 * NOTE(review): the timed loop has no cudaDeviceSynchronize before `end`,
 * so it largely measures asynchronous launch overhead, not kernel runtime.
 * d_array is also never freed across iterations. */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int *d_array = NULL;
            cudaMalloc(&d_array, XSIZE*YSIZE);
            int numberOfElements = 1;
            // round the problem size up to a multiple of the block shape
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            serialReduction<<<gridBlock,threadBlock>>>(d_array,numberOfElements);
            cudaDeviceSynchronize();
            // warm-up launches before timing
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                serialReduction<<<gridBlock,threadBlock>>>(d_array,numberOfElements);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                serialReduction<<<gridBlock,threadBlock>>>(d_array,numberOfElements);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }}
2db075f3da53969996cbb9d0e35f974c3830cdf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg/gpujpeg_huffman_gpu_encoder.h"
#include "gpujpeg/gpujpeg_util.h"
#define WARPS_NUM 8
/** Natural order in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/**
* Huffman coding tables in constant memory - each has 257 items (256 + 1 extra)
 * There are 4 of them - one after another, in the following order:
* - luminance (Y) AC
* - luminance (Y) DC
* - chroma (cb/cr) AC
* - chroma (cb/cr) DC
*/
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
* Value decomposition in constant memory (input range from -4096 to 4095 ... both inclusive)
 * Mapping from coefficient value to the code for the value and its bit size.
*/
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/** Allocate huffman tables in constant memory */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
/** Internal state of the GPU Huffman encoder. */
struct gpujpeg_huffman_gpu_encoder
{
/** Size of occupied part of output buffer (single unsigned int in device memory) */
unsigned int * d_gpujpeg_huffman_output_byte_count;
};
/**
 * Initializes coefficient decomposition table in global memory. (CC >= 2.0)
 * Output table is a mapping from some value into its code and bit size.
 *
 * Expected to be launched with 8192 threads in total — one per table entry,
 * covering the input value range -4096 .. 4095 (both inclusive).
 */
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // fetch some value
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;

    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // absolute is now the absolute value of the input
        absolute = -absolute;
        // For a negative input, want value_code = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }

    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }

    // Save result packed into unsigned int (value bits are left aligned in MSBs
    // and size is right aligned in LSBs). The value == 0 case (value_nbits == 0)
    // is handled explicitly: the previous expression shifted a 32-bit value by
    // 32 bits, which is undefined behavior in C++; the intended result is 0.
    gpujpeg_huffman_value_decomposition[tid] = value_nbits
        ? (value_nbits | (value_code << (32 - value_nbits)))
        : 0;
}
#if __CUDA_ARCH__ >= 200
/**
 * Adds up to 32 bits at once into output buffer, applying byte stuffing
 * (a zero byte is kept after each emitted 0xFF byte).
 * Codeword value must be aligned to left (most significant bits). (CC >= 2.0)
 *
 * @param remaining_bits    bit buffer; pending bits left-aligned in the MSBs (updated)
 * @param byte_count        number of bytes already written to out_ptr (updated)
 * @param bit_count         number of valid bits in remaining_bits (updated)
 * @param out_ptr           output byte buffer (expected to be zeroed, so stuffing
 *                          zero bytes only advance the counter)
 * @param packed_code_word  codeword bits in the 27 MSBs, bit count in the 5 LSBs
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
// decompose packed codeword into the msb-aligned value and bit-length of the value
const unsigned int code_word = packed_code_word & ~31;
const unsigned int code_bit_size = packed_code_word & 31;
// concatenate with remaining bits
remaining_bits |= code_word >> bit_count;
bit_count += code_bit_size;
// flush some bytes if have more than 8 bits
if (bit_count >= 8) {
do {
// emit the most significant byte of the bit buffer
const unsigned int out_byte = remaining_bits >> 24;
out_ptr[byte_count++] = out_byte;
if(0xff == out_byte) {
// keep zero byte after each 0xFF (buffer is expected to be zeroed)
out_ptr[byte_count++] = 0;
}
remaining_bits <<= 8;
bit_count -= 8;
} while (bit_count >= 8);
// keep only remaining bits in the buffer; recompose them from the original
// codeword (bits that did not fit before the flush would otherwise be lost)
remaining_bits = code_word << (code_bit_size - bit_count);
remaining_bits &= 0xfffffffe << (31 - bit_count);
}
}
/**
 * Given some huffman table offset, RLE zero count and coefficient value,
 * this returns huffman codeword for the value (packed in 27 MSBs)
 * together with its bit size (in 5 LSBs). (CC >= 2.0)
 *
 * @param preceding_zero_count  number of zero coefficients preceding this one
 *                              (16 with a zero coefficient selects the special
 *                              symbol at table index 256)
 * @param coefficient           coefficient value (expected range -4096 .. 4095)
 * @param huffman_lut_offset    offset of the right table in gpujpeg_huffman_gpu_lut
 */
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
const int huffman_lut_offset)
{
// value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned)
const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient];
// decompose value info into upshifted value and value's bit size
// (bit size fits in 4 bits: at most 13 bits are needed for |value| <= 4096)
const int value_nbits = packed_value & 0xf;
const unsigned int value_code = packed_value & ~0xf;
// find prefix of the codeword and size of the prefix
const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits;
const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx];
const unsigned int prefix_nbits = packed_prefix & 31;
// compose packed codeword with its size
// (adding value_nbits to the packed prefix sums the bit sizes in the 5 LSBs)
return (packed_prefix + value_nbits) | (value_code >> prefix_nbits);
}
/**
 * Flush remaining codewords from buffer in shared memory to global memory output buffer. (CC >= 2.0)
 *
 * Run by all 32 threads of a warp.
 *
 * @param s_out                shared memory codeword buffer of the warp
 * @param data_compressed      output pointer, pre-offset by tid * 4 (advanced on flush)
 * @param remaining_codewords  count of codewords in s_out (reset to 0)
 * @param tid                  thread index within the warp (0..31)
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
// this works for up to 4 * 32 remaining codewords
if(remaining_codewords) {
// pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
s_out[remaining_codewords + tid] = 0;
// save all remaining codewords at once (together with some zero sized padding codewords)
*((uint4*)data_compressed) = ((uint4*)s_out)[tid];
// update codeword counter
data_compressed += remaining_codewords;
remaining_codewords = 0;
}
}
/**
 * Encode one 8x8 block (CC >= 2.0)
 *
 * Run cooperatively by all 32 threads of a warp: each thread processes one
 * pair of coefficients (after zigzag reordering). Output positions of nonzero
 * codewords are computed with warp ballots; codewords are collected into the
 * warp's shared buffer and flushed to global memory in 16 B chunks once the
 * buffer fills past half of its capacity.
 *
 * NOTE(review): uses legacy mask-less __ballot and unsynchronized intra-warp
 * shared memory accesses — relies on implicit warp-synchronous execution;
 * revisit for architectures with independent thread scheduling.
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
// each thread loads a pair of values (pair after zigzag reordering)
const int load_idx = tid * 2;
int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];
// compute preceding zero count for even coefficient (actually compute the count multiplied by 16)
const unsigned int nonzero_mask = (1 << tid) - 1;
const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC is always treated as nonzero
const unsigned int nonzero_bitmap_1 = __ballot(in_odd);
const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
int zeros_before_even = 2 * (zero_pair_count + tid - 32);
if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
zeros_before_even += 1;
}
// true if any nonzero pixel follows thread's odd pixel
const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;
// count of consecutive zeros before odd value (either one more than
// even if even is zero or none if even value itself is nonzero)
// (the count is actually multiplied by 16)
int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;
// clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emitted)
// otherwise only trim extra bits from the counts of following zeros
const int zero_count_mask = nonzero_follows ? 0xF : 0;
zeros_before_even &= zero_count_mask;
zeros_before_odd &= zero_count_mask;
// pointer to LUT for encoding thread's even value
// (only thread #0 uses DC table, others use AC table)
int even_lut_offset = huffman_lut_offset;
// first thread handles special DC coefficient
if(0 == tid) {
// first thread uses DC part of the table for its even value
even_lut_offset += 256 + 1;
// update last DC coefficient (saved at the special place at the end of the shared buffer)
const int original_in_even = in_even;
in_even -= ((int*)s_out)[last_dc_idx];
((int*)s_out)[last_dc_idx] = original_in_even;
}
// last thread handles special block-termination symbol
if(0 == ((tid ^ 31) | in_odd)) {
// this causes selection of huffman symbol at index 256 (which contains the termination symbol)
zeros_before_odd = 16;
}
// each thread gets codeword for its two pixels
unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);
// concatenate both codewords into one if they are short enough
// (27 bits of codeword payload + 5 bits of size fit into one packed word)
const unsigned int even_code_size = even_code & 31;
const unsigned int odd_code_size = odd_code & 31;
const unsigned int total_size = even_code_size + odd_code_size;
if(total_size <= 27) {
even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
odd_code = 0;
}
// each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
const unsigned int even_codeword_presence = __ballot(even_code);
const unsigned int odd_codeword_presence = __ballot(odd_code);
const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
+ __popc(nonzero_mask & odd_codeword_presence);
// each thread saves its values into temporary shared buffer
if(even_code) {
s_out[remaining_codewords + codeword_offset] = even_code;
if(odd_code) {
s_out[remaining_codewords + codeword_offset + 1] = odd_code;
}
}
// advance count of codewords in shared memory buffer
remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);
// flush some codewords to global memory if there are too many of them in shared buffer
const int flush_count = 32 * 4; // = half of the buffer
if(remaining_codewords > flush_count) {
// move first half of the buffer into output buffer in global memory and update output pointer
*((uint4*)data_compressed) = ((uint4*)s_out)[tid];
data_compressed += flush_count;
// shift remaining codewords to begin of the buffer and update their count
((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
remaining_codewords -= flush_count;
}
// nothing to fail here
return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
 * Huffman encoder kernel (For compute capability >= 2.0)
 *
 * Launched with WARPS_NUM warps per threadblock; each warp cooperatively
 * encodes one segment into the temporary buffer d_data_compressed as a
 * stream of packed codewords (serialized into bytes by a later kernel).
 *
 * @tparam CONTINUOUS_BLOCK_LIST  true for non-interleaved scans (blocks of a
 *         segment are contiguous in memory), false to follow d_block_list
 * @return void
 */
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
struct gpujpeg_segment* d_segment,
int segment_count,
uint8_t* d_data_compressed,
const uint64_t* const d_block_list,
int16_t* const d_data_quantized,
struct gpujpeg_component* const d_component,
const int comp_count,
unsigned int * d_gpujpeg_huffman_output_byte_count
) {
#if __CUDA_ARCH__ >= 200
int warpidx = threadIdx.x >> 5;
int tid = threadIdx.x & 31;
// per-warp shared buffer: 65 uint4 = 260 uints
// (256 codeword slots, slots 256+ hold the last DC coefficients)
__shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));
// Number of remaining codewords in shared buffer
int remaining_codewords = 0;
// Select Segment
const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
const int segment_index = block_idx * WARPS_NUM + warpidx;
// first thread initializes compact output size for next kernel
if(0 == tid && 0 == warpidx && 0 == block_idx) {
*d_gpujpeg_huffman_output_byte_count = 0;
}
// stop if out of segment bounds
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// Initialize last DC coefficients (up to 3 slots, one per component)
if(tid < 3) {
s_out[256 + tid] = 0;
}
// Prepare data pointers
unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
unsigned int * data_compressed_start = data_compressed;
// Pre-add thread ID to output pointer (it's always used only with it)
data_compressed += (tid * 4);
// Encode all block in segment
if(CONTINUOUS_BLOCK_LIST) {
// Get component for current scan
const struct gpujpeg_component* component = &d_component[segment->scan_index];
// mcu size of the component
const int comp_mcu_size = component->mcu_size;
// Get component data for MCU (first block)
const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;
// Get huffman table offset
const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables
// Encode MCUs in segment
for (int block_count = segment->mcu_count; block_count--;) {
// Encode 8x8 block
gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);
// Advance to next block
block += comp_mcu_size;
}
} else {
// Pointer to segment's list of 8x8 blocks and their count
const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
// Encode all blocks
for(int block_count = segment->block_count; block_count--;) {
// Get pointer to next block input data and info about its color type
const uint64_t packed_block_info = *(packed_block_info_ptr++);
// Get coder parameters (low 7 bits select the component's last-DC slot)
const int last_dc_idx = 256 + (packed_block_info & 0x7f);
// Get offset to right part of huffman table
const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables
// Source data pointer (bits 8+ hold the block's offset in d_data_quantized)
int16_t* block = &d_data_quantized[packed_block_info >> 8];
// Encode 8x8 block
gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
}
}
// flush remaining codewords
gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);
// Set number of codewords.
if (tid == 0 ) {
segment->data_compressed_size = data_compressed - data_compressed_start;
}
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread per segment: reads the packed codewords produced by the warp
 * encoder kernel, serializes them into a byte stream with 0xFF byte stuffing
 * and terminates the segment with a restart marker.
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
struct gpujpeg_segment* d_segment,
int segment_count,
const uint8_t* const d_src,
uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
// Temp buffer for all threads of the threadblock
__shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];
// Thread's 32 bytes in shared memory for output composition
uint4 * const s_temp = s_temp_all + threadIdx.x * 2;
// Select Segment
const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
if ( segment_index >= segment_count )
return;
// Thread's segment
struct gpujpeg_segment* const segment = &d_segment[segment_index];
// Input and output pointers
const int data_offset = segment->data_temp_index;
uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
uint4 * d_dest_stream = d_dest_stream_start;
const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);
// number of bytes in the temp buffer, remaining bits and their count
int byte_count = 0, bit_count = 0;
unsigned int remaining_bits = 0;
// "data_compressed_size" is now initialized to number of codewords to be serialized
for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
{
// read 4 codewords and advance input pointer to next ones
const uint4 cwords = *(d_src_codewords++);
// encode first pair of codewords
gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);
// possibly flush output if have at least 16 bytes
if(byte_count >= 16) {
// write 16 bytes into destination buffer
*(d_dest_stream++) = s_temp[0];
// move remaining bytes to first half of the buffer
s_temp[0] = s_temp[1];
// update number of remaining bits
byte_count -= 16;
}
// encode other two codewords
gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);
// possibly flush output if have at least 16 bytes
if(byte_count >= 16) {
// write 16 bytes into destination buffer
*(d_dest_stream++) = s_temp[0];
// move remaining bytes to first half of the buffer
s_temp[0] = s_temp[1];
// update number of remaining bits
byte_count -= 16;
}
}
// Emit left bits: pad to a byte boundary with up to seven 1-bits
// (0xfe000007 = seven 1-bits left-aligned, bit count 7 in the 5 LSBs)
gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);
// Terminate codestream with restart marker
((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
// flush remaining bytes
d_dest_stream[0] = s_temp[0];
d_dest_stream[1] = s_temp[1];
// Set compressed size (full 16 B chunks + buffered bytes + 2 marker bytes)
segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Only single threadblock with 512 threads is launched.
 *
 * Computes an exclusive prefix sum of segment byte sizes (each rounded up to
 * a multiple of 16 B) into the segments' data_compressed_index fields and
 * stores the total into *d_gpujpeg_huffman_output_byte_count.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
struct gpujpeg_segment* const d_segment,
const int segment_count,
unsigned int * d_gpujpeg_huffman_output_byte_count
) {
// offsets of segments
__shared__ unsigned int s_segment_offsets[512];
// cumulative sum of bytes of all segments
unsigned int total_byte_count = 0;
// iterate over all segments, 512 at a time; the bound is rounded up so that
// all threads run the same number of iterations (required for __syncthreads)
const unsigned int segment_idx_end = (segment_count + 511) & ~511;
for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
// all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
s_segment_offsets[threadIdx.x] = segment_idx < segment_count
? (d_segment[segment_idx].data_compressed_size + 15) & ~15
: 0;
// first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
__syncthreads();
if(0 == threadIdx.x) {
#pragma unroll 4
for(int i = 0; i < 512; i++) {
const unsigned int segment_size = s_segment_offsets[i];
s_segment_offsets[i] = total_byte_count;
total_byte_count += segment_size;
}
}
__syncthreads();
// all threads write offsets back into corresponding segment structures
if(segment_idx < segment_count) {
d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
}
}
// first thread finally saves the total sum of bytes needed for compressed data
if(threadIdx.x == 0) {
*d_gpujpeg_huffman_output_byte_count = total_byte_count;
}
}
/**
 * Huffman coder output compaction kernel.
 *
 * Copies each segment's byte stream from the temporary buffer into its final
 * compact position in the output buffer — one warp per segment, 16 B per
 * thread per iteration. Threadblock shape is 32 x N (threadIdx.y is the warp
 * index within the block).
 *
 * NOTE(review): lane 0 publishes the segment's output pointer through a
 * volatile shared variable and the other lanes read it with no explicit
 * barrier — this relies on implicit intra-warp synchrony; consider
 * __syncwarp() on architectures with independent thread scheduling.
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
struct gpujpeg_segment* const d_segment,
const int segment_count,
const uint8_t* const d_src,
uint8_t* const d_dest,
unsigned int * d_gpujpeg_huffman_output_byte_count
) {
// get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
const int segment_idx = threadIdx.y + block_idx * blockDim.y;
if(segment_idx >= segment_count) {
return;
}
// temp variables for all warps
__shared__ uint4* volatile s_out_ptrs[WARPS_NUM];
// get info about the segment
const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary
// first thread of each warp reserves space in output buffer
if(0 == threadIdx.x) {
// Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
#if __CUDA_ARCH__ == 100
const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
#else
const unsigned int segment_out_offset = atomicAdd(d_gpujpeg_huffman_output_byte_count, segment_byte_count);
d_segment[segment_idx].data_compressed_index = segment_out_offset;
#endif
s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
}
// all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)
// copy the data!
while(copy_iterations--) {
*d_out = *d_in;
d_out += 32;
d_in += 32;
}
// copy remaining bytes (less than 512 bytes)
if((threadIdx.x * 16) < (segment_byte_count & 511)) {
*d_out = *d_in;
}
}
// Threadblock size for CC 1.x kernel
#define THREAD_BLOCK_SIZE 48
/**
 * Write one byte to compressed data (CC 1.x); advances the pointer.
 *
 * @param data_compressed Data compressed
 * @param value Byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
*data_compressed = (uint8_t)(value); \
data_compressed++; }
/**
 * Write two bytes to compressed data (CC 1.x); big-endian, high byte first.
 *
 * @param data_compressed Data compressed
 * @param value Two-byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
*data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
data_compressed++; \
*data_compressed = (uint8_t)((value) & 0xFF); \
data_compressed++; }
/**
 * Write marker to compressed data (CC 1.x); emits the 0xFF prefix byte
 * followed by the marker byte.
 *
 * @param data_compressed Data compressed
 * @param marker Marker to write (JPEG_MARKER_...)
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
*data_compressed = 0xFF;\
data_compressed++; \
*data_compressed = (uint8_t)(marker); \
data_compressed++; }
/**
 * Output bits to the file. Only the right 24 bits of the bit buffer are used;
 * the valid bits are left-justified in this part. At most 16 bits can be
 * passed to EmitBits in one call, and we never retain more than 7 bits
 * in put_value between calls, so 24 bits are sufficient. Version for CC 1.x
 *
 * @param code Huffman code to emit (right-aligned)
 * @param size Size in bits of the Huffman code (must be nonzero)
 * @param put_value Bit buffer; valid bits left-justified in low 24 bits (updated)
 * @param put_bits Number of valid bits in put_value (updated)
 * @param data_compressed Output byte pointer (advanced as bytes are written)
 * @return 0 on success, -1 if size is 0 (invalid Huffman table entry)
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
// This routine is heavily used, so it's worth coding tightly
int _put_buffer = (int)code;
int _put_bits = put_bits;
// If size is 0, caller used an invalid Huffman table entry
if ( size == 0 )
return -1;
// Mask off any extra bits in code
_put_buffer &= (((int)1) << size) - 1;
// New number of bits in buffer
_put_bits += size;
// Align incoming bits
_put_buffer <<= 24 - _put_bits;
// And merge with old buffer contents
_put_buffer |= put_value;
// If there are more than 8 bits, write it out
unsigned char uc;
while ( _put_bits >= 8 ) {
// Write one byte out
uc = (unsigned char) ((_put_buffer >> 16) & 0xFF);
gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, uc);
// If need to stuff a zero byte
if ( uc == 0xFF ) {
// Write zero byte out
gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
}
_put_buffer <<= 8;
_put_bits -= 8;
}
// update state variables
put_value = _put_buffer;
put_bits = _put_bits;
return 0;
}
/**
 * Emit left bits (CC 1.x): pads the bit buffer with 1-bits up to a byte
 * boundary and resets the coder state.
 *
 * @param put_value Bit buffer of the Huffman coder (reset to 0)
 * @param put_bits Number of valid bits in put_value (reset to 0)
 * @param data_compressed Output byte pointer (advanced)
 * @return void
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
// Emit seven 1-bits; together with at most 7 bits already buffered this
// flushes every complete byte.
const int status = gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed);
if ( 0 != status )
return;
// Discard the (all-ones) incomplete remainder, if any.
put_value = 0;
put_bits = 0;
}
/**
 * Encode one 8x8 block (for CC 1.x)
 *
 * Each thread of the threadblock serially encodes its own block; the block is
 * first staged into shared memory (64 coefficients per thread, so blockDim.x
 * must be THREAD_BLOCK_SIZE). Follows sections F.1.2.1 (DC) and F.1.2.2 (AC)
 * of the JPEG specification.
 *
 * @param put_value Bit buffer of the thread's Huffman coder (updated)
 * @param put_bits Number of valid bits in put_value (updated)
 * @param dc Last DC coefficient of the component, used for DC prediction (updated)
 * @param data Pointer to the 8x8 block of quantized coefficients
 * @param data_compressed Output byte pointer (advanced)
 * @param d_table_dc Huffman table for the DC coefficient
 * @param d_table_ac Huffman table for the AC coefficients
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
typedef uint64_t loading_t;
const int loading_iteration_count = 64 * 2 / sizeof(loading_t);
// Load block to shared memory (64-bit loads, 16 iterations per thread)
__shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
for ( int i = 0; i < loading_iteration_count; i++ ) {
((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
}
int data_start = 64 * threadIdx.x;
// Encode the DC coefficient difference per section F.1.2.1
int temp = s_data[data_start + 0] - dc;
dc = s_data[data_start + 0];
int temp2 = temp;
if ( temp < 0 ) {
// Temp is abs value of input
temp = -temp;
// For a negative input, want temp2 = bitwise complement of abs(input)
// This code assumes we are on a two's complement machine
temp2--;
}
// Find the number of bits needed for the magnitude of the coefficient
int nbits = 0;
while ( temp ) {
nbits++;
temp >>= 1;
}
// Write category number
if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
return -1;
}
// Write category offset (EmitBits rejects calls with size 0)
if ( nbits ) {
if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
return -1;
}
// Encode the AC coefficients per section F.1.2.2 (r = run length of zeros)
int r = 0;
for ( int k = 1; k < 64; k++ )
{
// read AC coefficients in zigzag order
temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
if ( temp == 0 ) {
r++;
}
else {
// If run length > 15, must emit special run-length-16 codes (0xF0)
while ( r > 15 ) {
if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
return -1;
r -= 16;
}
temp2 = temp;
if ( temp < 0 ) {
// temp is abs value of input
temp = -temp;
// This code assumes we are on a two's complement machine
temp2--;
}
// Find the number of bits needed for the magnitude of the coefficient
// there must be at least one 1 bit
nbits = 1;
while ( (temp >>= 1) )
nbits++;
// Emit Huffman symbol for run length / number of bits
int i = (r << 4) + nbits;
if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
return -1;
// Write Category offset
if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
return -1;
r = 0;
}
}
// If all the left coefs were zero, emit an end-of-block code
if ( r > 0 ) {
if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
return -1;
}
return 0;
}
/**
 * Huffman encoder kernel (for CC 1.x)
 *
 * One thread per segment: each thread serially encodes all MCUs of its
 * segment into the temporary buffer and appends a restart marker.
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
struct gpujpeg_component* d_component,
struct gpujpeg_segment* d_segment,
int comp_count,
int segment_count,
uint8_t* d_data_compressed,
unsigned int * d_gpujpeg_huffman_output_byte_count
)
{
int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
if ( segment_index >= segment_count )
return;
struct gpujpeg_segment* segment = &d_segment[segment_index];
// first thread initializes compact output size for next kernel
if(0 == segment_index) {
*d_gpujpeg_huffman_output_byte_count = 0;
}
// Initialize huffman coder
int put_value = 0;
int put_bits = 0;
// last DC coefficient per component (for DC prediction)
int dc[GPUJPEG_MAX_COMPONENT_COUNT];
for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
dc[comp] = 0;
// Prepare data pointers
uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
uint8_t* data_compressed_start = data_compressed;
// Non-interleaving mode
if ( comp_count == 1 ) {
// index of the segment within its scan (shadows the outer segment_index)
int segment_index = segment->scan_segment_index;
// Encode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
// Get component for current scan
struct gpujpeg_component* component = &d_component[segment->scan_index];
// Get component data for MCU
int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
// Get coder parameters
int & component_dc = dc[segment->scan_index];
// Get huffman tables
struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
} else {
d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
}
// Encode 8x8 block
if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
break;
}
}
// Interleaving mode
else {
// index of the segment within its scan (shadows the outer segment_index)
int segment_index = segment->scan_segment_index;
// Encode MCUs in segment
for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
//assert(segment->scan_index == 0);
for ( int comp = 0; comp < comp_count; comp++ ) {
struct gpujpeg_component* component = &d_component[comp];
// Prepare mcu indexes
int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
// Compute base data index
int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// For all vertical 8x8 blocks
for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
// Compute base row data index
int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
// For all horizontal 8x8 blocks
for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
// Compute 8x8 block data index
int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
// Get component data for MCU
int16_t* block = &component->d_data_quantized[data_index];
// Get coder parameters
int & component_dc = dc[comp];
// Get huffman tables
struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
} else {
d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
}
// Encode 8x8 block
gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
}
}
}
}
}
// Emit left bits
if ( put_bits > 0 )
gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);
// Output restart marker
int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);
// Set compressed size
segment->data_compressed_size = data_compressed - data_compressed_start;
}
/**
 * Adds packed coefficients into the GPU version of Huffman lookup table.
 *
 * Packs each entry as codeword bits left-aligned in the MSBs and codeword
 * bit-length in the LSBs. Entry 256 duplicates entry 0 (GPU tables have one
 * extra slot).
 *
 * @param dest  destination array of 257 packed entries
 * @param src   CPU Huffman table (code/size arrays)
 * @param is_ac true for AC tables — entry 0 is then reserved (zeroed)
 */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // make a upshifted copy of the table for GPU encoding
    for ( int i = 0; i <= 256; i++ ) {
        const int size = src->size[i & 0xFF];
        // Guard the size == 0 case (unused table entries): the previous
        // expression shifted by 32 bits, which is undefined behavior in C++;
        // such entries pack to 0.
        dest[i] = size ? ((src->code[i & 0xFF] << (32 - size)) | size) : 0;
    }
    // reserve first index in GPU version of AC table for special purposes
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/**
 * Documented at declaration.
 * Allocates and initializes the GPU Huffman encoder: device byte counter,
 * value-decomposition LUT, packed Huffman LUT (CC >= 2.0 path), the original
 * Huffman tables (CC 1.x path), the zigzag natural-order table, and kernel
 * cache preferences. Returns NULL on any failure.
 *
 * NOTE(review): on the gpujpeg_cuda_check_error(..., return NULL) paths below
 * the malloc'd huffman_gpu_encoder is leaked — confirm the macro semantics and
 * free it before returning.
 */
struct gpujpeg_huffman_gpu_encoder *
gpujpeg_huffman_gpu_encoder_create(const struct gpujpeg_encoder * encoder)
{
    struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder = (struct gpujpeg_huffman_gpu_encoder *) malloc(sizeof(struct gpujpeg_huffman_gpu_encoder));
    if ( huffman_gpu_encoder == NULL ) {
        return NULL;
    }
    memset(huffman_gpu_encoder, 0, sizeof(struct gpujpeg_huffman_gpu_encoder));
    // Allocate single device counter of bytes occupied in the compressed output
    hipMalloc((void**)&huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int));
    gpujpeg_cuda_check_error("Allocation of huffman output byte count failed", return NULL);
    // Initialize decomposition lookup table (maps coefficient value -> code bits + bit size)
    hipFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, hipFuncCachePreferShared);
    hipLaunchKernelGGL(( gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel), dim3(32), dim3(256), 0, 0, ); // 8192 threads total
    hipDeviceSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed", return NULL);
    // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0)
    // Layout: 4 consecutive subtables of 257 entries each (Y AC, Y DC, chroma AC, chroma DC)
    uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4];
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false);
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        gpujpeg_huffman_cpu_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)", return NULL);
    // Copy original Huffman coding tables to GPU memory (for CC 1.x)
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)", return NULL);
    // Copy natural order to constant device memory
    hipMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        hipMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)", return NULL);
    // Configure more shared memory for all kernels
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, hipFuncCachePreferShared);
    return huffman_gpu_encoder;
}
void
gpujpeg_huffman_gpu_encoder_destroy(struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder)
{
    // Destroying a NULL encoder is a caller error
    assert(huffman_gpu_encoder != NULL);
    // Release the device-side output byte counter, if it was ever allocated
    unsigned int * const d_byte_count = huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count;
    if (d_byte_count != NULL) {
        hipFree(d_byte_count);
    }
    // Release the host-side encoder structure itself
    free(huffman_gpu_encoder);
}
/**
 * Get grid size for specified count of threadblocks. (Grid size is limited
 * to 65536 in both directions, so if we need more threadblocks, we must use
 * both x and y coordinates.)
 *
 * Total grid capacity (x * y) always covers at least tblock_count blocks.
 */
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    dim3 size(tblock_count);
    // Halve x (rounding up) and double y until x fits into 16 bits.
    for ( ; size.x > 0xffff; size.y <<= 1 ) {
        size.x = (size.x + 1) >> 1;
    }
    return size;
}
/**
 * Documented at declaration.
 * Runs GPU Huffman encoding for all segments of the image:
 *  - CC 1.x: single monolithic kernel (one thread per segment),
 *  - CC >= 2.0: warp-per-segment encoder + codeword serialization kernel,
 * followed by (optional) output-space allocation and output compaction.
 *
 * @param encoder              encoder with coder state and CUDA/HIP stream
 * @param huffman_gpu_encoder  GPU Huffman encoder state (device byte counter)
 * @param output_byte_count    [out] host variable receiving compressed byte count
 *                             (copied asynchronously on encoder->stream — caller
 *                             must synchronize the stream before reading it)
 * @return 0 on success, -1 on failure
 */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;
    assert(coder->param.restart_interval > 0);
    // Select encoder kernel which either expects continuos segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // Run kernel (one thread per segment)
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel), dim3(grid), dim3(thread), 0, *(encoder->stream),
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman,
            huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
        );
        gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
    } else {
        // Run encoder kernel (one warp per segment)
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<true>), dim3(grid), dim3(thread), 0, *(encoder->stream),
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        } else {
            hipLaunchKernelGGL(( gpujpeg_huffman_encoder_encode_kernel_warp<false>), dim3(grid), dim3(thread), 0, *(encoder->stream),
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        }
        // Run codeword serialization kernel
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        // BUGFIX: launch with the size-limited 2D grid computed above (the kernel
        // already decodes blockIdx.y), not with the raw threadblock count, which
        // could exceed the limit of a single grid dimension for large images.
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_serialization_kernel), serialization_grid, dim3(SERIALIZATION_THREADS_PER_TBLOCK), 0, *(encoder->stream),
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        gpujpeg_cuda_check_error("Codeword serialization failed", return -1);
    }
    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        hipLaunchKernelGGL(( gpujpeg_huffman_encoder_allocation_kernel), dim3(1), dim3(512), 0, *(encoder->stream), coder->d_segment, coder->segment_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count);
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed", return -1);
    }
    // Run output compaction kernel (one warp per segment)
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    hipLaunchKernelGGL(( gpujpeg_huffman_encoder_compaction_kernel), dim3(compaction_grid), dim3(compaction_thread), 0, *(encoder->stream),
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed,
        huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
    );
    gpujpeg_cuda_check_error("Huffman output compaction failed", return -1);
    // Read and return number of occupied bytes (async — valid after stream sync)
    hipMemcpyAsync(output_byte_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int), hipMemcpyDeviceToHost, *(encoder->stream));
    gpujpeg_cuda_check_error("Huffman output size getting failed", return -1);
    // indicate success
    return 0;
}
| 2db075f3da53969996cbb9d0e35f974c3830cdf1.cu | /**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg/gpujpeg_huffman_gpu_encoder.h"
#include "gpujpeg/gpujpeg_util.h"
#define WARPS_NUM 8
/** Natural order (zigzag -> raster index mapping) in constant memory */
__constant__ int gpujpeg_huffman_gpu_encoder_order_natural[GPUJPEG_ORDER_NATURAL_SIZE];
/**
 * Huffman coding tables in device memory - each has 257 items (256 + 1 extra)
 * There are 4 of them - one after another, in following order:
 * - luminance (Y) AC
 * - luminance (Y) DC
 * - chroma (cb/cr) AC
 * - chroma (cb/cr) DC
 */
__device__ uint32_t gpujpeg_huffman_gpu_lut[(256 + 1) * 4];
/**
 * Value decomposition table in device memory (input range from -4096 to 4095 ... both inclusive)
 * Mapping from coefficient value into the code for the value and its bit size.
 */
__device__ unsigned int gpujpeg_huffman_value_decomposition[8 * 1024];
/** Huffman tables in device memory (original CPU layout, used by the CC 1.x kernel) */
__device__ struct gpujpeg_table_huffman_encoder gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_TYPE_COUNT][GPUJPEG_HUFFMAN_TYPE_COUNT];
/** Internal state of the GPU Huffman encoder. */
struct gpujpeg_huffman_gpu_encoder
{
    /** Device pointer to single counter of bytes occupied in the compressed output buffer */
    unsigned int * d_gpujpeg_huffman_output_byte_count;
};
/**
 * Initializes coefficient decomposition table in global memory. (CC >= 2.0)
 * Output table is a mapping from some value into its code and bit size.
 * Launched with 8192 threads total — one thread per table entry,
 * covering input values -4096 .. 4095.
 */
__global__ static void
gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel() {
    // fetch some value
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int value = tid - 4096;
    // decompose it
    unsigned int value_code = value;
    int absolute = value;
    if ( value < 0 ) {
        // absolute is now absolute value of input
        absolute = -absolute;
        // For a negative input, want value_code = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        value_code--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    unsigned int value_nbits = 0;
    while ( absolute ) {
        value_nbits++;
        absolute >>= 1;
    }
    // save result packed into unsigned int (value bits are left aligned in MSBs and size is right aligned in LSBs)
    // BUGFIX: for value == 0, value_nbits is 0 and shifting a 32bit value by
    // 32 bits is undefined behavior => store 0 value bits explicitly instead
    gpujpeg_huffman_value_decomposition[tid] = value_nbits
            | (value_nbits ? (value_code << (32 - value_nbits)) : 0);
}
#if __CUDA_ARCH__ >= 200
/**
 * Adds up to 32 bits at once into output buffer, applying JPEG byte stuffing
 * (a zero byte is kept after each emitted 0xFF byte).
 * Codeword value must be aligned to left (most significant bits). (CC >= 2.0)
 *
 * @param remaining_bits    bit accumulator; holds bit_count pending bits in its MSBs (updated)
 * @param byte_count        number of bytes already written into out_ptr (updated)
 * @param bit_count         number of valid bits in remaining_bits; kept below 8 on exit (updated)
 * @param out_ptr           output byte buffer (expected to be zeroed for the stuffing bytes)
 * @param packed_code_word  codeword left-aligned in the upper 27 bits, bit size in the lower 5 bits
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int & remaining_bits, int & byte_count, int & bit_count, uint8_t * const out_ptr, const unsigned int packed_code_word)
{
    // decompose packed codeword into the msb-aligned value and bit-length of the value
    const unsigned int code_word = packed_code_word & ~31;
    const unsigned int code_bit_size = packed_code_word & 31;
    // concatenate with remaining bits
    remaining_bits |= code_word >> bit_count;
    bit_count += code_bit_size;
    // flush some bytes if have more than 8 bits
    if (bit_count >= 8) {
        do {
            // emit the most significant byte of the accumulator
            const unsigned int out_byte = remaining_bits >> 24;
            out_ptr[byte_count++] = out_byte;
            if(0xff == out_byte) {
                // keep zero byte after each 0xFF (buffer is expected to be zeroed)
                out_ptr[byte_count++] = 0;
            }
            remaining_bits <<= 8;
            bit_count -= 8;
        } while (bit_count >= 8);
        // keep only remaining bits in the buffer
        // (re-derive from code_word to discard bits lost by the shifts above)
        remaining_bits = code_word << (code_bit_size - bit_count);
        remaining_bits &= 0xfffffffe << (31 - bit_count);
    }
}
/**
 * Given some huffman table offset, RLE zero count and coefficient value,
 * this returns huffman codeword for the value (packed in 27 MSBs)
 * together with its bit size (in 5 LSBs). (CC >= 2.0)
 *
 * @param preceding_zero_count  number of zero coefficients preceding this one (run length)
 * @param coefficient           quantized DCT coefficient value (range -4096 .. 4095)
 * @param huffman_lut_offset    offset of the right subtable within gpujpeg_huffman_gpu_lut
 * @return packed codeword: prefix + value bits left-aligned, total bit size in 5 LSBs
 */
__device__ static unsigned int
gpujpeg_huffman_gpu_encode_value(const int preceding_zero_count, const int coefficient,
                                 const int huffman_lut_offset)
{
    // value bits are in MSBs (left aligned) and bit size of the value is in LSBs (right aligned)
    const unsigned int packed_value = gpujpeg_huffman_value_decomposition[4096 + coefficient];
    // decompose value info into upshifted value and value's bit size
    const int value_nbits = packed_value & 0xf;
    const unsigned int value_code = packed_value & ~0xf;
    // find prefix of the codeword and size of the prefix
    // (table rows are indexed by run length, columns by value bit size)
    const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits;
    const unsigned int packed_prefix = gpujpeg_huffman_gpu_lut[huffman_lut_idx];
    const unsigned int prefix_nbits = packed_prefix & 31;
    // compose packed codeword with its size
    return (packed_prefix + value_nbits) | (value_code >> prefix_nbits);
}
/**
 * Flush remaining codewords from buffer in shared memory to global memory output buffer. (CC >= 2.0)
 *
 * Must be called by all 32 threads of the warp (each thread stores one uint4).
 *
 * @param s_out                shared-memory codeword buffer of the warp
 * @param data_compressed      output pointer in global memory (advanced by codeword count)
 * @param remaining_codewords  count of pending codewords in s_out (reset to 0)
 * @param tid                  lane index within the warp (0..31)
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_flush_codewords(unsigned int * const s_out, unsigned int * &data_compressed, int & remaining_codewords, const int tid) {
    // this works for up to 4 * 32 remaining codewords
    if(remaining_codewords) {
        // pad remaining codewords with extra zero-sized codewords, not to have to use special case in serialization kernel, which saves 4 codewords at once
        s_out[remaining_codewords + tid] = 0;
        // save all remaining codewords at once (together with some zero sized padding codewords)
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        // update codeword counter
        data_compressed += remaining_codewords;
        remaining_codewords = 0;
    }
}
/**
 * Encode one 8x8 block (CC >= 2.0)
 *
 * Cooperative warp algorithm: each of the 32 lanes handles one pair of
 * coefficients (in zigzag order) and the warp collectively computes RLE zero
 * runs via ballots. Produced codewords are accumulated in the warp's shared
 * buffer and flushed to global memory in 16-byte chunks.
 *
 * NOTE(review): relies on mask-less __ballot and implicit warp-synchronous
 * execution, which are only valid on pre-Volta GPUs (CC < 7.0) — confirm
 * build targets before reuse.
 *
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(const int16_t * block, unsigned int * &data_compressed, unsigned int * const s_out,
                int & remaining_codewords, const int last_dc_idx, int tid, const int huffman_lut_offset)
{
    // each thread loads a pair of values (pair after zigzag reordering)
    const int load_idx = tid * 2;
    int in_even = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx]];
    const int in_odd = block[gpujpeg_huffman_gpu_encoder_order_natural[load_idx + 1]];
    // compute preceding zero count for even coefficient (actually compute the count multiplied by 16)
    const unsigned int nonzero_mask = (1 << tid) - 1;
    const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC is always treated as nonzero
    const unsigned int nonzero_bitmap_1 = __ballot(in_odd);
    const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1;
    const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask);
    int zeros_before_even = 2 * (zero_pair_count + tid - 32);
    if((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) {
        zeros_before_even += 1;
    }
    // true if any nonzero pixel follows thread's odd pixel
    const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask;
    // count of consecutive zeros before odd value (either one more than
    // even if even is zero or none if even value itself is nonzero)
    // (the count is actually multiplied by 16)
    int zeros_before_odd = in_even || !tid ? 0 : zeros_before_even + 1;
    // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited)
    // otherwise only trim extra bits from the counts of following zeros
    const int zero_count_mask = nonzero_follows ? 0xF : 0;
    zeros_before_even &= zero_count_mask;
    zeros_before_odd &= zero_count_mask;
    // pointer to LUT for encoding thread's even value
    // (only thread #0 uses DC table, others use AC table)
    int even_lut_offset = huffman_lut_offset;
    // first thread handles special DC coefficient
    if(0 == tid) {
        // first thread uses DC part of the table for its even value
        even_lut_offset += 256 + 1;
        // update last DC coefficient (saved at the special place at the end of the shared bufer)
        const int original_in_even = in_even;
        in_even -= ((int*)s_out)[last_dc_idx];
        ((int*)s_out)[last_dc_idx] = original_in_even;
    }
    // last thread handles special block-termination symbol
    if(0 == ((tid ^ 31) | in_odd)) {
        // this causes selection of huffman symbol at index 256 (which contains the termination symbol)
        zeros_before_odd = 16;
    }
    // each thread gets codeword for its two pixels
    unsigned int even_code = gpujpeg_huffman_gpu_encode_value(zeros_before_even, in_even, even_lut_offset);
    unsigned int odd_code = gpujpeg_huffman_gpu_encode_value(zeros_before_odd, in_odd, huffman_lut_offset);
    // concatenate both codewords into one if they are short enough
    const unsigned int even_code_size = even_code & 31;
    const unsigned int odd_code_size = odd_code & 31;
    const unsigned int total_size = even_code_size + odd_code_size;
    if(total_size <= 27) {
        even_code = total_size | ((odd_code & ~31) >> even_code_size) | (even_code & ~31);
        odd_code = 0;
    }
    // each thread get number of preceding nonzero codewords and total number of nonzero codewords in this block
    const unsigned int even_codeword_presence = __ballot(even_code);
    const unsigned int odd_codeword_presence = __ballot(odd_code);
    const int codeword_offset = __popc(nonzero_mask & even_codeword_presence)
                              + __popc(nonzero_mask & odd_codeword_presence);
    // each thread saves its values into temporary shared buffer
    if(even_code) {
        s_out[remaining_codewords + codeword_offset] = even_code;
        if(odd_code) {
            s_out[remaining_codewords + codeword_offset + 1] = odd_code;
        }
    }
    // advance count of codewords in shared memory buffer
    remaining_codewords += __popc(odd_codeword_presence) + __popc(even_codeword_presence);
    // flush some codewords to global memory if there are too many of them in shared buffer
    const int flush_count = 32 * 4; // = half of the buffer
    if(remaining_codewords > flush_count) {
        // move first half of the buffer into output buffer in global memory and update output pointer
        *((uint4*)data_compressed) = ((uint4*)s_out)[tid];
        data_compressed += flush_count;
        // shift remaining codewords to begin of the buffer and update their count
        ((uint4*)s_out)[tid] = ((uint4*)s_out)[flush_count / 4 + tid]; // 4 for 4 uints in uint4
        remaining_codewords -= flush_count;
    }
    // nothing to fail here
    return 0;
}
#endif // #if __CUDA_ARCH__ >= 200
/**
 * Huffman encoder kernel (For compute capability >= 2.0)
 *
 * One warp processes one segment; WARPS_NUM warps per threadblock.
 * Produces packed codewords (not a final bitstream) into a temporary buffer —
 * the serialization kernel turns them into the actual JPEG bitstream.
 *
 * @tparam CONTINUOUS_BLOCK_LIST  true for single-component scans where a
 *         segment's 8x8 blocks are contiguous; false to walk d_block_list
 *
 * @return void
 */
template <bool CONTINUOUS_BLOCK_LIST>
#if __CUDA_ARCH__ >= 200
__launch_bounds__(WARPS_NUM * 32, 1024 / (WARPS_NUM * 32))
#endif
__global__ static void
gpujpeg_huffman_encoder_encode_kernel_warp(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    uint8_t* d_data_compressed,
    const uint64_t* const d_block_list,
    int16_t* const d_data_quantized,
    struct gpujpeg_component* const d_component,
    const int comp_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
#if __CUDA_ARCH__ >= 200
    int warpidx  = threadIdx.x >> 5;
    int tid = threadIdx.x & 31;
    // per-warp codeword buffer: 64 uint4 + 1 extra uint4 (last-DC slots + padding)
    __shared__ uint4 s_out_all[(64 + 1) * WARPS_NUM];
    unsigned int * s_out = (unsigned int*)(s_out_all + warpidx * (64 + 1));
    // Number of remaining codewords in shared buffer
    int remaining_codewords = 0;
    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_index = block_idx * WARPS_NUM + warpidx;
    // first thread initializes compact output size for next kernel
    if(0 == tid && 0 == warpidx && 0 == block_idx) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }
    // stop if out of segment bounds
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // Initialize last DC coefficients (3 slots at offset 256 of the uint view)
    if(tid < 3) {
        s_out[256 + tid] = 0;
    }
    // Prepare data pointers
    unsigned int * data_compressed = (unsigned int*)(d_data_compressed + segment->data_temp_index);
    unsigned int * data_compressed_start = data_compressed;
    // Pre-add thread ID to output pointer (it's allways used only with it)
    data_compressed += (tid * 4);
    // Encode all block in segment
    if(CONTINUOUS_BLOCK_LIST) {
        // Get component for current scan
        const struct gpujpeg_component* component = &d_component[segment->scan_index];
        // mcu size of the component
        const int comp_mcu_size = component->mcu_size;
        // Get component data for MCU (first block)
        const int16_t* block = component->d_data_quantized + (segment->scan_segment_index * component->segment_mcu_count) * comp_mcu_size;
        // Get huffman table offset
        const int huffman_table_offset = component->type == GPUJPEG_COMPONENT_LUMINANCE ? 0 : (256 + 1) * 2; // possibly skips luminance tables
        // Encode MCUs in segment
        for (int block_count = segment->mcu_count; block_count--;) {
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, 256, tid, huffman_table_offset);
            // Advance to next block
            block += comp_mcu_size;
        }
    } else {
        // Pointer to segment's list of 8x8 blocks and their count
        const uint64_t* packed_block_info_ptr = d_block_list + segment->block_index_list_begin;
        // Encode all blocks
        for(int block_count = segment->block_count; block_count--;) {
            // Get pointer to next block input data and info about its color type
            const uint64_t packed_block_info = *(packed_block_info_ptr++);
            // Get coder parameters (low 7 bits select the last-DC slot)
            const int last_dc_idx = 256 + (packed_block_info & 0x7f);
            // Get offset to right part of huffman table (bit 7 = chroma flag)
            const int huffman_table_offset = packed_block_info & 0x80 ? (256 + 1) * 2 : 0; // possibly skips luminance tables
            // Source data pointer (high bits = offset into quantized data)
            int16_t* block = &d_data_quantized[packed_block_info >> 8];
            // Encode 8x8 block
            gpujpeg_huffman_gpu_encoder_encode_block(block, data_compressed, s_out, remaining_codewords, last_dc_idx, tid, huffman_table_offset);
        }
    }
    // flush remaining codewords
    gpujpeg_huffman_gpu_encoder_flush_codewords(s_out, data_compressed, remaining_codewords, tid);
    // Set number of codewords.
    if (tid == 0 ) {
        segment->data_compressed_size = data_compressed - data_compressed_start;
    }
#endif // #if __CUDA_ARCH__ >= 200
}
#define SERIALIZATION_THREADS_PER_TBLOCK 192
/**
 * Codeword serialization kernel (CC >= 2.0).
 *
 * One thread serializes one segment: it reads the packed codewords produced
 * by the warp encoder kernel, emits them as a byte-stuffed JPEG bitstream,
 * and terminates the segment with a restart marker. Supports 2D grids
 * (decodes blockIdx.y).
 *
 * @return void
 */
#if __CUDA_ARCH__ >= 200
__launch_bounds__(SERIALIZATION_THREADS_PER_TBLOCK, 1536 / SERIALIZATION_THREADS_PER_TBLOCK)
#endif
__global__ static void
gpujpeg_huffman_encoder_serialization_kernel(
    struct gpujpeg_segment* d_segment,
    int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest
) {
#if __CUDA_ARCH__ >= 200
    // Temp buffer for all threads of the threadblock
    __shared__ uint4 s_temp_all[2 * SERIALIZATION_THREADS_PER_TBLOCK];
    // Thread's 32 bytes in shared memory for output composition
    uint4 * const s_temp = s_temp_all + threadIdx.x * 2;
    // Select Segment
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    int segment_index = block_idx * SERIALIZATION_THREADS_PER_TBLOCK + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    // Thread's segment
    struct gpujpeg_segment* const segment = &d_segment[segment_index];
    // Input and output pointers (serialization is done in place in the temp buffer)
    const int data_offset = segment->data_temp_index;
    uint4 * const d_dest_stream_start = (uint4*)(d_dest + data_offset);
    uint4 * d_dest_stream = d_dest_stream_start;
    const uint4 * d_src_codewords = (uint4*)(d_src + data_offset);
    // number of bytes in the temp buffer, remaining bits and their count
    int byte_count = 0, bit_count = 0;
    unsigned int remaining_bits = 0;
    // "data_compressed_size" is now initialized to number of codewords to be serialized
    for(int cword_tuple_count = (segment->data_compressed_size + 3) >> 2; cword_tuple_count--; ) // reading 4 codewords at once
    {
        // read 4 codewords and advance input pointer to next ones
        const uint4 cwords = *(d_src_codewords++);
        // encode first pair of codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.x);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.y);
        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bits
            byte_count -= 16;
        }
        // encode other two codewords
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.z);
        gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, cwords.w);
        // possibly flush output if have at least 16 bytes
        if(byte_count >= 16) {
            // write 16 bytes into destination buffer
            *(d_dest_stream++) = s_temp[0];
            // move remaining bytes to first half of the buffer
            s_temp[0] = s_temp[1];
            // update number of remaining bits
            byte_count -= 16;
        }
    }
    // Emit left bits (pad with one-bits up to a byte boundary)
    gpujpeg_huffman_gpu_encoder_emit_bits(remaining_bits, byte_count, bit_count, (uint8_t*)s_temp, 0xfe000007);
    // Terminate codestream with restart marker
    ((uint8_t*)s_temp)[byte_count + 0] = 0xFF;
    ((uint8_t*)s_temp)[byte_count + 1] = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    // flush remaining bytes
    d_dest_stream[0] = s_temp[0];
    d_dest_stream[1] = s_temp[1];
    // Set compressed size
    segment->data_compressed_size = (d_dest_stream - d_dest_stream_start) * 16 + byte_count + 2;
#endif // #if __CUDA_ARCH__ >= 200
}
/**
 * Huffman coder compact output allocation kernel - serially reserves
 * some space for compressed output of segments in output buffer.
 * (For CC 1.0 - a workaround for missing atomic operations.)
 *
 * Only single threadblock with 512 threads is launched.
 */
__global__ static void
gpujpeg_huffman_encoder_allocation_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // offsets of segments
    __shared__ unsigned int s_segment_offsets[512];
    // cumulative sum of bytes of all segments
    unsigned int total_byte_count = 0;
    // iterate over all segments, 512 at a time (count rounded up to multiple of 512)
    const unsigned int segment_idx_end = (segment_count + 511) & ~511;
    for(unsigned int segment_idx = threadIdx.x; segment_idx < segment_idx_end; segment_idx += 512) {
        // all threads load byte sizes of their segments (rounded up to next multiple of 16 B) into the shared array
        s_segment_offsets[threadIdx.x] = segment_idx < segment_count
                ? (d_segment[segment_idx].data_compressed_size + 15) & ~15
                : 0;
        // first thread runs a sort of serial prefix sum over the segment sizes to get their offsets
        __syncthreads();
        if(0 == threadIdx.x) {
            #pragma unroll 4
            for(int i = 0; i < 512; i++) {
                const unsigned int segment_size = s_segment_offsets[i];
                s_segment_offsets[i] = total_byte_count;
                total_byte_count += segment_size;
            }
        }
        __syncthreads();
        // all threads write offsets back into corresponding segment structures
        if(segment_idx < segment_count) {
            d_segment[segment_idx].data_compressed_index = s_segment_offsets[threadIdx.x];
        }
    }
    // first thread finally saves the total sum of bytes needed for compressed data
    if(threadIdx.x == 0) {
        *d_gpujpeg_huffman_output_byte_count = total_byte_count;
    }
}
/**
 * Huffman coder output compaction kernel.
 *
 * One warp copies one segment's bitstream from the temporary buffer into its
 * final place in the compact output buffer. Output offsets come either from
 * the allocation kernel (CC 1.0) or from an atomic counter (newer CCs).
 *
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_compaction_kernel (
    struct gpujpeg_segment* const d_segment,
    const int segment_count,
    const uint8_t* const d_src,
    uint8_t* const d_dest,
    unsigned int * d_gpujpeg_huffman_output_byte_count
) {
    // get some segment (size of threadblocks is 32 x N, so threadIdx.y is warp index)
    const int block_idx = blockIdx.x + blockIdx.y * gridDim.x;
    const int segment_idx = threadIdx.y + block_idx * blockDim.y;
    if(segment_idx >= segment_count) {
        return;
    }
    // temp variables for all warps (one output pointer slot per warp)
    __shared__ uint4* volatile s_out_ptrs[WARPS_NUM];
    // get info about the segment
    const unsigned int segment_byte_count = (d_segment[segment_idx].data_compressed_size + 15) & ~15; // number of bytes rounded up to multiple of 16
    const unsigned int segment_in_offset = d_segment[segment_idx].data_temp_index; // this should be aligned at least to 16byte boundary
    // first thread of each warp reserves space in output buffer
    if(0 == threadIdx.x) {
        // Either load precomputed output offset (for CC 1.0) or compute it now (for CCs with atomic operations)
        #if __CUDA_ARCH__ == 100
        const unsigned int segment_out_offset = d_segment[segment_idx].data_compressed_index;
        #else
        const unsigned int segment_out_offset = atomicAdd(d_gpujpeg_huffman_output_byte_count, segment_byte_count);
        d_segment[segment_idx].data_compressed_index = segment_out_offset;
        #endif
        s_out_ptrs[threadIdx.y] = (uint4*)(d_dest + segment_out_offset);
    }
    // all threads read output buffer offset for their segment and prepare input and output pointers and number of copy iterations
    const uint4 * d_in = threadIdx.x + (uint4*)(d_src + segment_in_offset);
    uint4 * d_out = threadIdx.x + s_out_ptrs[threadIdx.y];
    unsigned int copy_iterations = segment_byte_count / 512; // 512 is number of bytes copied in each iteration (32 threads * 16 bytes per thread)
    // copy the data!
    while(copy_iterations--) {
        *d_out = *d_in;
        d_out += 32;
        d_in += 32;
    }
    // copy remaining bytes (less than 512 bytes)
    if((threadIdx.x * 16) < (segment_byte_count & 511)) {
        *d_out = *d_in;
    }
}
// Threadblock size for CC 1.x kernel
#define THREAD_BLOCK_SIZE 48
/**
 * Write one byte to compressed data (CC 1.x)
 *
 * @param data_compressed Data compressed (pointer, advanced by 1)
 * @param value Byte value to write
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(value); \
    data_compressed++; }
/**
 * Write two bytes to compressed data (CC 1.x)
 *
 * @param data_compressed Data compressed (pointer, advanced by 2)
 * @param value Two-byte value to write (big-endian, as required by JPEG)
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_emit_2byte(data_compressed, value) { \
    *data_compressed = (uint8_t)(((value) >> 8) & 0xFF); \
    data_compressed++; \
    *data_compressed = (uint8_t)((value) & 0xFF); \
    data_compressed++; }
/**
 * Write marker to compressed data (CC 1.x)
 *
 * @param data_compressed Data compressed (pointer, advanced by 2)
 * @param marker Marker to write (JPEG_MARKER_...)
 * @return void
 */
#define gpujpeg_huffman_gpu_encoder_marker(data_compressed, marker) { \
    *data_compressed = 0xFF;\
    data_compressed++; \
    *data_compressed = (uint8_t)(marker); \
    data_compressed++; }
/**
 * Output bits to the file. Only the right 24 bits of put_buffer are used;
 * the valid bits are left-justified in this part. At most 16 bits can be
 * passed to EmitBits in one call, and we never retain more than 7 bits
 * in put_buffer between calls, so 24 bits are sufficient. Version for CC 1.x
 *
 * @param code Huffman code
 * @param size Size in bits of the Huffman code
 * @param put_value Bit accumulator (left-justified in the low 24 bits, updated)
 * @param put_bits Number of valid bits in put_value (updated)
 * @param data_compressed Output pointer (advanced as bytes are emitted)
 * @return 0 on success, -1 if size == 0 (invalid Huffman table entry)
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_emit_bits(unsigned int code, int size, int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // This routine is heavily used, so it's worth coding tightly
    int _put_buffer = (int)code;
    int _put_bits = put_bits;
    // If size is 0, caller used an invalid Huffman table entry
    if ( size == 0 )
        return -1;
    // Mask off any extra bits in code
    _put_buffer &= (((int)1) << size) - 1;
    // New number of bits in buffer
    _put_bits += size;
    // Align incoming bits
    _put_buffer <<= 24 - _put_bits;
    // And merge with old buffer contents
    _put_buffer |= put_value;
    // If there are more than 8 bits, write it out
    unsigned char uc;
    while ( _put_bits >= 8 ) {
        // Write one byte out
        uc = (unsigned char) ((_put_buffer >> 16) & 0xFF);
        gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, uc);
        // If need to stuff a zero byte (JPEG byte stuffing after 0xFF)
        if ( uc == 0xFF ) {
            // Write zero byte out
            gpujpeg_huffman_gpu_encoder_emit_byte(data_compressed, 0);
        }
        _put_buffer <<= 8;
        _put_bits -= 8;
    }
    // update state variables
    put_value = _put_buffer;
    put_bits = _put_bits;
    return 0;
}
/**
 * Flush remaining bits of the coder state (CC 1.x).
 *
 * Pads the bitstream with up to 7 one-bits so the buffered byte is fully
 * emitted, then resets the coder state to empty.
 *
 * @param put_value        In/out bit buffer; reset to 0 on success
 * @param put_bits         In/out bit count; reset to 0 on success
 * @param data_compressed  In/out output pointer
 * @return void
 */
__device__ static void
gpujpeg_huffman_gpu_encoder_emit_left_bits(int & put_value, int & put_bits, uint8_t* & data_compressed)
{
    // Fill up to 7 trailing bits with ones; bail out if emitting fails.
    const int status = gpujpeg_huffman_gpu_encoder_emit_bits(0x7F, 7, put_value, put_bits, data_compressed);
    if ( status != 0 )
        return;
    // Buffer is now byte-aligned; clear the coder state.
    put_value = 0;
    put_bits = 0;
}
/**
 * Encode one 8x8 block of quantized DCT coefficients (for CC 1.x).
 *
 * Each thread stages its own 64 coefficients into its private slice of
 * shared memory (no barrier needed — every thread reads only the slice it
 * wrote), then Huffman-codes the DC difference per JPEG section F.1.2.1
 * and the run-length-coded AC coefficients per section F.1.2.2.
 *
 * @param put_value        In/out bit buffer state (see emit_bits)
 * @param put_bits         In/out count of valid bits in put_value
 * @param dc               In/out DC predictor for the block's component
 * @param data             Pointer to this thread's 64 quantized coefficients
 * @param data_compressed  In/out output byte pointer
 * @param d_table_dc       Huffman table for the DC coefficient
 * @param d_table_ac       Huffman table for the AC coefficients
 * @return 0 if succeeds, otherwise nonzero
 */
__device__ static int
gpujpeg_huffman_gpu_encoder_encode_block(int & put_value, int & put_bits, int & dc, int16_t* data, uint8_t* & data_compressed,
    struct gpujpeg_table_huffman_encoder* d_table_dc, struct gpujpeg_table_huffman_encoder* d_table_ac)
{
    // Copy 128 bytes (64 x int16) per thread using wide 8-byte loads.
    typedef uint64_t loading_t;
    const int loading_iteration_count = 64 * 2 / sizeof(loading_t);
    // Load block to shared memory
    __shared__ int16_t s_data[64 * THREAD_BLOCK_SIZE];
    for ( int i = 0; i < loading_iteration_count; i++ ) {
        ((loading_t*)s_data)[loading_iteration_count * threadIdx.x + i] = ((loading_t*)data)[i];
    }
    int data_start = 64 * threadIdx.x;
    // Encode the DC coefficient difference per section F.1.2.1
    int temp = s_data[data_start + 0] - dc;
    dc = s_data[data_start + 0];
    int temp2 = temp;
    if ( temp < 0 ) {
        // Temp is abs value of input
        temp = -temp;
        // For a negative input, want temp2 = bitwise complement of abs(input)
        // This code assumes we are on a two's complement machine
        temp2--;
    }
    // Find the number of bits needed for the magnitude of the coefficient
    int nbits = 0;
    while ( temp ) {
        nbits++;
        temp >>= 1;
    }
    // Write category number
    if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_dc->code[nbits], d_table_dc->size[nbits], put_value, put_bits, data_compressed) != 0 ) {
        return -1;
    }
    // Write category offset (EmitBits rejects calls with size 0)
    if ( nbits ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    // Encode the AC coefficients per section F.1.2.2 (r = run length of zeros)
    int r = 0;
    for ( int k = 1; k < 64; k++ )
    {
        // Coefficients are visited in zig-zag order via the natural-order LUT.
        temp = s_data[data_start + gpujpeg_huffman_gpu_encoder_order_natural[k]];
        if ( temp == 0 ) {
            r++;
        }
        else {
            // If run length > 15, must emit special run-length-16 codes (0xF0)
            while ( r > 15 ) {
                if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0xF0], d_table_ac->size[0xF0], put_value, put_bits, data_compressed) != 0 )
                    return -1;
                r -= 16;
            }
            temp2 = temp;
            if ( temp < 0 ) {
                // temp is abs value of input
                temp = -temp;
                // This code assumes we are on a two's complement machine
                temp2--;
            }
            // Find the number of bits needed for the magnitude of the coefficient
            // there must be at least one 1 bit
            nbits = 1;
            while ( (temp >>= 1) )
                nbits++;
            // Emit Huffman symbol for run length / number of bits
            int i = (r << 4) + nbits;
            if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[i], d_table_ac->size[i], put_value, put_bits, data_compressed) != 0 )
                return -1;
            // Write Category offset
            if ( gpujpeg_huffman_gpu_encoder_emit_bits((unsigned int) temp2, nbits, put_value, put_bits, data_compressed) != 0 )
                return -1;
            r = 0;
        }
    }
    // If all the left coefs were zero, emit an end-of-block code
    if ( r > 0 ) {
        if ( gpujpeg_huffman_gpu_encoder_emit_bits(d_table_ac->code[0], d_table_ac->size[0], put_value, put_bits, data_compressed) != 0 )
            return -1;
    }
    return 0;
}
/**
 * Huffman encoder kernel (for CC 1.x): one thread encodes one whole segment.
 *
 * @param d_component  Per-component info (quantized data, sampling factors, ...)
 * @param d_segment    Segment descriptors; data_compressed_size is written back
 * @param comp_count   1 for non-interleaved scans, otherwise component count
 * @param segment_count  Total number of segments
 * @param d_data_compressed  Temporary output buffer, indexed by data_temp_index
 * @param d_gpujpeg_huffman_output_byte_count  Zeroed here for the next kernel
 * @return void
 */
__global__ static void
gpujpeg_huffman_encoder_encode_kernel(
    struct gpujpeg_component* d_component,
    struct gpujpeg_segment* d_segment,
    int comp_count,
    int segment_count,
    uint8_t* d_data_compressed,
    unsigned int * d_gpujpeg_huffman_output_byte_count
)
{
    int segment_index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( segment_index >= segment_count )
        return;
    struct gpujpeg_segment* segment = &d_segment[segment_index];
    // first thread initializes compact output size for next kernel
    if(0 == segment_index) {
        *d_gpujpeg_huffman_output_byte_count = 0;
    }
    // Initialize huffman coder
    int put_value = 0;
    int put_bits = 0;
    // Per-component DC predictors, reset at the start of every segment.
    int dc[GPUJPEG_MAX_COMPONENT_COUNT];
    for ( int comp = 0; comp < GPUJPEG_MAX_COMPONENT_COUNT; comp++ )
        dc[comp] = 0;
    // Prepare data pointers
    uint8_t* data_compressed = &d_data_compressed[segment->data_temp_index];
    uint8_t* data_compressed_start = data_compressed;
    // Non-interleaving mode
    if ( comp_count == 1 ) {
        // NOTE: shadows the thread-id segment_index above; this is the
        // scan-local segment index used to locate the segment's MCUs.
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            // Get component for current scan
            struct gpujpeg_component* component = &d_component[segment->scan_index];
            // Get component data for MCU
            int16_t* block = &component->d_data_quantized[(segment_index * component->segment_mcu_count + mcu_index) * component->mcu_size];
            // Get coder parameters
            int & component_dc = dc[segment->scan_index];
            // Get huffman tables
            struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
            struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
            if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
            } else {
                d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
            }
            // Encode 8x8 block
            if ( gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac) != 0 )
                break;
        }
    }
    // Interleaving mode
    else {
        // NOTE: shadows the thread-id segment_index above (see comment in the
        // non-interleaved branch).
        int segment_index = segment->scan_segment_index;
        // Encode MCUs in segment
        for ( int mcu_index = 0; mcu_index < segment->mcu_count; mcu_index++ ) {
            //assert(segment->scan_index == 0);
            for ( int comp = 0; comp < comp_count; comp++ ) {
                struct gpujpeg_component* component = &d_component[comp];
                // Prepare mcu indexes
                int mcu_index_x = (segment_index * component->segment_mcu_count + mcu_index) % component->mcu_count_x;
                int mcu_index_y = (segment_index * component->segment_mcu_count + mcu_index) / component->mcu_count_x;
                // Compute base data index
                int data_index_base = mcu_index_y * (component->mcu_size * component->mcu_count_x) + mcu_index_x * (component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                // For all vertical 8x8 blocks
                for ( int y = 0; y < component->sampling_factor.vertical; y++ ) {
                    // Compute base row data index
                    int data_index_row = data_index_base + y * (component->mcu_count_x * component->mcu_size_x * GPUJPEG_BLOCK_SIZE);
                    // For all horizontal 8x8 blocks
                    for ( int x = 0; x < component->sampling_factor.horizontal; x++ ) {
                        // Compute 8x8 block data index
                        int data_index = data_index_row + x * GPUJPEG_BLOCK_SIZE * GPUJPEG_BLOCK_SIZE;
                        // Get component data for MCU
                        int16_t* block = &component->d_data_quantized[data_index];
                        // Get coder parameters
                        int & component_dc = dc[comp];
                        // Get huffman tables
                        struct gpujpeg_table_huffman_encoder* d_table_dc = NULL;
                        struct gpujpeg_table_huffman_encoder* d_table_ac = NULL;
                        if ( component->type == GPUJPEG_COMPONENT_LUMINANCE ) {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC];
                        } else {
                            d_table_dc = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC];
                            d_table_ac = &gpujpeg_huffman_gpu_encoder_table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC];
                        }
                        // Encode 8x8 block
                        gpujpeg_huffman_gpu_encoder_encode_block(put_value, put_bits, component_dc, block, data_compressed, d_table_dc, d_table_ac);
                    }
                }
            }
        }
    }
    // Emit left bits
    if ( put_bits > 0 )
        gpujpeg_huffman_gpu_encoder_emit_left_bits(put_value, put_bits, data_compressed);
    // Output restart marker (RST0..RST7 cycle, per the JPEG restart scheme)
    int restart_marker = GPUJPEG_MARKER_RST0 + (segment->scan_segment_index % 8);
    gpujpeg_huffman_gpu_encoder_marker(data_compressed, restart_marker);
    // Set compressed size
    segment->data_compressed_size = data_compressed - data_compressed_start;
}
/**
 * Adds packed coefficients into the GPU version of Huffman lookup table.
 *
 * Each of the 257 destination entries packs the left-justified codeword in
 * the high bits and its bit-length in the low bits. Index 256 aliases source
 * entry 0 (via i & 0xFF) as an extra sentinel slot.
 *
 * @param dest   Destination table (257 uint32 entries)
 * @param src    CPU Huffman table to pack
 * @param is_ac  True for AC tables; entry 0 is then reserved (zeroed)
 */
void
gpujpeg_huffman_gpu_add_packed_table(uint32_t * const dest, const struct gpujpeg_table_huffman_encoder * const src, const bool is_ac) {
    // make a upshifted copy of the table for GPU encoding
    for ( int i = 0; i <= 256; i++ ) {
        const int size = src->size[i & 0xFF];
        // Guard size == 0 (unused code): shifting a 32-bit value by 32 bits
        // is undefined behavior, so emit an all-zero entry instead.
        dest[i] = size ? ((src->code[i & 0xFF] << (32 - size)) | size) : 0;
    }
    // reserve first index in GPU version of AC table for special purposes
    if ( is_ac ) {
        dest[0] = 0;
    }
}
/** Documented at declaration */
struct gpujpeg_huffman_gpu_encoder *
gpujpeg_huffman_gpu_encoder_create(const struct gpujpeg_encoder * encoder)
{
    // Allocate and zero the host-side encoder structure.
    struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder = (struct gpujpeg_huffman_gpu_encoder *) malloc(sizeof(struct gpujpeg_huffman_gpu_encoder));
    if ( huffman_gpu_encoder == NULL ) {
        return NULL;
    }
    memset(huffman_gpu_encoder, 0, sizeof(struct gpujpeg_huffman_gpu_encoder));
    // Allocate device counter holding the total compacted output size.
    cudaMalloc((void**)&huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int));
    gpujpeg_cuda_check_error("Allocation of huffman output byte count failed", return NULL);
    // Initialize decomposition lookup table
    cudaFuncSetCacheConfig(gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel, cudaFuncCachePreferShared);
    gpujpeg_huffman_gpu_encoder_value_decomposition_init_kernel<<<32, 256>>>();  // 8192 threads total
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // equivalent replacement.
    cudaDeviceSynchronize();
    gpujpeg_cuda_check_error("Decomposition LUT initialization failed", return NULL);
    // compose GPU version of the huffman LUT and copy it into GPU memory (for CC >= 2.0)
    uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4];
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 0, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 1, &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC], false);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 2, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_AC], true);
    gpujpeg_huffman_gpu_add_packed_table(gpujpeg_huffman_cpu_lut + 257 * 3, &encoder->table_huffman[GPUJPEG_COMPONENT_CHROMINANCE][GPUJPEG_HUFFMAN_DC], false);
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_lut,
        gpujpeg_huffman_cpu_lut,
        (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_lut),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman LUT copy)", return NULL);
    // Copy original Huffman coding tables to GPU memory (for CC 1.x)
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_table_huffman,
        &encoder->table_huffman[GPUJPEG_COMPONENT_LUMINANCE][GPUJPEG_HUFFMAN_DC],
        sizeof(gpujpeg_huffman_gpu_encoder_table_huffman),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (Huffman coding table)", return NULL);
    // Copy natural order to constant device memory
    cudaMemcpyToSymbol(
        gpujpeg_huffman_gpu_encoder_order_natural,
        gpujpeg_order_natural,
        GPUJPEG_ORDER_NATURAL_SIZE * sizeof(int),
        0,
        cudaMemcpyHostToDevice
    );
    gpujpeg_cuda_check_error("Huffman encoder init (natural order copy)", return NULL);
    // Configure more shared memory for all kernels
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<true>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel_warp<false>, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_serialization_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_compaction_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_encode_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(gpujpeg_huffman_encoder_allocation_kernel, cudaFuncCachePreferShared);
    return huffman_gpu_encoder;
}
/** Releases all resources owned by a Huffman GPU encoder. */
void
gpujpeg_huffman_gpu_encoder_destroy(struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder)
{
    assert(huffman_gpu_encoder != NULL);
    // Free the device-side output byte counter before the host structure.
    unsigned int * d_byte_count = huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count;
    if ( d_byte_count != NULL ) {
        cudaFree(d_byte_count);
    }
    free(huffman_gpu_encoder);
}
/**
 * Get grid size for specified count of threadblocks. (Grid size is limited
 * to 65536 in both directions, so if we need more threadblocks, we must use
 * both x and y coordinates.)
 */
dim3
gpujpeg_huffman_gpu_encoder_grid_size(int tblock_count)
{
    dim3 size(tblock_count);
    // Halve x (rounding up) and double y until x fits the 65535 limit.
    for ( ; size.x > 0xffff; size.y <<= 1 ) {
        size.x = (size.x + 1) >> 1;
    }
    return size;
}
/** Documented at declaration */
int
gpujpeg_huffman_gpu_encoder_encode(struct gpujpeg_encoder* encoder, struct gpujpeg_huffman_gpu_encoder * huffman_gpu_encoder, unsigned int * output_byte_count)
{
    // Get coder
    struct gpujpeg_coder* coder = &encoder->coder;
    assert(coder->param.restart_interval > 0);
    // Select encoder kernel which either expects continuos segments of blocks or uses block lists
    int comp_count = 1;
    if ( coder->param.interleaved == 1 )
        comp_count = coder->param_image.comp_count;
    assert(comp_count >= 1 && comp_count <= GPUJPEG_MAX_COMPONENT_COUNT);
    // Select encoder kernel based on compute capability
    if ( encoder->coder.cuda_cc_major < 2 ) {
        // Run kernel (one thread per segment)
        dim3 thread(THREAD_BLOCK_SIZE);
        dim3 grid(gpujpeg_div_and_round_up(coder->segment_count, thread.x));
        gpujpeg_huffman_encoder_encode_kernel<<<grid, thread, 0, *(encoder->stream)>>>(
            coder->d_component,
            coder->d_segment,
            comp_count,
            coder->segment_count,
            coder->d_temp_huffman,
            huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
        );
        gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
    } else {
        // Run encoder kernel (one warp per segment)
        dim3 thread(32 * WARPS_NUM);
        dim3 grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, (thread.x / 32)));
        if(comp_count == 1) {
            gpujpeg_huffman_encoder_encode_kernel_warp<true><<<grid, thread, 0, *(encoder->stream)>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        } else {
            gpujpeg_huffman_encoder_encode_kernel_warp<false><<<grid, thread, 0, *(encoder->stream)>>>(
                coder->d_segment,
                coder->segment_count,
                coder->d_data_compressed,
                coder->d_block_list,
                coder->d_data_quantized,
                coder->d_component,
                comp_count,
                huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
            );
            gpujpeg_cuda_check_error("Huffman encoding failed", return -1);
        }
        // Run codeword serialization kernel
        const int num_serialization_tblocks = gpujpeg_div_and_round_up(coder->segment_count, SERIALIZATION_THREADS_PER_TBLOCK);
        const dim3 serialization_grid = gpujpeg_huffman_gpu_encoder_grid_size(num_serialization_tblocks);
        // Fixed: launch with the 2D grid computed above. The raw threadblock
        // count was passed before, which left serialization_grid unused and
        // would exceed the 65535 per-dimension grid limit for huge images.
        gpujpeg_huffman_encoder_serialization_kernel<<<serialization_grid, SERIALIZATION_THREADS_PER_TBLOCK, 0, *(encoder->stream)>>>(
            coder->d_segment,
            coder->segment_count,
            coder->d_data_compressed,
            coder->d_temp_huffman
        );
        gpujpeg_cuda_check_error("Codeword serialization failed", return -1);
    }
    // No atomic operations in CC 1.0 => run output size computation kernel to allocate the output buffer space
    if ( encoder->coder.cuda_cc_major == 1 && encoder->coder.cuda_cc_minor == 0 ) {
        gpujpeg_huffman_encoder_allocation_kernel<<<1, 512, 0, *(encoder->stream)>>>(coder->d_segment, coder->segment_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count);
        gpujpeg_cuda_check_error("Huffman encoder output allocation failed", return -1);
    }
    // Run output compaction kernel (one warp per segment)
    const dim3 compaction_thread(32, WARPS_NUM);
    const dim3 compaction_grid = gpujpeg_huffman_gpu_encoder_grid_size(gpujpeg_div_and_round_up(coder->segment_count, WARPS_NUM));
    gpujpeg_huffman_encoder_compaction_kernel<<<compaction_grid, compaction_thread, 0, *(encoder->stream)>>>(
        coder->d_segment,
        coder->segment_count,
        coder->d_temp_huffman,
        coder->d_data_compressed,
        huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count
    );
    gpujpeg_cuda_check_error("Huffman output compaction failed", return -1);
    // Read and return number of occupied bytes. NOTE: the copy is async on
    // the encoder stream, so the value is valid only after the caller
    // synchronizes that stream.
    cudaMemcpyAsync(output_byte_count, huffman_gpu_encoder->d_gpujpeg_huffman_output_byte_count, sizeof(unsigned int), cudaMemcpyDeviceToHost, *(encoder->stream));
    gpujpeg_cuda_check_error("Huffman output size getting failed", return -1);
    // indicate success
    return 0;
}
|
6f4abfd8902988b731afc8f7029906520743e43e.hip | // !!! This is a file automatically generated by hipify!!!
# pragma warning (disable:4819)
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Intentionally empty kernel; performs no work (not launched by main below).
__global__
void doNothing(){
    ;
}
/**
 * Print the sizes of fundamental types on this platform.
 *
 * sizeof yields size_t, which does not match printf's %d conversion
 * (undefined behavior on LP64 platforms); each value is cast to int to
 * keep the original output format while making the call well-defined.
 */
int main()
{
    printf("size of int: %d \n", (int)sizeof(int));
    printf("size of __int64: %d \n", (int)sizeof(__int64));  // MSVC-specific type
    printf("size of size_t: %d \n", (int)sizeof(size_t));
    printf("size of float: %d \n", (int)sizeof(float));
    printf("size of double: %d \n", (int)sizeof(double));
    return 0;
}
| 6f4abfd8902988b731afc8f7029906520743e43e.cu | # pragma warning (disable:4819)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Intentionally empty kernel; performs no work (not launched by main below).
__global__
void doNothing(){
    ;
}
/**
 * Print the sizes of fundamental types on this platform.
 *
 * sizeof yields size_t, which does not match printf's %d conversion
 * (undefined behavior on LP64 platforms); each value is cast to int to
 * keep the original output format while making the call well-defined.
 */
int main()
{
    printf("size of int: %d \n", (int)sizeof(int));
    printf("size of __int64: %d \n", (int)sizeof(__int64));  // MSVC-specific type
    printf("size of size_t: %d \n", (int)sizeof(size_t));
    printf("size of float: %d \n", (int)sizeof(float));
    printf("size of double: %d \n", (int)sizeof(double));
    return 0;
}
|
6015bca0a9fed011a9b5d77ff1619b6b8afbdc43.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/system/hip/execution_policy.h>
#ifdef __HIPCC__
// Device compilation: map the portability macros to device qualifiers and
// a grid-stride loop over n elements.
# define GRAIN_DEVICE_HOST __device__ __host__
# define GRAIN_GLOBAL __global__
# define GRAIN_PARALLEL_FOR(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#else
// Host compilation: the qualifiers vanish and the loop becomes an
// OpenMP-parallel for over the same range.
# define GRAIN_DEVICE_HOST
# define GRAIN_GLOBAL
# define GRAIN_PARALLEL_FOR(i, n) \
    _Pragma("omp parallel for") \
    for (int i = 0; i < (n); ++i)
#endif
// template <typename T>
// GRAIN_GLOBAL void saxpy(T* res, const T* x, const T* y, int n) {
// NOTE(review): despite the name, this computes an elementwise sum
// res = x + y — there is no scalar `a` parameter, so it is not a true saxpy.
GRAIN_GLOBAL void saxpy(float* res, const float* x, const float* y, int n) {
    GRAIN_PARALLEL_FOR(i, n) {
        res[i] = x[i] + y[i];
    }
}
// ReLU activation applied in place: negative entries become zero.
GRAIN_GLOBAL void relu(float* x, int n) {
    GRAIN_PARALLEL_FOR(i, n) {
        x[i] = (x[i] < 0) ? 0 : x[i];
    }
}
// ReLU backward pass: the upstream gradient gy flows through only where
// the forward input x was strictly positive; elsewhere gx is zero.
GRAIN_GLOBAL void reluGrad(float* gx, const float* gy, const float* x, int n) {
    GRAIN_PARALLEL_FOR(i, n) {
        if (x[i] <= 0) {
            gx[i] = 0;
        } else {
            gx[i] = gy[i];
        }
    }
}
// TODO faster implementation using thrust
// Serial reduction: the first thread of each block sums all N elements of x
// into result[0].
// Fixed: accumulate in a register instead of read-modify-writing result[0]
// in global memory, which both wasted bandwidth and interleaved unsafely
// between blocks when launched with more than one block (every block now
// writes the same final value, which is benign).
GRAIN_GLOBAL void sum(const float* x, float* result, int N) {
    if (threadIdx.x != 0) return;
    float acc = 0.0f;
    for (int n = 0; n < N; ++n) {
        acc += x[n];
    }
    result[0] = acc;
}
// Shared-memory tree reduction: each block reduces up to 2*blockDim.x input
// elements and writes its partial sum to g_odata[blockIdx.x]. Requires
// blockDim.x * sizeof(float) bytes of dynamic shared memory and a
// power-of-two blockDim.x.
// Fixes over the original:
//  * out-of-range threads no longer `return` before __syncthreads() (a
//    divergent-barrier hazard); they contribute 0 instead,
//  * the loop bound is s < blockDim.x — with s <= blockDim.x the final
//    iteration read one element past the end of the shared array,
//  * each thread now also folds in its second element (the i + blockDim.x
//    slot implied by the 2*blockDim.x block stride, previously skipped).
GRAIN_GLOBAL void sum_faster(const float *g_idata, float *g_odata, uint n, uint N) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    float v = 0.0f;
    if (i < N) v = g_idata[i];
    if (i + blockDim.x < N) v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Negative log-likelihood loss: for each batch row i, add -logp[i, target]
// into *loss and bump *count, skipping rows whose label equals ignoreIndex.
// Accumulation uses atomics because rows are processed in parallel; loss and
// count are presumably zeroed by the caller before launch — TODO confirm.
GRAIN_GLOBAL void nll(float* loss, uint* count, const float* logp, const int* targetId, int ignoreIndex, uint batchSize, int classSize) {
    GRAIN_PARALLEL_FOR(i, batchSize) {
        auto t = targetId[i];
        if (t != ignoreIndex) {
            atomicAdd(loss, -logp[i * classSize + t]);
            atomicAdd(count, 1);
        }
    }
}
// Gradient of the NLL loss: write the scalar coeff into the target-class
// slot of every batch row, skipping rows labelled ignoreIndex.
GRAIN_GLOBAL void nllGrad(float* glogP, float coeff, const int* targetId, int ignoreIndex, uint batchSize, int classSize) {
    GRAIN_PARALLEL_FOR(row, batchSize) {
        const int label = targetId[row];
        if (label != ignoreIndex) {
            glogP[row * classSize + label] = coeff;
        }
    }
}
// Add the bias vector b (length blen) to y (length ylen), repeating b
// cyclically across y.
GRAIN_GLOBAL void addBias(float* y, const float* b, uint blen, uint ylen) {
    GRAIN_PARALLEL_FOR(i, ylen) {
        const uint j = i % blen;
        y[i] += b[j];
    }
}
// convert a linear index to a row index
// (thrust functor: row = i / C for a C-column layout)
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
    T C; // number of columns
    __host__ __device__
    linear_index_to_row_index(T C) : C(C) {}
    __host__ __device__
    T operator()(T i)
    {
        return i / C;
    }
};
// convert a linear index to a column index
// (thrust functor: col = i % C for a C-column layout)
template <typename T>
struct linear_index_to_col_index : public thrust::unary_function<T,T>
{
    T C; // number of columns
    __host__ __device__
    linear_index_to_col_index(T C) : C(C) {}
    __host__ __device__
    T operator()(T i)
    {
        return i % C;
    }
};
// Gradient of addBias: gb[j] receives the sum of gy over all positions i
// with i % blen == j. One atomicAdd per element; gb is presumably zeroed by
// the caller before launch — TODO confirm.
GRAIN_GLOBAL void addBiasGrad(const float* gy, float* gb, uint blen, uint ylen) {
    // compute row sums by summing values with equal row indices
    GRAIN_PARALLEL_FOR(i, ylen) {
        atomicAdd(gb + (i % blen), gy[i]);
    }
    // TODO use thrust
    // using I = uint;
    // auto key_iter = thrust::make_transform_iterator(thrust::counting_iterator<I>(0), linear_index_to_col_index<I>(blen));
    // thrust::reduce_by_key
    //     (thrust::hip::par,
    //      key_iter, // keys_first
    //      key_iter + ylen, // keys_last
    //      thrust::device_ptr<const float>(gy), // values_first
    //      thrust::make_discard_iterator(), // keys_output
    //      thrust::device_ptr<float>(gb), // values_output
    //      thrust::equal_to<I>(), // binary_pred
    //      thrust::plus<float>()); // binary_o
}
// Map a flat element index onto a strided nd-view:
// offset = i0*strides[0] + i1*strides[1] + ... + iN-1*strides[N-1],
// where (i0, ..., iN-1) are the coordinates of flat index i with the last
// dimension varying fastest. (The original comment paired i0 with
// strides[N-1], which contradicts the loop below.)
__device__ uint indexof(uint i, uint ndim, const uint* shape, const uint* strides) {
    uint idx = i;
    uint pos = 0;
    // Peel coordinates off from the innermost (last) dimension outward.
    for (int d = ndim - 1; d >= 0; --d) {
        pos += (idx % shape[d]) * strides[d];
        idx /= shape[d];
    }
    return pos;
}
/// TODO generalize this nd map function with template
/// TODO define all math functions in CUDA
/// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#mathematical-functions-appendix
/// Stamps out an in-place elementwise kernel `name` that applies `func` to
/// every element of a strided nd-view (see indexof above).
#define GRAIN_ND_EACH(name, func) \
    GRAIN_GLOBAL void name(float* x, uint len, uint ndim, const uint* shape, const uint* strides) { \
        uint idx; \
        GRAIN_PARALLEL_FOR(i, len) { \
            idx = indexof(i, ndim, shape, strides); \
            x[idx] = func(x[idx]); \
        } \
    }
// fast-math functions https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#intrinsic-functions
// In-place elementwise reciprocal over a strided nd-view.
GRAIN_GLOBAL void reciprocal(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    GRAIN_PARALLEL_FOR(i, len) {
        const uint pos = indexof(i, ndim, shape, strides);
        x[pos] = 1.0f / x[pos];
    }
}
// In-place elementwise transcendental kernels over strided nd-views,
// stamped out by the GRAIN_ND_EACH macro above.
GRAIN_ND_EACH(log, logf)
GRAIN_ND_EACH(log2, log2f)
GRAIN_ND_EACH(log10, log10f)
GRAIN_ND_EACH(exp, expf)
GRAIN_ND_EACH(exp2, exp2f)
GRAIN_ND_EACH(exp10, exp10f)
GRAIN_ND_EACH(cos, cosf)
GRAIN_ND_EACH(sin, sinf)
GRAIN_ND_EACH(tan, tanf)
// Raise every element of the strided nd-view x to the given power, in place.
GRAIN_GLOBAL void pow(float power, float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    GRAIN_PARALLEL_FOR(i, len) {
        const uint pos = indexof(i, ndim, shape, strides);
        x[pos] = powf(x[pos], power);
    }
}
// Derivative of x^power w.r.t. x, computed in place: power * x^(power-1).
GRAIN_GLOBAL void powGrad(float power, float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    GRAIN_PARALLEL_FOR(i, len) {
        const uint pos = indexof(i, ndim, shape, strides);
        x[pos] = power * powf(x[pos], power - 1);
    }
}
// Negate every element of the strided nd-view x, in place.
GRAIN_GLOBAL void neg(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    GRAIN_PARALLEL_FOR(i, len) {
        const uint pos = indexof(i, ndim, shape, strides);
        x[pos] = -x[pos];
    }
}
GRAIN_ND_EACH(abs, fabsf)
// Derivative of |x| computed in place: sign(x), with 0 left unchanged (0).
// Fixed: the original used `return` after handling the positive case, which
// exited the thread and silently skipped all remaining indices of its
// grid-stride loop (and is non-conforming inside the OpenMP host expansion
// of GRAIN_PARALLEL_FOR); also -1.0 was a double literal in a float kernel.
GRAIN_GLOBAL void absGrad(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    uint idx;
    GRAIN_PARALLEL_FOR(i, len) {
        idx = indexof(i, ndim, shape, strides);
        if (x[idx] > 0) {
            x[idx] = 1.0f;
        } else if (x[idx] < 0) {
            x[idx] = -1.0f;
        }
    }
}
// Gather embedding vectors: for each (batch, embed) output position, copy
// the element of the w row selected by token id x[batch].
GRAIN_GLOBAL void embedding(const float* w, const int* x, float* y, uint nvocab, uint nembed, uint nbatch) {
    GRAIN_PARALLEL_FOR(i, nbatch * nembed) {
        const uint batch = i / nembed;
        const uint col = i % nembed;
        y[i] = w[x[batch] * nembed + col];
    }
}
// Scatter-add gradient of the embedding gather: each output position adds
// its incoming gradient gy into the gw row selected by token id x[b].
// atomicAdd is required because batch rows may share a token id. gw is
// presumably zeroed by the caller before launch — TODO confirm.
GRAIN_GLOBAL void embeddingGrad(float* gw, const int* x, const float* gy, uint nvocab, uint nembed, uint nbatch) {
    uint b, e;
    GRAIN_PARALLEL_FOR(i, nbatch * nembed) {
        b = i / nembed;
        e = i % nembed;
        atomicAdd(gw + x[b] * nembed + e, gy[b * nembed + e]);
    }
}
// Accumulate the Huber loss between predict and target (strided nd-views)
// into *output: quadratic near zero, linear beyond the threshold.
// *output is presumably zeroed by the caller before launch — TODO confirm.
// Fixed: use single-precision math throughout — the original mixed `fabs`
// and double literals (0.5) into a float kernel, forcing fp64 promotion.
GRAIN_GLOBAL void huber(float* output, const float* predict, const float* target, float threshold,
                        uint len, uint ndim, const uint* shape, const uint* strides) {
    const float t05 = 0.5f * threshold;
    float l1;
    uint idx;
    GRAIN_PARALLEL_FOR(i, len) {
        idx = indexof(i, ndim, shape, strides);
        l1 = fabsf(predict[idx] - target[idx]);
        atomicAdd(output,
                  l1 > threshold
                  ? threshold * (l1 - t05)
                  : 0.5f * l1 * l1);
    }
}
// Gradient of the Huber loss w.r.t. predict: the prediction error clamped
// to the interval [-threshold, threshold].
GRAIN_GLOBAL void huberGrad(float* gradPredict, const float* predict, const float* target, float threshold, uint len, uint ndim, const uint* shape, const uint* strides) {
    GRAIN_PARALLEL_FOR(i, len) {
        const uint pos = indexof(i, ndim, shape, strides);
        const float diff = predict[pos] - target[pos];
        gradPredict[pos] = fmaxf(fminf(diff, threshold), -threshold);
    }
}
| 6015bca0a9fed011a9b5d77ff1619b6b8afbdc43.cu | #include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <thrust/system/cuda/execution_policy.h>
#ifdef __CUDACC__
# define GRAIN_DEVICE_HOST __device__ __host__
# define GRAIN_GLOBAL __global__
# define GRAIN_PARALLEL_FOR(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#else
# define GRAIN_DEVICE_HOST
# define GRAIN_GLOBAL
# define GRAIN_PARALLEL_FOR(i, n) \
_Pragma("omp parallel for") \
for (int i = 0; i < (n); ++i)
#endif
// template <typename T>
// GRAIN_GLOBAL void saxpy(T* res, const T* x, const T* y, int n) {
// NOTE(review): despite the name, this computes an elementwise sum
// res = x + y — there is no scalar `a` parameter, so it is not a true saxpy.
GRAIN_GLOBAL void saxpy(float* res, const float* x, const float* y, int n) {
    GRAIN_PARALLEL_FOR(i, n) {
        res[i] = x[i] + y[i];
    }
}
GRAIN_GLOBAL void relu(float* x, int n) {
GRAIN_PARALLEL_FOR(i, n) {
if (x[i] < 0) x[i] = 0;
}
}
GRAIN_GLOBAL void reluGrad(float* gx, const float* gy, const float* x, int n) {
GRAIN_PARALLEL_FOR(i, n) {
gx[i] = (x[i] <= 0) ? 0 : gy[i];
}
}
// TODO faster implementation using thrust
// Serial reduction: the first thread of each block sums all N elements of x
// into result[0].
// Fixed: accumulate in a register instead of read-modify-writing result[0]
// in global memory, which both wasted bandwidth and interleaved unsafely
// between blocks when launched with more than one block (every block now
// writes the same final value, which is benign).
GRAIN_GLOBAL void sum(const float* x, float* result, int N) {
    if (threadIdx.x != 0) return;
    float acc = 0.0f;
    for (int n = 0; n < N; ++n) {
        acc += x[n];
    }
    result[0] = acc;
}
// Shared-memory tree reduction: each block reduces up to 2*blockDim.x input
// elements and writes its partial sum to g_odata[blockIdx.x]. Requires
// blockDim.x * sizeof(float) bytes of dynamic shared memory and a
// power-of-two blockDim.x.
// Fixes over the original:
//  * out-of-range threads no longer `return` before __syncthreads() (a
//    divergent-barrier hazard); they contribute 0 instead,
//  * the loop bound is s < blockDim.x — with s <= blockDim.x the final
//    iteration read one element past the end of the shared array,
//  * each thread now also folds in its second element (the i + blockDim.x
//    slot implied by the 2*blockDim.x block stride, previously skipped).
GRAIN_GLOBAL void sum_faster(const float *g_idata, float *g_odata, uint n, uint N) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    float v = 0.0f;
    if (i < N) v = g_idata[i];
    if (i + blockDim.x < N) v += g_idata[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
        if (tid % (2 * s) == 0) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
GRAIN_GLOBAL void nll(float* loss, uint* count, const float* logp, const int* targetId, int ignoreIndex, uint batchSize, int classSize) {
GRAIN_PARALLEL_FOR(i, batchSize) {
auto t = targetId[i];
if (t != ignoreIndex) {
atomicAdd(loss, -logp[i * classSize + t]);
atomicAdd(count, 1);
}
}
}
GRAIN_GLOBAL void nllGrad(float* glogP, float coeff, const int* targetId, int ignoreIndex, uint batchSize, int classSize) {
GRAIN_PARALLEL_FOR(i, batchSize) {
auto t = targetId[i];
if (t != ignoreIndex) {
glogP[i * classSize + t] = coeff;
}
}
}
GRAIN_GLOBAL void addBias(float* y, const float* b, uint blen, uint ylen) {
GRAIN_PARALLEL_FOR(i, ylen) {
y[i] += b[i % blen];
}
}
// convert a linear index to a row index
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_row_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i / C;
}
};
// convert a linear index to a row index
template <typename T>
struct linear_index_to_col_index : public thrust::unary_function<T,T>
{
T C; // number of columns
__host__ __device__
linear_index_to_col_index(T C) : C(C) {}
__host__ __device__
T operator()(T i)
{
return i % C;
}
};
GRAIN_GLOBAL void addBiasGrad(const float* gy, float* gb, uint blen, uint ylen) {
// compute row sums by summing values with equal row indices
GRAIN_PARALLEL_FOR(i, ylen) {
atomicAdd(gb + (i % blen), gy[i]);
}
// TODO use thrust
// using I = uint;
// auto key_iter = thrust::make_transform_iterator(thrust::counting_iterator<I>(0), linear_index_to_col_index<I>(blen));
// thrust::reduce_by_key
// (thrust::cuda::par,
// key_iter, // keys_first
// key_iter + ylen, // keys_last
// thrust::device_ptr<const float>(gy), // values_first
// thrust::make_discard_iterator(), // keys_output
// thrust::device_ptr<float>(gb), // values_output
// thrust::equal_to<I>(), // binary_pred
// thrust::plus<float>()); // binary_o
}
// x[i0, i1, ..., iN-1] = x[i0 * strides[N-1] + i1 * strides[N-2] + ... + iN-1 * strides[0]]
__device__ uint indexof(uint i, uint ndim, const uint* shape, const uint* strides) {
uint idx = i;
uint pos = 0;
for (int d = ndim - 1; d >= 0; --d) {
pos += (idx % shape[d]) * strides[d];
idx /= shape[d];
}
return pos;
}
/// TODO generalize this nd map function with template
/// TODO define all math functions in CUDA
/// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#mathematical-functions-appendix
#define GRAIN_ND_EACH(name, func) \
GRAIN_GLOBAL void name(float* x, uint len, uint ndim, const uint* shape, const uint* strides) { \
uint idx; \
GRAIN_PARALLEL_FOR(i, len) { \
idx = indexof(i, ndim, shape, strides); \
x[idx] = func(x[idx]); \
} \
}
// fast-math functions https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#intrinsic-functions
GRAIN_GLOBAL void reciprocal(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
uint idx;
GRAIN_PARALLEL_FOR(i, len) {
idx = indexof(i, ndim, shape, strides);
x[idx] = 1.0f / x[idx];
}
}
GRAIN_ND_EACH(log, logf)
GRAIN_ND_EACH(log2, log2f)
GRAIN_ND_EACH(log10, log10f)
GRAIN_ND_EACH(exp, expf)
GRAIN_ND_EACH(exp2, exp2f)
GRAIN_ND_EACH(exp10, exp10f)
GRAIN_ND_EACH(cos, cosf)
GRAIN_ND_EACH(sin, sinf)
GRAIN_ND_EACH(tan, tanf)
GRAIN_GLOBAL void pow(float power, float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
uint idx;
GRAIN_PARALLEL_FOR(i, len) {
idx = indexof(i, ndim, shape, strides);
x[idx] = powf(x[idx], power);
}
}
GRAIN_GLOBAL void powGrad(float power, float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
uint idx;
GRAIN_PARALLEL_FOR(i, len) {
idx = indexof(i, ndim, shape, strides);
x[idx] = power * powf(x[idx], power-1);
}
}
GRAIN_GLOBAL void neg(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
uint idx;
GRAIN_PARALLEL_FOR(i, len) {
idx = indexof(i, ndim, shape, strides);
x[idx] = -x[idx];
}
}
GRAIN_ND_EACH(abs, fabsf)
// Derivative of |x| computed in place: sign(x), with 0 left unchanged (0).
// Fixed: the original used `return` after handling the positive case, which
// exited the thread and silently skipped all remaining indices of its
// grid-stride loop (and is non-conforming inside the OpenMP host expansion
// of GRAIN_PARALLEL_FOR); also -1.0 was a double literal in a float kernel.
GRAIN_GLOBAL void absGrad(float* x, uint len, uint ndim, const uint* shape, const uint* strides) {
    uint idx;
    GRAIN_PARALLEL_FOR(i, len) {
        idx = indexof(i, ndim, shape, strides);
        if (x[idx] > 0) {
            x[idx] = 1.0f;
        } else if (x[idx] < 0) {
            x[idx] = -1.0f;
        }
    }
}
GRAIN_GLOBAL void embedding(const float* w, const int* x, float* y, uint nvocab, uint nembed, uint nbatch) {
uint b, e;
GRAIN_PARALLEL_FOR(i, nbatch * nembed) {
b = i / nembed;
e = i % nembed;
y[i] = w[x[b] * nembed + e];
}
}
GRAIN_GLOBAL void embeddingGrad(float* gw, const int* x, const float* gy, uint nvocab, uint nembed, uint nbatch) {
uint b, e;
GRAIN_PARALLEL_FOR(i, nbatch * nembed) {
b = i / nembed;
e = i % nembed;
atomicAdd(gw + x[b] * nembed + e, gy[b * nembed + e]);
}
}
// Accumulate the Huber loss between predict and target (strided nd-views)
// into *output: quadratic near zero, linear beyond the threshold.
// *output is presumably zeroed by the caller before launch — TODO confirm.
// Fixed: use single-precision math throughout — the original mixed `fabs`
// and double literals (0.5) into a float kernel, forcing fp64 promotion.
GRAIN_GLOBAL void huber(float* output, const float* predict, const float* target, float threshold,
                        uint len, uint ndim, const uint* shape, const uint* strides) {
    const float t05 = 0.5f * threshold;
    float l1;
    uint idx;
    GRAIN_PARALLEL_FOR(i, len) {
        idx = indexof(i, ndim, shape, strides);
        l1 = fabsf(predict[idx] - target[idx]);
        atomicAdd(output,
                  l1 > threshold
                  ? threshold * (l1 - t05)
                  : 0.5f * l1 * l1);
    }
}
GRAIN_GLOBAL void huberGrad(float* gradPredict, const float* predict, const float* target, float threshold, uint len, uint ndim, const uint* shape, const uint* strides) {
    // Gradient of the Huber loss w.r.t. predict: the raw difference clamped
    // to the interval [-threshold, threshold].
    GRAIN_PARALLEL_FOR(i, len) {
        const uint j = indexof(i, ndim, shape, strides);
        const float diff = predict[j] - target[j];
        gradPredict[j] = fmaxf(fminf(diff, threshold), -threshold);
    }
}
|
45d5eff62ed8964e96d4b2c4e76853b66fb0816f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:44 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
sgeadd_kernel(
    int m, int n,
    float alpha,
    const float *dA, int ldda,
    float *dB, int lddb )
{
    // One thread per row: thread i sweeps across all n columns of row i,
    // computing dB(i,j) = alpha*dA(i,j) + dB(i,j).  Threads in the bottom
    // partial block with i >= m do nothing.
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        for ( int j = 0; j < n; ++j ) {
            dB[i + j*lddb] = alpha * dA[i + j*ldda] + dB[i + j*lddb];
        }
    }
}
/* ===================================================================== */
extern "C" void
magmablas_sgeadd(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float *dA, magma_int_t ldda,
    float *dB, magma_int_t lddb )
{
    /*
        SGEADD adds two device matrices:  dB = alpha*dA + dB.

        m, n  - dimensions of dA and dB (m >= 0, n >= 0)
        alpha - real scalar
        dA    - m-by-n device matrix, leading dimension ldda >= max(1,m)
        dB    - m-by-n device matrix updated in place, lddb >= max(1,m)

        Invalid arguments are reported via magma_xerbla and the call
        returns without launching the kernel.
    */
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    // Nothing to do for an empty matrix.
    if ( m == 0 || n == 0 )
        return;

    // One NB-thread block per NB rows; each thread handles one full row.
    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB );
    hipLaunchKernelGGL((sgeadd_kernel), dim3(grid), dim3(threads), 0, magma_stream,
                       m, n, alpha, dA, ldda, dB, lddb);
}
| 45d5eff62ed8964e96d4b2c4e76853b66fb0816f.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:44 2013
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/* =====================================================================
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread adds one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
TODO. Block in both directions, for large matrices.
E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
sgeadd_kernel(
    int m, int n,
    float alpha,
    const float *dA, int ldda,
    float *dB, int lddb )
{
    // One thread per row: thread i sweeps across all n columns of row i,
    // computing dB(i,j) = alpha*dA(i,j) + dB(i,j).  Threads in the bottom
    // partial block with i >= m do nothing.
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        for ( int j = 0; j < n; ++j ) {
            dB[i + j*lddb] = alpha * dA[i + j*ldda] + dB[i + j*lddb];
        }
    }
}
/* ===================================================================== */
extern "C" void
magmablas_sgeadd(
    magma_int_t m, magma_int_t n,
    float alpha,
    const float *dA, magma_int_t ldda,
    float *dB, magma_int_t lddb )
{
    /*
        SGEADD adds two device matrices:  dB = alpha*dA + dB.

        m, n  - dimensions of dA and dB (m >= 0, n >= 0)
        alpha - real scalar
        dA    - m-by-n device matrix, leading dimension ldda >= max(1,m)
        dB    - m-by-n device matrix updated in place, lddb >= max(1,m)

        Invalid arguments are reported via magma_xerbla and the call
        returns without launching the kernel.
    */
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    // Nothing to do for an empty matrix.
    if ( m == 0 || n == 0 )
        return;

    // One NB-thread block per NB rows; each thread handles one full row.
    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB );
    sgeadd_kernel<<< grid, threads, 0, magma_stream >>>(
        m, n, alpha, dA, ldda, dB, lddb );
}
|
4ae0d5d1df92622be77c6259ef6eb88555817d66.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <iomanip>
// includes CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <APSP_CPU.h>
#define MAX_THREADS_PER_BLOCK 1024
#define TILE_DIM 16
#define BLOCK_ROWS 8
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
using namespace std;
__global__ void
CUDA_APSP(int *d_mat,int k,int N)
{
    // Floyd–Warshall relaxation for intermediate vertex k on an N x N
    // distance matrix where -1 encodes "no path":
    //   dist(i,j) = min(dist(i,j), dist(i,k) + dist(k,j)).
    // One thread per (i,j) cell.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        const int ij = i*N + j;
        const int ik = i*N + k;
        const int kj = k*N + j;
        if (d_mat[ik] != -1 && d_mat[kj] != -1) {
            const int candidate = d_mat[ik] + d_mat[kj];
            const bool keep = (d_mat[ij] != -1) && (d_mat[ij] < candidate);
            d_mat[ij] = keep ? d_mat[ij] : candidate;
        }
    }
}
void CUDA_APSP_base(dim3 grid, dim3 threads, int *d_mat,int N){
    // Baseline driver: one relaxation kernel launch per intermediate vertex.
    for (int k = 0; k < N; ++k) {
        hipLaunchKernelGGL((CUDA_APSP), dim3(grid), dim3(threads), 0, 0, d_mat, k, N);
    }
}
__global__ void
CUDA_APSP_coalcesing(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Floyd–Warshall step that also maintains a transposed copy of the
    // matrix, so the dist(i,k) read (a column of d_mat) becomes a coalesced
    // row read in d_mat_trans.  Both copies receive the relaxed value.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        const int ij = i*N + j;       // cell (i,j) in d_mat
        const int ji = j*N + i;       // same cell in the transposed copy
        const int ki = k*N + i;       // dist(i,k), read from the transpose
        const int kj = k*N + j;       // dist(k,j)
        if (d_mat_trans[ki] != -1 && d_mat[kj] != -1) {
            const int candidate = d_mat_trans[ki] + d_mat[kj];
            const bool keep = (d_mat[ij] != -1) && (d_mat[ij] < candidate);
            d_mat[ij] = keep ? d_mat[ij] : candidate;
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
__global__ void transposeDiagonal(int *odata,
    int *idata, int width, int height, int nreps)
{
    // Tiled matrix transpose with diagonal block reordering (avoids partition
    // camping) — adapted from the NVIDIA transpose sample.
    //
    // Fixes vs. the original:
    //  * the staging tile was declared `float` while the data is `int`; the
    //    implicit int->float->int round trip silently corrupts values with
    //    magnitude above 2^24.  The tile is now `int`.
    //  * a __syncthreads() is added at the end of each repetition so a later
    //    iteration cannot overwrite tile entries another thread is still
    //    reading (only matters for nreps > 1; callers here pass nreps = 1).
    //
    // Assumes width/height are multiples of TILE_DIM with a
    // TILE_DIM x BLOCK_ROWS thread block — TODO confirm for non-square use.
    __shared__ int tile[TILE_DIM][TILE_DIM+1];  // +1 pad avoids bank conflicts
    int blockIdx_x, blockIdx_y;
    // diagonal reordering of block indices
    if (width == height) {
        blockIdx_y = blockIdx.x;
        blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
    } else {
        int bid = blockIdx.x + gridDim.x*blockIdx.y;
        blockIdx_y = bid%gridDim.y;
        blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
    }
    int xIndex = blockIdx_x*TILE_DIM + threadIdx.x;
    int yIndex = blockIdx_y*TILE_DIM + threadIdx.y;
    int index_in = xIndex + (yIndex)*width;
    xIndex = blockIdx_y*TILE_DIM + threadIdx.x;
    yIndex = blockIdx_x*TILE_DIM + threadIdx.y;
    int index_out = xIndex + (yIndex)*height;
    for (int r=0; r < nreps; r++) {
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
            tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
        }
        __syncthreads();
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
            odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
        }
        __syncthreads();  // protect tile before the next repetition's writes
    }
}
void CUDA_APSP_coalcesing(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans,int *d_mat,int N){
    // Build a transposed copy of the distance matrix, then run the coalesced
    // Floyd–Warshall kernel once per intermediate vertex.
    const int bytes = sizeof(int) * N * N;
    int *d_trans = NULL;
    checkCudaErrors(hipMalloc((void **) &d_trans, bytes));
    hipLaunchKernelGGL((transposeDiagonal), dim3(grid_trans), dim3(threads_trans), 0, 0, d_trans, d_mat, N, N, 1);
    for (int k = 0; k < N; ++k) {
        hipLaunchKernelGGL((CUDA_APSP_coalcesing), dim3(grid), dim3(threads), 0, 0, d_mat, d_trans, k, N);
    }
    checkCudaErrors(hipFree(d_trans));
}
__global__ void
CUDA_APSP_SharedMemory(int *d_mat,int k,int N)
{
    // Floyd–Warshall step caching this block's dist(i,k) column in shared
    // memory (one int per threadIdx.x, loaded by the y==0 row).  Requires
    // blockDim.x * sizeof(int) of dynamic shared memory.
    //
    // Fix: the original placed __syncthreads() inside the `i<N && j<N`
    // guard, so boundary blocks executed the barrier with only a subset of
    // their threads — undefined behavior.  All threads now reach the
    // barrier; only in-range threads touch memory.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.y == 0)
        s_mem[threadIdx.x] = d_mat[i*N + k];   // dist(i,k)
    __syncthreads();

    if (inside) {
        const int i0 = i*N + j;
        const int kj = k*N + j;
        if (s_mem[threadIdx.x] != -1 && d_mat[kj] != -1)
            d_mat[i0] =
                (d_mat[i0] != -1 && d_mat[i0] < s_mem[threadIdx.x] + d_mat[kj]) ? d_mat[i0] : (s_mem[threadIdx.x] + d_mat[kj]);
    }
}
__global__ void
CUDA_APSP_SharedMemory_double(int *d_mat,int k,int N)
{
    // Floyd–Warshall step caching both the dist(i,k) column (first blockDim.x
    // ints, loaded by the y==0 row) and the dist(k,j) row (next blockDim.y
    // ints, loaded by the x==0 column) in shared memory.  Requires
    // (blockDim.x + blockDim.y) * sizeof(int) of dynamic shared memory.
    //
    // Fixes vs. the original:
    //  * the loads used `if (y==0) ... else if (x==0) ...`, so thread (0,0)
    //    performed only the first load and s_mem[blockDim.x + 0] was read
    //    uninitialized by the whole y==0 row;
    //  * __syncthreads() sat inside the `i<N && j<N` guard — a divergent
    //    barrier for blocks on the matrix boundary.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.y == 0)
        s_mem[threadIdx.x] = d_mat[i*N + k];              // dist(i,k)
    if (inside && threadIdx.x == 0)
        s_mem[blockDim.x + threadIdx.y] = d_mat[k*N + j]; // dist(k,j)
    __syncthreads();

    if (inside) {
        const int i0 = i*N + j;
        const int ik = s_mem[threadIdx.x];
        const int kj = s_mem[blockDim.x + threadIdx.y];
        if (ik != -1 && kj != -1)
            d_mat[i0] = (d_mat[i0] != -1 && d_mat[i0] < ik + kj) ? d_mat[i0] : (ik + kj);
    }
}
void CUDA_APSP_SharedMemory(dim3 grid, dim3 threads, int *d_mat,int N){
    // blockDim.x ints of dynamic shared memory per block (the cached column).
    const int smemBytes = sizeof(int) * threads.x;
    for (int k = 0; k < N; ++k) {
        hipLaunchKernelGGL((CUDA_APSP_SharedMemory), dim3(grid), dim3(threads), smemBytes, 0, d_mat, k, N);
    }
}
void CUDA_APSP_SharedMemory_double(dim3 grid, dim3 threads, int *d_mat,int N){
    // Fix: the kernel caches blockDim.x + blockDim.y ints (a column and a
    // row), but the original reserved only sizeof(int)*threads.x bytes, so
    // the row half of the cache indexed past the end of the allocation.
    int SizeS = sizeof(int) * (threads.x + threads.y);
    int k;
    for(k=0;k<N;k++)
        hipLaunchKernelGGL(( CUDA_APSP_SharedMemory_double), dim3(grid),dim3(threads),SizeS, 0, d_mat,k,N);
}
__global__ void
CUDA_APSP_Advanced(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Coalesced Floyd–Warshall step (transposed copy, as in
    // CUDA_APSP_coalcesing) that additionally caches this block's dist(k,j)
    // row in shared memory (one int per threadIdx.y, loaded by the x==0
    // column).  Requires blockDim.y * sizeof(int) of dynamic shared memory.
    //
    // Fix: __syncthreads() was inside the `i<N && j<N` guard — a divergent
    // barrier for boundary blocks.  Barriers are now reached by every thread.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.x == 0)
        s_mem[threadIdx.y] = d_mat[k*N + j];   // dist(k,j)
    __syncthreads();

    if (inside) {
        const int ij = i*N + j;
        const int ji = j*N + i;                 // cell (i,j) in the transpose
        const int ik = d_mat_trans[k*N + i];    // dist(i,k), coalesced read
        const int kj = s_mem[threadIdx.y];
        if (ik != -1 && kj != -1) {
            d_mat[ij] = (d_mat[ij] != -1 && d_mat[ij] < ik + kj) ? d_mat[ij] : (ik + kj);
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
void CUDA_APSP_Advanced(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans, int *d_mat,int N){
    // Build a transposed copy, then run the shared-memory + coalesced kernel
    // once per intermediate vertex.
    // Fix: the kernel caches one int per threadIdx.y, so the dynamic shared
    // size must follow threads.y.  The original used threads.x, which only
    // happens to work because Computation launches square blocks.
    int mem_size=sizeof(int)*N*N;
    int *d_mat_trans2;
    checkCudaErrors(hipMalloc((void **) &d_mat_trans2, mem_size));
    hipLaunchKernelGGL(( transposeDiagonal), dim3(grid_trans),dim3(threads_trans), 0, 0, d_mat_trans2,d_mat,N,N,1);
    int SizeS = sizeof(int) * threads.y;
    int k;
    for(k=0;k<N;k++)
        hipLaunchKernelGGL(( CUDA_APSP_Advanced), dim3(grid),dim3(threads),SizeS, 0, d_mat,d_mat_trans2,k,N);
    checkCudaErrors(hipFree(d_mat_trans2));
}
__global__ void
CUDA_APSP_Advanced_double(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Coalesced Floyd–Warshall step caching both the dist(k,j) row (indexed
    // by threadIdx.y, loaded by the x==0 column) and the dist(i,k) column
    // (at offset blockDim.x, indexed by threadIdx.x, loaded from the
    // transposed copy by the y==0 row).  The layout assumes square blocks
    // and requires 2 * blockDim.x * sizeof(int) of dynamic shared memory.
    //
    // Fixes vs. the original:
    //  * the loads used `if (x==0) ... else if (y==0) ...`, so thread (0,0)
    //    performed only the first load and s_mem[blockDim.x + 0] was read
    //    uninitialized;
    //  * __syncthreads() sat inside the `i<N && j<N` guard (divergent
    //    barrier on boundary blocks).
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.x == 0)
        s_mem[threadIdx.y] = d_mat[k*N + j];                     // dist(k,j)
    if (inside && threadIdx.y == 0)
        s_mem[blockDim.x + threadIdx.x] = d_mat_trans[k*N + i];  // dist(i,k)
    __syncthreads();

    if (inside) {
        const int ij = i*N + j;
        const int ji = j*N + i;
        const int ik = s_mem[blockDim.x + threadIdx.x];
        const int kj = s_mem[threadIdx.y];
        if (ik != -1 && kj != -1) {
            d_mat[ij] = (d_mat[ij] != -1 && d_mat[ij] < ik + kj) ? d_mat[ij] : (ik + kj);
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
void CUDA_APSP_Advanced_double(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans,int *d_mat,int N){
    // Transposed copy + row/column shared-memory caches.  Reserves
    // 2 * blockDim.x ints of dynamic shared memory (square blocks).
    const int bytes = sizeof(int) * N * N;
    int *d_trans = NULL;
    checkCudaErrors(hipMalloc((void **) &d_trans, bytes));
    hipLaunchKernelGGL((transposeDiagonal), dim3(grid_trans), dim3(threads_trans), 0, 0, d_trans, d_mat, N, N, 1);
    const int smemBytes = 2 * sizeof(int) * threads.x;
    for (int k = 0; k < N; ++k) {
        hipLaunchKernelGGL((CUDA_APSP_Advanced_double), dim3(grid), dim3(threads), smemBytes, 0, d_mat, d_trans, k, N);
    }
    checkCudaErrors(hipFree(d_trans));
}
// Runs one APSP variant and reports its runtime.
//   flag     0 = CPU reference (ST_APSP) into o_h_mat; 1..6 = GPU variants
//            (baseline, coalescing, shared mem, shared mem double,
//            advanced, advanced double).
//   N        matrix side length; P = threads per block side (2-D blocks).
//   test     nonzero: compare the GPU result against o_h_mat via CmpArray.
//   cpu_time CPU reference time in ms, used only for the printed speed-up.
//   h_mat    host input (CPU path); o_h_mat = CPU result buffer;
//   d_mat    device input (GPU paths) — copied to scratch, never modified.
// Returns the elapsed time of the selected variant in ms.
// NOTE(review): for GPU flags the timed region includes the device-to-host
// result copy, so GPU times are transfer-inclusive — confirm that is the
// intended comparison against the CPU time.
double Computation(int flag,size_t N,size_t P,int test,double cpu_time,const int *h_mat=NULL,int *o_h_mat=NULL,const int *d_mat=NULL){
    int mem_size=sizeof(int)*N*N;
    int *o_h_mat2 = (int*)malloc(mem_size);//use in GPU : as output
    int *o_d_mat;//use in GPU : as input
    // setup execution parameters
    int num_of_blocks = 1;
    int num_of_threads_per_block = P;
    if(P>MAX_THREADS_PER_BLOCK){
        cout<<"Number of process per block must less than MAX_T:"<<MAX_THREADS_PER_BLOCK<<endl;
        exit(-1);
    }
    // 2-D grid of P x P thread blocks covering the N x N matrix.
    num_of_blocks=(int)ceil(N/(double)num_of_threads_per_block);
    dim3 grid(num_of_blocks, num_of_blocks, 1);
    dim3 threads(num_of_threads_per_block, num_of_threads_per_block, 1);
    // Separate geometry for the tiled transpose kernel.
    num_of_blocks=N/TILE_DIM;
    dim3 grid_trans(num_of_blocks, num_of_blocks, 1);
    dim3 threads_trans(TILE_DIM, BLOCK_ROWS, 1);
    if(flag!=0){
        // GPU path: work on a scratch copy so d_mat stays pristine for the
        // next variant.
        checkCudaErrors(hipMalloc((void **) &o_d_mat, mem_size));
        checkCudaErrors(hipMemcpy(o_d_mat, d_mat, mem_size,
                        hipMemcpyDeviceToDevice));
    }else{
        memcpy(o_h_mat,h_mat,mem_size);
        // cout << "num of blocks are: "<< num_of_blocks<<" num of threads per block: "<<
        // num_of_threads_per_block<<endl;
    }
    // Time the selected variant with GPU events.
    hipEvent_t start,stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float elapsed_time_ms;
    hipEventRecord(start,0);
    if(!flag){ //work on CPU
        ST_APSP(o_h_mat, N);
    }else{
        if(flag==1){ //work on GPU
            CUDA_APSP_base(grid,threads,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }else if(flag==2){
            CUDA_APSP_coalcesing(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }else if(flag==3){
            CUDA_APSP_SharedMemory(grid,threads,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }else if(flag==4){
            CUDA_APSP_SharedMemory_double(grid,threads,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }else if(flag==5){
            CUDA_APSP_Advanced(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }else if(flag==6){
            CUDA_APSP_Advanced_double(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            hipDeviceSynchronize();
            checkCudaErrors(hipMemcpy(o_h_mat2, o_d_mat, mem_size,
                            hipMemcpyDeviceToHost));
        }
    }
    float speedUp=1;
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_time_ms,start,stop);
    cout.flags(ios::internal);
    cout.setf(ios::fixed);
    cout <<"\n Time to calculate results on ";
    if(!flag){
        cout <<"CPU: ";
        // note: assigns the local parameter only; callers use the returned
        // value to obtain the CPU time.
        cpu_time = elapsed_time_ms;
    }else{
        speedUp = cpu_time/elapsed_time_ms;
        if(flag==1)
            cout <<"GPU(baseline): ";
        else if(flag==2)
            cout <<"GPU(colasing only): ";
        else if(flag==3)
            cout <<"GPU(shared mem only): ";
        else if(flag==4)
            cout <<"GPU(share mem double): ";
        else if(flag==5)
            cout <<"GPU(Advanced): ";
        else if(flag==6)
            cout <<"GPU(Advanced,double): ";
    }
    cout<<setprecision(3)<<elapsed_time_ms<<" ms, ";
    if(flag!=0)
        cout<<speedUp<<" speed up than on CPU";
    cout<<endl;
    hipEventDestroy(start);
    hipEventDestroy(stop);
    if(test){
        // Validate the GPU result against the CPU reference in o_h_mat.
        if(CmpArray(o_h_mat, o_h_mat2, N*N))
            printf("Your result is correct.\n");
        else
            printf("Your result is wrong.\n");
    }
    free(o_h_mat2);
    if(flag!=0)
        checkCudaErrors(hipFree(o_d_mat));
    return elapsed_time_ms;
}
void usage(int argc, char **argv) {
    // Print command-line help to stderr and terminate with a nonzero status.
    static const char *help[] = {
        "\t<size of matrix> - side size of matrix n (positive integer)\n",
        "\t<num of process> - number of thread per block(1 - 32)\n",
        "\t<start command> - differnt setting on GPU(1-6)\n",
        "\t<end command> - differnt setting on GPU(larger than start command, 1-6)\n",
        "\t<test > - differnt setting on GPU(larger than start command, 1-6)\n",
    };
    fprintf(stderr, "Usage: %s <size of matrix> <num of process> <start command> <end command> <test>\n", argv[0]);
    for (size_t t = 0; t < sizeof(help)/sizeof(help[0]); ++t) {
        fprintf(stderr, "%s", help[t]);
    }
    exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
    // Driver: parse arguments, generate a random N x N distance matrix, run
    // the CPU reference once, then run the requested range [cmd_s, cmd_e] of
    // GPU variants, reporting timings and (optionally) validating results.
    /*Initial settings*/
    size_t N;
    size_t P;
    printf("%s Starting...\n\n", argv[0]);
    int devID = findCudaDevice(argc, (const char **)argv);
    (void)devID;  // device selection happens as a side effect of the call
    /*Get input*/
    if(argc != 6)
    {
        usage(argc,argv);
    }
    N = atoi(argv[1]);
    P = atoi(argv[2]);
    int cmd_s=atoi(argv[3]);
    int cmd_e=atoi(argv[4]);
    int test=atoi(argv[5]);
    // Fix: N and P are size_t; passing them through "%d" is undefined
    // behavior on LP64 platforms — cast explicitly to match the format.
    printf("Size is %d,numP is %d,cmd_s is %d, cmd_e is %d, test is %d \n",(int)N,(int)P,cmd_s,cmd_e,test);
    /*Allocate Data*/
    // NOTE(review): unsigned int overflows for very large N (N*N*4 > 4 GiB).
    unsigned int mem_size=sizeof(int)*N*N;
    // allocate host memory
    int *h_mat = (int*)malloc(mem_size);//use in CPU : as input
    int *o_h_mat = (int*)malloc(mem_size);//use in CPU : as input
    GenMatrix(h_mat, N);
    int *d_mat;//use in GPU : as input
    checkCudaErrors(hipMalloc((void **) &d_mat, mem_size));
    checkCudaErrors(hipMemcpy(d_mat, h_mat, mem_size,
                    hipMemcpyHostToDevice));
    double cpu_time=0;
    /*Computation on HOST*/
    cpu_time=Computation(0,N,P,0,cpu_time,h_mat,o_h_mat); //h_mat compute -> h_mat
    /*Computation on GPU*/
    for(int i=cmd_s;i<=cmd_e;i++){
        Computation(i,N,P,test,cpu_time,h_mat,o_h_mat,d_mat);//d_mat compute->ref_mat
    }
    free(h_mat);
    free(o_h_mat);
    checkCudaErrors(hipFree(d_mat));
    hipDeviceReset();
}
| 4ae0d5d1df92622be77c6259ef6eb88555817d66.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <iomanip>
// includes CUDA
#include <cuda_runtime.h>
#include <cuda.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include <APSP_CPU.h>
#define MAX_THREADS_PER_BLOCK 1024
#define TILE_DIM 16
#define BLOCK_ROWS 8
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
using namespace std;
__global__ void
CUDA_APSP(int *d_mat,int k,int N)
{
    // Floyd–Warshall relaxation for intermediate vertex k on an N x N
    // distance matrix where -1 encodes "no path":
    //   dist(i,j) = min(dist(i,j), dist(i,k) + dist(k,j)).
    // One thread per (i,j) cell.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        const int ij = i*N + j;
        const int ik = i*N + k;
        const int kj = k*N + j;
        if (d_mat[ik] != -1 && d_mat[kj] != -1) {
            const int candidate = d_mat[ik] + d_mat[kj];
            const bool keep = (d_mat[ij] != -1) && (d_mat[ij] < candidate);
            d_mat[ij] = keep ? d_mat[ij] : candidate;
        }
    }
}
void CUDA_APSP_base(dim3 grid, dim3 threads, int *d_mat,int N){
    // Baseline driver: one relaxation kernel launch per intermediate vertex.
    for (int k = 0; k < N; ++k) {
        CUDA_APSP<<<grid,threads>>>(d_mat,k,N);
    }
}
__global__ void
CUDA_APSP_coalcesing(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Floyd–Warshall step that also maintains a transposed copy of the
    // matrix, so the dist(i,k) read (a column of d_mat) becomes a coalesced
    // row read in d_mat_trans.  Both copies receive the relaxed value.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        const int ij = i*N + j;       // cell (i,j) in d_mat
        const int ji = j*N + i;       // same cell in the transposed copy
        const int ki = k*N + i;       // dist(i,k), read from the transpose
        const int kj = k*N + j;       // dist(k,j)
        if (d_mat_trans[ki] != -1 && d_mat[kj] != -1) {
            const int candidate = d_mat_trans[ki] + d_mat[kj];
            const bool keep = (d_mat[ij] != -1) && (d_mat[ij] < candidate);
            d_mat[ij] = keep ? d_mat[ij] : candidate;
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
__global__ void transposeDiagonal(int *odata,
    int *idata, int width, int height, int nreps)
{
    // Tiled matrix transpose with diagonal block reordering (avoids partition
    // camping) — adapted from the NVIDIA transpose sample.
    //
    // Fixes vs. the original:
    //  * the staging tile was declared `float` while the data is `int`; the
    //    implicit int->float->int round trip silently corrupts values with
    //    magnitude above 2^24.  The tile is now `int`.
    //  * a __syncthreads() is added at the end of each repetition so a later
    //    iteration cannot overwrite tile entries another thread is still
    //    reading (only matters for nreps > 1; callers here pass nreps = 1).
    //
    // Assumes width/height are multiples of TILE_DIM with a
    // TILE_DIM x BLOCK_ROWS thread block — TODO confirm for non-square use.
    __shared__ int tile[TILE_DIM][TILE_DIM+1];  // +1 pad avoids bank conflicts
    int blockIdx_x, blockIdx_y;
    // diagonal reordering of block indices
    if (width == height) {
        blockIdx_y = blockIdx.x;
        blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
    } else {
        int bid = blockIdx.x + gridDim.x*blockIdx.y;
        blockIdx_y = bid%gridDim.y;
        blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
    }
    int xIndex = blockIdx_x*TILE_DIM + threadIdx.x;
    int yIndex = blockIdx_y*TILE_DIM + threadIdx.y;
    int index_in = xIndex + (yIndex)*width;
    xIndex = blockIdx_y*TILE_DIM + threadIdx.x;
    yIndex = blockIdx_x*TILE_DIM + threadIdx.y;
    int index_out = xIndex + (yIndex)*height;
    for (int r=0; r < nreps; r++) {
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
            tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
        }
        __syncthreads();
        for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
            odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
        }
        __syncthreads();  // protect tile before the next repetition's writes
    }
}
void CUDA_APSP_coalcesing(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans,int *d_mat,int N){
    // Build a transposed copy of the distance matrix, then run the coalesced
    // Floyd–Warshall kernel once per intermediate vertex.
    const int bytes = sizeof(int) * N * N;
    int *d_trans = NULL;
    checkCudaErrors(cudaMalloc((void **) &d_trans, bytes));
    transposeDiagonal<<<grid_trans,threads_trans>>>(d_trans,d_mat,N,N,1);
    for (int k = 0; k < N; ++k) {
        CUDA_APSP_coalcesing<<<grid,threads>>>(d_mat,d_trans,k,N);
    }
    checkCudaErrors(cudaFree(d_trans));
}
__global__ void
CUDA_APSP_SharedMemory(int *d_mat,int k,int N)
{
    // Floyd–Warshall step caching this block's dist(i,k) column in shared
    // memory (one int per threadIdx.x, loaded by the y==0 row).  Requires
    // blockDim.x * sizeof(int) of dynamic shared memory.
    //
    // Fix: the original placed __syncthreads() inside the `i<N && j<N`
    // guard, so boundary blocks executed the barrier with only a subset of
    // their threads — undefined behavior.  All threads now reach the
    // barrier; only in-range threads touch memory.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.y == 0)
        s_mem[threadIdx.x] = d_mat[i*N + k];   // dist(i,k)
    __syncthreads();

    if (inside) {
        const int i0 = i*N + j;
        const int kj = k*N + j;
        if (s_mem[threadIdx.x] != -1 && d_mat[kj] != -1)
            d_mat[i0] =
                (d_mat[i0] != -1 && d_mat[i0] < s_mem[threadIdx.x] + d_mat[kj]) ? d_mat[i0] : (s_mem[threadIdx.x] + d_mat[kj]);
    }
}
__global__ void
CUDA_APSP_SharedMemory_double(int *d_mat,int k,int N)
{
    // Floyd–Warshall step caching both the dist(i,k) column (first blockDim.x
    // ints, loaded by the y==0 row) and the dist(k,j) row (next blockDim.y
    // ints, loaded by the x==0 column) in shared memory.  Requires
    // (blockDim.x + blockDim.y) * sizeof(int) of dynamic shared memory.
    //
    // Fixes vs. the original:
    //  * the loads used `if (y==0) ... else if (x==0) ...`, so thread (0,0)
    //    performed only the first load and s_mem[blockDim.x + 0] was read
    //    uninitialized by the whole y==0 row;
    //  * __syncthreads() sat inside the `i<N && j<N` guard — a divergent
    //    barrier for blocks on the matrix boundary.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.y == 0)
        s_mem[threadIdx.x] = d_mat[i*N + k];              // dist(i,k)
    if (inside && threadIdx.x == 0)
        s_mem[blockDim.x + threadIdx.y] = d_mat[k*N + j]; // dist(k,j)
    __syncthreads();

    if (inside) {
        const int i0 = i*N + j;
        const int ik = s_mem[threadIdx.x];
        const int kj = s_mem[blockDim.x + threadIdx.y];
        if (ik != -1 && kj != -1)
            d_mat[i0] = (d_mat[i0] != -1 && d_mat[i0] < ik + kj) ? d_mat[i0] : (ik + kj);
    }
}
void CUDA_APSP_SharedMemory(dim3 grid, dim3 threads, int *d_mat,int N){
    // blockDim.x ints of dynamic shared memory per block (the cached column).
    const int smemBytes = sizeof(int) * threads.x;
    for (int k = 0; k < N; ++k) {
        CUDA_APSP_SharedMemory<<<grid,threads,smemBytes>>>(d_mat,k,N);
    }
}
void CUDA_APSP_SharedMemory_double(dim3 grid, dim3 threads, int *d_mat,int N){
    // Fix: the kernel caches blockDim.x + blockDim.y ints (a column and a
    // row), but the original reserved only sizeof(int)*threads.x bytes, so
    // the row half of the cache indexed past the end of the allocation.
    int SizeS = sizeof(int) * (threads.x + threads.y);
    int k;
    for(k=0;k<N;k++)
        CUDA_APSP_SharedMemory_double<<<grid,threads,SizeS>>>(d_mat,k,N);
}
__global__ void
CUDA_APSP_Advanced(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Coalesced Floyd–Warshall step (transposed copy, as in
    // CUDA_APSP_coalcesing) that additionally caches this block's dist(k,j)
    // row in shared memory (one int per threadIdx.y, loaded by the x==0
    // column).  Requires blockDim.y * sizeof(int) of dynamic shared memory.
    //
    // Fix: __syncthreads() was inside the `i<N && j<N` guard — a divergent
    // barrier for boundary blocks.  Barriers are now reached by every thread.
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.x == 0)
        s_mem[threadIdx.y] = d_mat[k*N + j];   // dist(k,j)
    __syncthreads();

    if (inside) {
        const int ij = i*N + j;
        const int ji = j*N + i;                 // cell (i,j) in the transpose
        const int ik = d_mat_trans[k*N + i];    // dist(i,k), coalesced read
        const int kj = s_mem[threadIdx.y];
        if (ik != -1 && kj != -1) {
            d_mat[ij] = (d_mat[ij] != -1 && d_mat[ij] < ik + kj) ? d_mat[ij] : (ik + kj);
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
void CUDA_APSP_Advanced(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans, int *d_mat,int N){
    // Build a transposed copy, then run the shared-memory + coalesced kernel
    // once per intermediate vertex.
    // Fix: the kernel caches one int per threadIdx.y, so the dynamic shared
    // size must follow threads.y.  The original used threads.x, which only
    // happens to work because Computation launches square blocks.
    int mem_size=sizeof(int)*N*N;
    int *d_mat_trans2;
    checkCudaErrors(cudaMalloc((void **) &d_mat_trans2, mem_size));
    transposeDiagonal<<<grid_trans,threads_trans>>>(d_mat_trans2,d_mat,N,N,1);
    int SizeS = sizeof(int) * threads.y;
    int k;
    for(k=0;k<N;k++)
        CUDA_APSP_Advanced<<<grid,threads,SizeS>>>(d_mat,d_mat_trans2,k,N);
    checkCudaErrors(cudaFree(d_mat_trans2));
}
__global__ void
CUDA_APSP_Advanced_double(int *d_mat,int *d_mat_trans,int k,int N)
{
    // Coalesced Floyd–Warshall step caching both the dist(k,j) row (indexed
    // by threadIdx.y, loaded by the x==0 column) and the dist(i,k) column
    // (at offset blockDim.x, indexed by threadIdx.x, loaded from the
    // transposed copy by the y==0 row).  The layout assumes square blocks
    // and requires 2 * blockDim.x * sizeof(int) of dynamic shared memory.
    //
    // Fixes vs. the original:
    //  * the loads used `if (x==0) ... else if (y==0) ...`, so thread (0,0)
    //    performed only the first load and s_mem[blockDim.x + 0] was read
    //    uninitialized;
    //  * __syncthreads() sat inside the `i<N && j<N` guard (divergent
    //    barrier on boundary blocks).
    extern __shared__ int s_mem[];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const bool inside = (i < N) && (j < N);

    if (inside && threadIdx.x == 0)
        s_mem[threadIdx.y] = d_mat[k*N + j];                     // dist(k,j)
    if (inside && threadIdx.y == 0)
        s_mem[blockDim.x + threadIdx.x] = d_mat_trans[k*N + i];  // dist(i,k)
    __syncthreads();

    if (inside) {
        const int ij = i*N + j;
        const int ji = j*N + i;
        const int ik = s_mem[blockDim.x + threadIdx.x];
        const int kj = s_mem[threadIdx.y];
        if (ik != -1 && kj != -1) {
            d_mat[ij] = (d_mat[ij] != -1 && d_mat[ij] < ik + kj) ? d_mat[ij] : (ik + kj);
            d_mat_trans[ji] = d_mat[ij];
        }
    }
}
void CUDA_APSP_Advanced_double(dim3 grid, dim3 threads,dim3 grid_trans,dim3 threads_trans,int *d_mat,int N){
    // Transposed copy + row/column shared-memory caches.  Reserves
    // 2 * blockDim.x ints of dynamic shared memory (square blocks).
    const int bytes = sizeof(int) * N * N;
    int *d_trans = NULL;
    checkCudaErrors(cudaMalloc((void **) &d_trans, bytes));
    transposeDiagonal<<<grid_trans,threads_trans>>>(d_trans,d_mat,N,N,1);
    const int smemBytes = 2 * sizeof(int) * threads.x;
    for (int k = 0; k < N; ++k) {
        CUDA_APSP_Advanced_double<<<grid,threads,smemBytes>>>(d_mat,d_trans,k,N);
    }
    checkCudaErrors(cudaFree(d_trans));
}
// Runs one APSP variant and reports its runtime.
//   flag     0 = CPU reference (ST_APSP) into o_h_mat; 1..6 = GPU variants
//            (baseline, coalescing, shared mem, shared mem double,
//            advanced, advanced double).
//   N        matrix side length; P = threads per block side (2-D blocks).
//   test     nonzero: compare the GPU result against o_h_mat via CmpArray.
//   cpu_time CPU reference time in ms, used only for the printed speed-up.
//   h_mat    host input (CPU path); o_h_mat = CPU result buffer;
//   d_mat    device input (GPU paths) — copied to scratch, never modified.
// Returns the elapsed time of the selected variant in ms.
// NOTE(review): for GPU flags the timed region includes the device-to-host
// result copy, so GPU times are transfer-inclusive — confirm that is the
// intended comparison against the CPU time.
double Computation(int flag,size_t N,size_t P,int test,double cpu_time,const int *h_mat=NULL,int *o_h_mat=NULL,const int *d_mat=NULL){
    int mem_size=sizeof(int)*N*N;
    int *o_h_mat2 = (int*)malloc(mem_size);//use in GPU : as output
    int *o_d_mat;//use in GPU : as input
    // setup execution parameters
    int num_of_blocks = 1;
    int num_of_threads_per_block = P;
    if(P>MAX_THREADS_PER_BLOCK){
        cout<<"Number of process per block must less than MAX_T:"<<MAX_THREADS_PER_BLOCK<<endl;
        exit(-1);
    }
    // 2-D grid of P x P thread blocks covering the N x N matrix.
    num_of_blocks=(int)ceil(N/(double)num_of_threads_per_block);
    dim3 grid(num_of_blocks, num_of_blocks, 1);
    dim3 threads(num_of_threads_per_block, num_of_threads_per_block, 1);
    // Separate geometry for the tiled transpose kernel.
    num_of_blocks=N/TILE_DIM;
    dim3 grid_trans(num_of_blocks, num_of_blocks, 1);
    dim3 threads_trans(TILE_DIM, BLOCK_ROWS, 1);
    if(flag!=0){
        // GPU path: work on a scratch copy so d_mat stays pristine for the
        // next variant.
        checkCudaErrors(cudaMalloc((void **) &o_d_mat, mem_size));
        checkCudaErrors(cudaMemcpy(o_d_mat, d_mat, mem_size,
                        cudaMemcpyDeviceToDevice));
    }else{
        memcpy(o_h_mat,h_mat,mem_size);
        // cout << "num of blocks are: "<< num_of_blocks<<" num of threads per block: "<<
        // num_of_threads_per_block<<endl;
    }
    // Time the selected variant with GPU events.
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsed_time_ms;
    cudaEventRecord(start,0);
    if(!flag){ //work on CPU
        ST_APSP(o_h_mat, N);
    }else{
        if(flag==1){ //work on GPU
            CUDA_APSP_base(grid,threads,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }else if(flag==2){
            CUDA_APSP_coalcesing(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }else if(flag==3){
            CUDA_APSP_SharedMemory(grid,threads,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }else if(flag==4){
            CUDA_APSP_SharedMemory_double(grid,threads,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }else if(flag==5){
            CUDA_APSP_Advanced(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }else if(flag==6){
            CUDA_APSP_Advanced_double(grid,threads,grid_trans,threads_trans,o_d_mat,N);
            cudaDeviceSynchronize();
            checkCudaErrors(cudaMemcpy(o_h_mat2, o_d_mat, mem_size,
                            cudaMemcpyDeviceToHost));
        }
    }
    float speedUp=1;
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms,start,stop);
    cout.flags(ios::internal);
    cout.setf(ios::fixed);
    cout <<"\n Time to calculate results on ";
    if(!flag){
        cout <<"CPU: ";
        // note: assigns the local parameter only; callers use the returned
        // value to obtain the CPU time.
        cpu_time = elapsed_time_ms;
    }else{
        speedUp = cpu_time/elapsed_time_ms;
        if(flag==1)
            cout <<"GPU(baseline): ";
        else if(flag==2)
            cout <<"GPU(colasing only): ";
        else if(flag==3)
            cout <<"GPU(shared mem only): ";
        else if(flag==4)
            cout <<"GPU(share mem double): ";
        else if(flag==5)
            cout <<"GPU(Advanced): ";
        else if(flag==6)
            cout <<"GPU(Advanced,double): ";
    }
    cout<<setprecision(3)<<elapsed_time_ms<<" ms, ";
    if(flag!=0)
        cout<<speedUp<<" speed up than on CPU";
    cout<<endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    if(test){
        // Validate the GPU result against the CPU reference in o_h_mat.
        if(CmpArray(o_h_mat, o_h_mat2, N*N))
            printf("Your result is correct.\n");
        else
            printf("Your result is wrong.\n");
    }
    free(o_h_mat2);
    if(flag!=0)
        checkCudaErrors(cudaFree(o_d_mat));
    return elapsed_time_ms;
}
void usage(int argc, char **argv) {
    // Print command-line help to stderr and terminate with a nonzero status.
    static const char *help[] = {
        "\t<size of matrix> - side size of matrix n (positive integer)\n",
        "\t<num of process> - number of thread per block(1 - 32)\n",
        "\t<start command> - differnt setting on GPU(1-6)\n",
        "\t<end command> - differnt setting on GPU(larger than start command, 1-6)\n",
        "\t<test > - differnt setting on GPU(larger than start command, 1-6)\n",
    };
    fprintf(stderr, "Usage: %s <size of matrix> <num of process> <start command> <end command> <test>\n", argv[0]);
    for (size_t t = 0; t < sizeof(help)/sizeof(help[0]); ++t) {
        fprintf(stderr, "%s", help[t]);
    }
    exit(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
    // Driver: parse arguments, generate a random N x N distance matrix, run
    // the CPU reference once, then run the requested range [cmd_s, cmd_e] of
    // GPU variants, reporting timings and (optionally) validating results.
    /*Initial settings*/
    size_t N;
    size_t P;
    printf("%s Starting...\n\n", argv[0]);
    int devID = findCudaDevice(argc, (const char **)argv);
    (void)devID;  // device selection happens as a side effect of the call
    /*Get input*/
    if(argc != 6)
    {
        usage(argc,argv);
    }
    N = atoi(argv[1]);
    P = atoi(argv[2]);
    int cmd_s=atoi(argv[3]);
    int cmd_e=atoi(argv[4]);
    int test=atoi(argv[5]);
    // Fix: N and P are size_t; passing them through "%d" is undefined
    // behavior on LP64 platforms — cast explicitly to match the format.
    printf("Size is %d,numP is %d,cmd_s is %d, cmd_e is %d, test is %d \n",(int)N,(int)P,cmd_s,cmd_e,test);
    /*Allocate Data*/
    // NOTE(review): unsigned int overflows for very large N (N*N*4 > 4 GiB).
    unsigned int mem_size=sizeof(int)*N*N;
    // allocate host memory
    int *h_mat = (int*)malloc(mem_size);//use in CPU : as input
    int *o_h_mat = (int*)malloc(mem_size);//use in CPU : as input
    GenMatrix(h_mat, N);
    int *d_mat;//use in GPU : as input
    checkCudaErrors(cudaMalloc((void **) &d_mat, mem_size));
    checkCudaErrors(cudaMemcpy(d_mat, h_mat, mem_size,
                    cudaMemcpyHostToDevice));
    double cpu_time=0;
    /*Computation on HOST*/
    cpu_time=Computation(0,N,P,0,cpu_time,h_mat,o_h_mat); //h_mat compute -> h_mat
    /*Computation on GPU*/
    for(int i=cmd_s;i<=cmd_e;i++){
        Computation(i,N,P,test,cpu_time,h_mat,o_h_mat,d_mat);//d_mat compute->ref_mat
    }
    free(h_mat);
    free(o_h_mat);
    checkCudaErrors(cudaFree(d_mat));
    cudaDeviceReset();
}
|
613e761598b0e8a352f9e06eb5e56d8fa2933ec3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
/** Constructs the solver for the simulation window [start_time, end_time],
 *  with integration step `time_stepping` and input-data folder `main_path`.
 *  All heavy allocation is deferred to setup_system(). */
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
/** Releases the GraphLU workspace and the raw arrays allocated in
 *  setup_system(). GraphLU_Destroy presumably tears down the solver's
 *  internal buffers, so it must run before the struct itself is freed —
 *  confirm against the GraphLU API if this ordering is ever changed. */
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
/** One-time initialisation: loads all CSV input data, sizes the per-generator
 *  solution containers, and allocates the CSR buffers plus the GraphLU
 *  workspace used for the Y-bus matrix at every time step. */
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
/* start each run with fresh output files */
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
/* two real unknowns (Vx, Vy) per bus */
n = 2 * nBuses;
/* nonzero budget: factor 4 per branch and per bus — presumably 2x2 real
 * blocks of the complex admittance; TODO confirm against convert_to_CSR_arrays */
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
/* per-generator ODE state, embedded-RK error estimate, and d/q currents */
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
/* calloc zero-initialises the RHS/solution vectors */
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
/* simulation bookkeeping and step-size-controller settings */
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
/** Loads the full case description from `folder_path`: network topology
 *  (buses, AC lines, transformers), a previously computed power-flow solution
 *  (fdpf.csv), and the parameter tables for every supported machine,
 *  governor (GOV), exciter (AVR) and stabilizer (PSS) model. */
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
//read_fdpf_data(buses, "../output/fdpf.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
/* power-flow result provides the algebraic initial condition */
read_fdpf_data(buses, "../output/fdpf.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
/** Snapshots the per-bus voltage magnitude/angle into bus_voltage.
 *  At t == start_time the values come straight from the power-flow data;
 *  afterwards they are recovered from the rectangular (Vx, Vy) network
 *  solution stored in gVoltage. */
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
/* bus ids are 1-based (see get_bus_name_id_mapping). The original code
 * looked up bus_id_to_name[i], which skipped the last bus and silently
 * default-constructed an empty entry for id 0. */
string bus_name = bus_id_to_name[i + 1];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
/** Solves the sparse network equations in place with GraphLU: `rhs` enters
 *  as the current injection vector and leaves as the voltage solution.
 *  Returns "SUCCESS" or "FAILED". */
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* GraphLU_Solve_Singular tolerates a *numerically* singular matrix. */
const int status = GraphLU_Solve_Singular(matrix, rhs, 0);
if (status >= 0) {
return "SUCCESS";
}
printf("Error: solve_algebraic_equations_one_step: %d\n", status);
return "FAILED";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
/** Recovers each generator's d- and q-axis stator currents from the latest
 *  network voltage solution and the machine's subtransient EMFs (Edpp, Eqpp).
 *  Generators with Gen_Par == 0 (no machine parameter record) are skipped. */
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
/* map ids are 1-based; gVoltage is 0-based */
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
/* rectangular network voltage at the generator bus (x part, then y part
 * stored nBuses entries later) */
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
/* rotate (Vx, Vy) by the rotor angle delta into the machine d-q frame */
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
/* determinant of the 2x2 stator impedance; the assert guards the division
 * against degenerate machine data */
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
/** Applies a fault at `busID` during [fault_btime, fault_etime) by adding a
 *  huge term (INF) to the bus's self-edge in gEdge_all, and removes it again
 *  once the fault window has passed. is_fault_treated ensures each matrix
 *  modification happens exactly once per phase.
 *  NOTE(review): gEdge_all rows appear to be (from, to, value) with 0-based
 *  node ids — confirm against generate_edges_matrix. */
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
/* find the diagonal (self) edge of the faulted bus and boost it */
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS + time_stepping) { // fault cleared
/* re-arm the flag so the restoration branch below runs exactly once */
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
/* undo the fault contribution on the same diagonal entry */
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
/** Copies one generator's model selections (machine / governor / exciter /
 *  stabilizer) and its operating references into system.parameters, so the
 *  ODE right-hand side uses the correct parameter set for the next step. */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
/* AVR models 3-10 share one parameter table, as do 11-12 */
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
/* PSS models 4 and 6 share the pss_4_6 table */
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
/* Gen_Par == 0: no machine parameter record — synthesize safe defaults in
 * slot 0 (floor tiny reactances, huge inertia when TJ is missing, Ra = 0) */
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
/* operating references computed by compute_ODE_initial_values */
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
/** Main simulation loop: at each time step it rebuilds the Y-bus matrix
 *  (including the scripted fault at bus 81 during t in [3.0, 3.1)), solves
 *  the algebraic network equations with GraphLU, then advances every
 *  generator's ODE state with Boost.odeint dopri5 on thrust device vectors. */
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges, notice that the conversion is
once for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 5 steps (plus step 1) - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 81; // 36 is bus 28, in sichuan is bus 81;
assert(fault_bus_id <= nBuses);
apply_fault(fault_bus_id, current_time, 3.0, 3.1);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
/* flat device buffers sized for all generators' states/errors */
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
/* NOTE(review): gen_count is never incremented (the ++ below is commented
 * out), so every generator writes slot 0 of Vx/Vy/Id/Iq — confirm intent. */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
/* NOTE(review): these shadow the outer h2_* vectors declared above the loop */
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
//gen_count++;
thrust::copy(h2_gen_solution_set[0].begin(), h2_gen_solution_set[0].end(), &d2_gen_solution_set[0]);
thrust::copy(h2_gen_error_set[0].begin(), h2_gen_error_set[0].end(), &d2_gen_error_set[0]);
/* NOTE(review): sequence() overwrites the state just copied in with
 * 0,1,2,... — looks like leftover experiment scaffolding; verify before
 * trusting the results of do_step below. */
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
/* post-process: copy controller outputs into the solution vector */
gen_solution[bus_name][mu_output_idx] = system.get_mu();
gen_solution[bus_name][PT_output_idx] = system.get_Pmech();
gen_solution[bus_name][Efd_output_idx] = system.get_Efd();
gen_solution[bus_name][VS_output_idx] = system.get_VS();
}
printf("+++After 325 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
//thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
// time_stepping, d2_gen_error_set);
//for (auto& g_hldr : generators) {
// auto & bus_name=g_hldr.first;
// auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
//gen_solution[bus_name][mu_output_idx] = system.get_mu();
//gen_solution[bus_name][PT_output_idx] = system.get_Pmech();
//gen_solution[bus_name][Efd_output_idx] = system.get_Efd();
//gen_solution[bus_name][VS_output_idx] = system.get_VS();
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
//}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
| 613e761598b0e8a352f9e06eb5e56d8fa2933ec3.cu | #include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
/** Constructs the solver for the simulation window [start_time, end_time],
 *  with integration step `time_stepping` and input-data folder `main_path`.
 *  All heavy allocation is deferred to setup_system(). */
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
/** Releases the GraphLU workspace and the raw arrays allocated in
 *  setup_system(). GraphLU_Destroy presumably tears down the solver's
 *  internal buffers, so it must run before the struct itself is freed —
 *  confirm against the GraphLU API if this ordering is ever changed. */
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
/** One-time initialisation: loads all CSV input data, sizes the per-generator
 *  solution containers, and allocates the CSR buffers plus the GraphLU
 *  workspace used for the Y-bus matrix at every time step. */
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
/* start each run with fresh output files */
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
/* two real unknowns (Vx, Vy) per bus */
n = 2 * nBuses;
/* nonzero budget: factor 4 per branch and per bus — presumably 2x2 real
 * blocks of the complex admittance; TODO confirm against convert_to_CSR_arrays */
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
/* per-generator ODE state, embedded-RK error estimate, and d/q currents */
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
/* calloc zero-initialises the RHS/solution vectors */
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
/* simulation bookkeeping and step-size-controller settings */
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
/** Loads the full case description from `folder_path`: network topology
 *  (buses, AC lines, transformers), a previously computed power-flow solution
 *  (fdpf.csv), and the parameter tables for every supported machine,
 *  governor (GOV), exciter (AVR) and stabilizer (PSS) model. */
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
//read_fdpf_data(buses, "../output/fdpf.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
/* power-flow result provides the algebraic initial condition */
read_fdpf_data(buses, "../output/fdpf.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
/** Snapshots the per-bus voltage magnitude/angle into bus_voltage.
 *  At t == start_time the values come straight from the power-flow data;
 *  afterwards they are recovered from the rectangular (Vx, Vy) network
 *  solution stored in gVoltage. */
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
/* bus ids are 1-based (see get_bus_name_id_mapping). The original code
 * looked up bus_id_to_name[i], which skipped the last bus and silently
 * default-constructed an empty entry for id 0. */
string bus_name = bus_id_to_name[i + 1];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
/** Solves the sparse network equations in place with GraphLU: `rhs` enters
 *  as the current injection vector and leaves as the voltage solution.
 *  Returns "SUCCESS" or "FAILED". */
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* GraphLU_Solve_Singular tolerates a *numerically* singular matrix. */
const int status = GraphLU_Solve_Singular(matrix, rhs, 0);
if (status >= 0) {
return "SUCCESS";
}
printf("Error: solve_algebraic_equations_one_step: %d\n", status);
return "FAILED";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
/** Recovers each generator's d- and q-axis stator currents from the latest
 *  network voltage solution and the machine's subtransient EMFs (Edpp, Eqpp).
 *  Generators with Gen_Par == 0 (no machine parameter record) are skipped. */
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
/* map ids are 1-based; gVoltage is 0-based */
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
/* rectangular network voltage at the generator bus (x part, then y part
 * stored nBuses entries later) */
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
/* rotate (Vx, Vy) by the rotor angle delta into the machine d-q frame */
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
/* determinant of the 2x2 stator impedance; the assert guards the division
 * against degenerate machine data */
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
/** Applies a fault at `busID` during [fault_btime, fault_etime) by adding a
 *  huge term (INF) to the bus's self-edge in gEdge_all, and removes it again
 *  once the fault window has passed. is_fault_treated ensures each matrix
 *  modification happens exactly once per phase.
 *  NOTE(review): gEdge_all rows appear to be (from, to, value) with 0-based
 *  node ids — confirm against generate_edges_matrix. */
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
/* find the diagonal (self) edge of the faulted bus and boost it */
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS + time_stepping) { // fault cleared
/* re-arm the flag so the restoration branch below runs exactly once */
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
/* undo the fault contribution on the same diagonal entry */
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
/** Copies one generator's model selections (machine / governor / exciter /
 *  stabilizer) and its operating references into system.parameters, so the
 *  ODE right-hand side uses the correct parameter set for the next step. */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
/* AVR models 3-10 share one parameter table, as do 11-12 */
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
/* PSS models 4 and 6 share the pss_4_6 table */
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
/* Gen_Par == 0: no machine parameter record — synthesize safe defaults in
 * slot 0 (floor tiny reactances, huge inertia when TJ is missing, Ra = 0) */
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
/* operating references computed by compute_ODE_initial_values */
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges, notice that the conversion is
once for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 10 steps - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 81; // 36 is bus 28, in sichuan is bus 81;
assert(fault_bus_id <= nBuses);
apply_fault(fault_bus_id, current_time, 3.0, 3.1);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
//gen_count++;
thrust::copy(h2_gen_solution_set[0].begin(), h2_gen_solution_set[0].end(), &d2_gen_solution_set[0]);
thrust::copy(h2_gen_error_set[0].begin(), h2_gen_error_set[0].end(), &d2_gen_error_set[0]);
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
gen_solution[bus_name][mu_output_idx] = system.get_mu();
gen_solution[bus_name][PT_output_idx] = system.get_Pmech();
gen_solution[bus_name][Efd_output_idx] = system.get_Efd();
gen_solution[bus_name][VS_output_idx] = system.get_VS();
}
printf("+++After 325 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
//thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
// time_stepping, d2_gen_error_set);
//for (auto& g_hldr : generators) {
// auto & bus_name=g_hldr.first;
// auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
//gen_solution[bus_name][mu_output_idx] = system.get_mu();
//gen_solution[bus_name][PT_output_idx] = system.get_Pmech();
//gen_solution[bus_name][Efd_output_idx] = system.get_Efd();
//gen_solution[bus_name][VS_output_idx] = system.get_VS();
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
//}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
|
8ba63ef3e427de666be4b9a9b18035f6ac54f138.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <algorithm>
#include <random>
/// Buffer sizes we consider. The numbers are odd such that p[i]=(2*i)%K are all different.
static constexpr int kBufferSizes[] = {
17, 65, 251, 1001, 2001, 5001,
10'001, 25'001, 50'001, 100'001, 250'001, 500'001, 1'000'001,
5'000'001, 20'000'001, 50'000'001,
};
/// Gather copy: a[i] = b[p[i]] for every i < K. One thread per element;
/// p is a host-computed permutation uploaded to the device beforehand.
__global__ void copyKernelABP(int K, double *a, const int* p, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail: threads past the buffer end do nothing
  a[gid] = b[p[gid]];
}
/// Scatter copy: a[p[i]] = b[i] for every i < K. Reads are coalesced,
/// writes follow the permutation's access pattern.
__global__ void copyKernelAPB(int K, double *a, const int* p, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  a[p[gid]] = b[gid];
}
/// Element-wise add: a[i] = a[i] + b[i] for every i < K (1 FLOP per element).
__global__ void addKernelAAB(int K, double *a, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  a[gid] = a[gid] + b[gid];
}
/// Element-wise add repeated 100x per element (100 FLOPs per element),
/// used to contrast arithmetic-bound vs memory-bound throughput.
__global__ void addKernelAAB100(int K, double *a, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  // Loads/stores stay inside the loop so the benchmark's memory traffic
  // is unchanged (the compiler may still keep the values in registers).
  for (int rep = 0; rep < 100; ++rep)
    a[gid] = a[gid] + b[gid];
}
/**
 * Task 1b: GPU memory micro-benchmarks (HIP build).
 *  1b.1) host->device upload bandwidth per buffer size,
 *  1b.2) permuted copy bandwidth for several access patterns,
 *  1b.3/4) FLOP/s of a 1x vs 100x element-wise add.
 * Buffers are allocated once for the largest size (maxK) and reused.
 * NOTE(review): assumes benchmark() (utils.h) synchronizes the device around
 * the timed lambda — kernel launches are asynchronous; confirm in utils.h.
 */
void subtask_b() {
  hipDeviceProp_t prop;
  // Fix: device queries can fail too (e.g. no device present) — check them
  // like every other runtime API call, per this file's own convention.
  CUDA_CHECK(hipGetDeviceProperties(&prop, 0));
  const int threadsPerBlock = prop.maxThreadsPerBlock;
  printf("Using device with %d threads per block\n", threadsPerBlock);
  // Largest case we will ever run; everything below is sized for it.
  int maxK = kBufferSizes[sizeof(kBufferSizes) / sizeof(kBufferSizes[0]) - 1];
  /// Pick a N with respect to K such that total running time is more or less uniform.
  auto pickN = [](int K) {
    return 100'000 / (int)std::sqrt(K) + 5;  // Some heuristics.
  };
  double *aDev;
  double *bDev;
  int *pDev;
  double *aHost;  // pinned host buffer (hipHostMalloc) for fast transfers
  int *pHost;     // pinned host buffer holding the permutation
  // Allocate device and pinned host buffers once, large enough for maxK.
  CUDA_CHECK(hipMalloc(&aDev, maxK * sizeof(double)));
  CUDA_CHECK(hipMalloc(&bDev, maxK * sizeof(double)));
  CUDA_CHECK(hipMalloc(&pDev, maxK * sizeof(int)));
  CUDA_CHECK(hipHostMalloc(&aHost, maxK * sizeof(double)));
  CUDA_CHECK(hipHostMalloc(&pHost, maxK * sizeof(int)));
  // Set aDev, bDev and aHost to 0.0 (not really that important).
  CUDA_CHECK(hipMemset(aDev, 0, maxK * sizeof(double)));
  CUDA_CHECK(hipMemset(bDev, 0, maxK * sizeof(double)));
  memset(aHost, 0, maxK * sizeof(double));
  // Task 1b.1) Synchronous upload of K doubles, averaged over pickN(K) runs.
  for (int K : kBufferSizes) {
    double dt = benchmark(pickN(K), [aDev, aHost, K](){
      CUDA_CHECK(hipMemcpy(aDev, aHost, K * sizeof(double), hipMemcpyHostToDevice));
    });
    double gbps = K*sizeof(double) / dt / 1e9;  // Gigabytes per second.
    // printf("upload K=%8d, averged over %8d runs --> %5.2f GB/s\n", K, pickN(K), gbps);
    printf("upload K=%8d --> %5.2f GB/s\n", K, gbps);
  }
  // Task 1b.2)
  /// Benchmark copying for a given access pattern (permutation).
  auto benchmarkPermutedCopy = [=](const char *description, auto permutationFunc) {
    for (int K : kBufferSizes) {
      // Compute the permutation p[i] on the host, then upload it.
      permutationFunc(K);
      CUDA_CHECK(hipMemcpy(pDev, pHost, K * sizeof(int), hipMemcpyHostToDevice));
      // Benchmark the gather kernel a_i = b_{p_i}.
      double dtABP = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev, pDev]() {
        CUDA_LAUNCH(copyKernelABP, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, pDev, bDev);
      });
      // Benchmark the scatter kernel a_{p_i} = b_i.
      double dtAPB = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev, pDev]() {
        CUDA_LAUNCH(copyKernelAPB, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, pDev, bDev);
      });
      // Report how many bytes per second was written.
      printf("Case %s --> K=%8d [a=b_p] %6.2f GB/s [a_p=b] %6.2f GB/s written\n",
             description, K,
             1e-9 * K * sizeof(double) / dtABP,
             1e-9 * K * sizeof(double) / dtAPB);
    }
  };
  // The patterns are already implemented, do not modify!
  std::mt19937 gen;  // default-seeded -> same permutations on every run
  benchmarkPermutedCopy("p[i]=i", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
  });
  benchmarkPermutedCopy("p[i]=(2*i)%K", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = (2 * i) % K;
  });
  benchmarkPermutedCopy("p[i]=(4*i)%K", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = (4 * i) % K;
  });
  benchmarkPermutedCopy("p[i]=i, 32-shuffled", [pHost, &gen](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
    for (int i = 0; i < K; i += 32)
      // Fix: use std::min (from <algorithm>) as in the CUDA original instead
      // of the unqualified global ::min, which is not portable host C++.
      std::shuffle(pHost + i, pHost + std::min(i + 32, K), gen);
  });
  benchmarkPermutedCopy("fully shuffled", [pHost, &gen](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
    std::shuffle(pHost, pHost + K, gen);
  });
  // Task 1b.3) and 1b.4)
  for (int K : kBufferSizes) {
    // Benchmark the single a_i += b_i kernel (1 FLOP/element).
    double dt1 = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev]() {
      CUDA_LAUNCH(addKernelAAB, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, bDev);
    });
    // Benchmark the kernel that repeats a_i += b_i 100x times (100 FLOP/element).
    double dt100 = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev]() {
      CUDA_LAUNCH(addKernelAAB100, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, bDev);
    });
    // printf("dts = %f, %f\n", dt1, dt100);
    double gflops1 = K*1E-9/dt1;
    double gflops100 = K*100*1E-9/dt100;
    printf("a+b --> K=%8d 1x -> %4.1f GFLOP/s 100x -> %5.1f GFLOP/s\n", K, gflops1, gflops100);
  }
  // Fix: free calls were unchecked; a failing free usually signals earlier
  // corruption or an invalid pointer and should not pass silently.
  CUDA_CHECK(hipFree(aDev));
  CUDA_CHECK(hipFree(bDev));
  CUDA_CHECK(hipFree(pDev));
  CUDA_CHECK(hipHostFree(aHost));
  CUDA_CHECK(hipHostFree(pHost));
}
/// Entry point: run the Task 1b benchmark suite.
int main() {
  subtask_b();
  return 0;  // explicit success code (main returns 0 implicitly otherwise)
}
| 8ba63ef3e427de666be4b9a9b18035f6ac54f138.cu | #include "utils.h"
#include <algorithm>
#include <random>
/// Buffer sizes we consider. The numbers are odd such that p[i]=(2*i)%K are all different.
static constexpr int kBufferSizes[] = {
17, 65, 251, 1001, 2001, 5001,
10'001, 25'001, 50'001, 100'001, 250'001, 500'001, 1'000'001,
5'000'001, 20'000'001, 50'000'001,
};
/// Gather copy: a[i] = b[p[i]] for every i < K. One thread per element;
/// p is a host-computed permutation uploaded to the device beforehand.
__global__ void copyKernelABP(int K, double *a, const int* p, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail: threads past the buffer end do nothing
  a[gid] = b[p[gid]];
}
/// Scatter copy: a[p[i]] = b[i] for every i < K. Reads are coalesced,
/// writes follow the permutation's access pattern.
__global__ void copyKernelAPB(int K, double *a, const int* p, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  a[p[gid]] = b[gid];
}
/// Element-wise add: a[i] = a[i] + b[i] for every i < K (1 FLOP per element).
__global__ void addKernelAAB(int K, double *a, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  a[gid] = a[gid] + b[gid];
}
/// Element-wise add repeated 100x per element (100 FLOPs per element),
/// used to contrast arithmetic-bound vs memory-bound throughput.
__global__ void addKernelAAB100(int K, double *a, const double*b) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= K)
    return;  // grid tail guard
  // Loads/stores stay inside the loop so the benchmark's memory traffic
  // is unchanged (the compiler may still keep the values in registers).
  for (int rep = 0; rep < 100; ++rep)
    a[gid] = a[gid] + b[gid];
}
/**
 * Task 1b: GPU memory micro-benchmarks (CUDA build).
 *  1b.1) host->device upload bandwidth per buffer size,
 *  1b.2) permuted copy bandwidth for several access patterns,
 *  1b.3/4) FLOP/s of a 1x vs 100x element-wise add.
 * Buffers are allocated once for the largest size (maxK) and reused.
 * NOTE(review): assumes benchmark() (utils.h) synchronizes the device around
 * the timed lambda — kernel launches are asynchronous; confirm in utils.h.
 */
void subtask_b() {
  cudaDeviceProp prop;
  // Fix: device queries can fail too (e.g. no device present) — check them
  // like every other runtime API call, per this file's own convention.
  CUDA_CHECK(cudaGetDeviceProperties(&prop, 0));
  const int threadsPerBlock = prop.maxThreadsPerBlock;
  printf("Using device with %d threads per block\n", threadsPerBlock);
  // Largest case we will ever run; everything below is sized for it.
  int maxK = kBufferSizes[sizeof(kBufferSizes) / sizeof(kBufferSizes[0]) - 1];
  /// Pick a N with respect to K such that total running time is more or less uniform.
  auto pickN = [](int K) {
    return 100'000 / (int)std::sqrt(K) + 5;  // Some heuristics.
  };
  double *aDev;
  double *bDev;
  int *pDev;
  double *aHost;  // pinned host buffer (cudaMallocHost) for fast transfers
  int *pHost;     // pinned host buffer holding the permutation
  // Allocate device and pinned host buffers once, large enough for maxK.
  CUDA_CHECK(cudaMalloc(&aDev, maxK * sizeof(double)));
  CUDA_CHECK(cudaMalloc(&bDev, maxK * sizeof(double)));
  CUDA_CHECK(cudaMalloc(&pDev, maxK * sizeof(int)));
  CUDA_CHECK(cudaMallocHost(&aHost, maxK * sizeof(double)));
  CUDA_CHECK(cudaMallocHost(&pHost, maxK * sizeof(int)));
  // Set aDev, bDev and aHost to 0.0 (not really that important).
  CUDA_CHECK(cudaMemset(aDev, 0, maxK * sizeof(double)));
  CUDA_CHECK(cudaMemset(bDev, 0, maxK * sizeof(double)));
  memset(aHost, 0, maxK * sizeof(double));
  // Task 1b.1) Synchronous upload of K doubles, averaged over pickN(K) runs.
  for (int K : kBufferSizes) {
    double dt = benchmark(pickN(K), [aDev, aHost, K](){
      CUDA_CHECK(cudaMemcpy(aDev, aHost, K * sizeof(double), cudaMemcpyHostToDevice));
    });
    double gbps = K*sizeof(double) / dt / 1e9;  // Gigabytes per second.
    // printf("upload K=%8d, averged over %8d runs --> %5.2f GB/s\n", K, pickN(K), gbps);
    printf("upload K=%8d --> %5.2f GB/s\n", K, gbps);
  }
  // Task 1b.2)
  /// Benchmark copying for a given access pattern (permutation).
  auto benchmarkPermutedCopy = [=](const char *description, auto permutationFunc) {
    for (int K : kBufferSizes) {
      // Compute the permutation p[i] on the host, then upload it.
      permutationFunc(K);
      CUDA_CHECK(cudaMemcpy(pDev, pHost, K * sizeof(int), cudaMemcpyHostToDevice));
      // Benchmark the gather kernel a_i = b_{p_i}.
      double dtABP = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev, pDev]() {
        CUDA_LAUNCH(copyKernelABP, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, pDev, bDev);
      });
      // Benchmark the scatter kernel a_{p_i} = b_i.
      double dtAPB = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev, pDev]() {
        CUDA_LAUNCH(copyKernelAPB, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, pDev, bDev);
      });
      // Report how many bytes per second was written.
      printf("Case %s --> K=%8d [a=b_p] %6.2f GB/s [a_p=b] %6.2f GB/s written\n",
             description, K,
             1e-9 * K * sizeof(double) / dtABP,
             1e-9 * K * sizeof(double) / dtAPB);
    }
  };
  // The patterns are already implemented, do not modify!
  std::mt19937 gen;  // default-seeded -> same permutations on every run
  benchmarkPermutedCopy("p[i]=i", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
  });
  benchmarkPermutedCopy("p[i]=(2*i)%K", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = (2 * i) % K;
  });
  benchmarkPermutedCopy("p[i]=(4*i)%K", [pHost](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = (4 * i) % K;
  });
  benchmarkPermutedCopy("p[i]=i, 32-shuffled", [pHost, &gen](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
    for (int i = 0; i < K; i += 32)
      std::shuffle(pHost + i, pHost + std::min(i + 32, K), gen);
  });
  benchmarkPermutedCopy("fully shuffled", [pHost, &gen](int K) {
    for (int i = 0; i < K; ++i)
      pHost[i] = i;
    std::shuffle(pHost, pHost + K, gen);
  });
  // Task 1b.3) and 1b.4)
  for (int K : kBufferSizes) {
    // Benchmark the single a_i += b_i kernel (1 FLOP/element).
    double dt1 = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev]() {
      CUDA_LAUNCH(addKernelAAB, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, bDev);
    });
    // Benchmark the kernel that repeats a_i += b_i 100x times (100 FLOP/element).
    double dt100 = benchmark(pickN(K), [threadsPerBlock, K, aDev, bDev]() {
      CUDA_LAUNCH(addKernelAAB100, (K+threadsPerBlock-1) / threadsPerBlock, threadsPerBlock, K, aDev, bDev);
    });
    // printf("dts = %f, %f\n", dt1, dt100);
    double gflops1 = K*1E-9/dt1;
    double gflops100 = K*100*1E-9/dt100;
    printf("a+b --> K=%8d 1x -> %4.1f GFLOP/s 100x -> %5.1f GFLOP/s\n", K, gflops1, gflops100);
  }
  // Fix: free calls were unchecked; a failing free usually signals earlier
  // corruption or an invalid pointer and should not pass silently.
  CUDA_CHECK(cudaFree(aDev));
  CUDA_CHECK(cudaFree(bDev));
  CUDA_CHECK(cudaFree(pDev));
  CUDA_CHECK(cudaFreeHost(aHost));
  CUDA_CHECK(cudaFreeHost(pHost));
}
/// Entry point: run the Task 1b benchmark suite.
int main() {
  subtask_b();
  return 0;  // explicit success code (main returns 0 implicitly otherwise)
}
|
e026931bd31d6adbf07902a1e91c2d697ef3a542.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "common_hip.cuh"
#include <kat/on_device/math.cuh>
#include <kat/on_device/printing.cuh>
namespace kernels {
template <typename I>
__global__ void try_out_integral_math_functions(I* results, I* __restrict expected)
{
size_t i { 0 };
bool print_first_indices_for_each_function { false };
auto maybe_print = [&](const char* section_title) {
if (print_first_indices_for_each_function) {
printf("%-30s tests start at index %3d\n", section_title, i);
}
};
results[i] = kat::strictly_between<I>( I{ 0 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::strictly_between<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::strictly_between<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::strictly_between<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::strictly_between<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
maybe_print("between_or_equal");
results[i] = kat::between_or_equal<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::between_or_equal<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::between_or_equal<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::between_or_equal<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::between_or_equal<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::between_or_equal<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::between_or_equal<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
results[i] = kat::between_or_equal<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
results[i] = kat::between_or_equal<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
maybe_print("is_power_of_2");
results[i] = kat::is_power_of_2<I>(I{ 1}); expected[i++] = true;
results[i] = kat::is_power_of_2<I>(I{ 2}); expected[i++] = true;
results[i] = kat::is_power_of_2<I>(I{ 4}); expected[i++] = true;
results[i] = kat::is_power_of_2<I>(I{ 7}); expected[i++] = false;
results[i] = kat::is_power_of_2<I>(I{32}); expected[i++] = true;
results[i] = kat::is_power_of_2<I>(I{33}); expected[i++] = false;
maybe_print("modular_increment");
results[i] = kat::modular_increment<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
results[i] = kat::modular_increment<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
results[i] = kat::modular_increment<I>(I{ 0}, I{ 3}); expected[i++] = I{ 1 };
results[i] = kat::modular_increment<I>(I{ 1}, I{ 3}); expected[i++] = I{ 2 };
results[i] = kat::modular_increment<I>(I{ 2}, I{ 3}); expected[i++] = I{ 0 };
results[i] = kat::modular_increment<I>(I{ 3}, I{ 3}); expected[i++] = I{ 1 };
results[i] = kat::modular_increment<I>(I{ 4}, I{ 3}); expected[i++] = I{ 2 };
maybe_print("modular_decrement");
results[i] = kat::modular_decrement<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
results[i] = kat::modular_decrement<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
results[i] = kat::modular_decrement<I>(I{ 0}, I{ 3}); expected[i++] = I{ 2 };
results[i] = kat::modular_decrement<I>(I{ 1}, I{ 3}); expected[i++] = I{ 0 };
results[i] = kat::modular_decrement<I>(I{ 2}, I{ 3}); expected[i++] = I{ 1 };
results[i] = kat::modular_decrement<I>(I{ 3}, I{ 3}); expected[i++] = I{ 2 };
results[i] = kat::modular_decrement<I>(I{ 4}, I{ 3}); expected[i++] = I{ 0 };
maybe_print("ipow");
results[i] = kat::ipow<I>(I{ 0 }, 1 ); expected[i++] = I{ 0 };
results[i] = kat::ipow<I>(I{ 0 }, 2 ); expected[i++] = I{ 0 };
results[i] = kat::ipow<I>(I{ 0 }, 100 ); expected[i++] = I{ 0 };
results[i] = kat::ipow<I>(I{ 1 }, 0 ); expected[i++] = I{ 1 };
results[i] = kat::ipow<I>(I{ 1 }, 1 ); expected[i++] = I{ 1 };
results[i] = kat::ipow<I>(I{ 1 }, 2 ); expected[i++] = I{ 1 };
results[i] = kat::ipow<I>(I{ 1 }, 100 ); expected[i++] = I{ 1 };
results[i] = kat::ipow<I>(I{ 3 }, 0 ); expected[i++] = I{ 1 };
results[i] = kat::ipow<I>(I{ 3 }, 1 ); expected[i++] = I{ 3 };
results[i] = kat::ipow<I>(I{ 3 }, 2 ); expected[i++] = I{ 9 };
results[i] = kat::ipow<I>(I{ 3 }, 4 ); expected[i++] = I{ 81 };
maybe_print("unsafe div_rounding_up");
results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
maybe_print("div_rounding_up");
results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
results[i] = kat::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() , std::numeric_limits<I>::max() - 1 ); expected[i++] = I{ 2 };
results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ 1 };
maybe_print("round_down");
results[i] = kat::round_down<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 123 };
results[i] = kat::round_down<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 123 };
maybe_print("round_down_to_full_warps");
results[i] = kat::round_down_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
results[i] = kat::round_down_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 32 };
results[i] = kat::round_down_to_full_warps<I>( I{ 125 } ); expected[i++] = I{ 96 };
// TODO: Consider testing rounding-up with negative dividends
maybe_print("unsafe round_up");
results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::unsafe::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 };
results[i] = kat::unsafe::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 };
results[i] = kat::unsafe::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 };
maybe_print("round_up");
results[i] = kat::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
results[i] = kat::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 };
results[i] = kat::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 };
results[i] = kat::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 };
results[i] = kat::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ std::numeric_limits<I>::max() };
maybe_print("round_down_to_power_of_2");
results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ); expected[i++] = I{ 123 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ); expected[i++] = I{ 122 };
maybe_print("round_up_to_power_of_2");
results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
results[i] = kat::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
maybe_print("unsafe round_up_to_power_of_2");
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
maybe_print("round_up_to_full_warps");
results[i] = kat::round_up_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 };
results[i] = kat::round_up_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 32 };
results[i] = kat::round_up_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 32 };
results[i] = kat::round_up_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 32 };
results[i] = kat::round_up_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 32 };
results[i] = kat::round_up_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
results[i] = kat::round_up_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 64 };
results[i] = kat::round_up_to_full_warps<I>( I{ 63 } ); expected[i++] = I{ 64 };
maybe_print("gcd");
results[i] = kat::gcd<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::gcd<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::gcd<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::gcd<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::gcd<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 4 };
results[i] = kat::gcd<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 4 };
results[i] = kat::gcd<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 2 };
results[i] = kat::gcd<I>( I{ 120 }, I{ 70 } ); expected[i++] = I{ 10 };
results[i] = kat::gcd<I>( I{ 70 }, I{ 120 } ); expected[i++] = I{ 10 };
results[i] = kat::gcd<I>( I{ 97 }, I{ 120 } ); expected[i++] = I{ 1 };
maybe_print("lcm");
results[i] = kat::lcm<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
results[i] = kat::lcm<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
results[i] = kat::lcm<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::lcm<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
results[i] = kat::lcm<I>( I{ 5 }, I{ 3 } ); expected[i++] = I{ 15 };
results[i] = kat::lcm<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 8 };
results[i] = kat::lcm<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 8 };
results[i] = kat::lcm<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 30 };
maybe_print("is_even");
results[i] = kat::is_even<I>( I{ 0 } ); expected[i++] = true;
results[i] = kat::is_even<I>( I{ 1 } ); expected[i++] = false;
results[i] = kat::is_even<I>( I{ 2 } ); expected[i++] = true;
results[i] = kat::is_even<I>( I{ 3 } ); expected[i++] = false;
results[i] = kat::is_even<I>( I{ 123 } ); expected[i++] = false;
results[i] = kat::is_even<I>( I{ 124 } ); expected[i++] = true;
maybe_print("is_odd");
results[i] = kat::is_odd<I>( I{ 0 } ); expected[i++] = false;
results[i] = kat::is_odd<I>( I{ 1 } ); expected[i++] = true;
results[i] = kat::is_odd<I>( I{ 2 } ); expected[i++] = false;
results[i] = kat::is_odd<I>( I{ 3 } ); expected[i++] = true;
results[i] = kat::is_odd<I>( I{ 123 } ); expected[i++] = true;
results[i] = kat::is_odd<I>( I{ 124 } ); expected[i++] = false;
maybe_print("log2");
results[i] = kat::log2<I>( I{ 1 } ); expected[i++] = 0;
results[i] = kat::log2<I>( I{ 2 } ); expected[i++] = 1;
results[i] = kat::log2<I>( I{ 3 } ); expected[i++] = 1;
results[i] = kat::log2<I>( I{ 4 } ); expected[i++] = 2;
results[i] = kat::log2<I>( I{ 6 } ); expected[i++] = 2;
results[i] = kat::log2<I>( I{ 7 } ); expected[i++] = 2;
results[i] = kat::log2<I>( I{ 8 } ); expected[i++] = 3;
results[i] = kat::log2<I>( I{ 127 } ); expected[i++] = 6;
// We don't have a goot integer sqrt() implementation to offer here. Perhaps
// we could offer something based on casting to float?
//
// results[i] = kat::sqrt<I>( I{ 0 } ); expected[i++] = 0;
// results[i] = kat::sqrt<I>( I{ 1 } ); expected[i++] = 1;
// results[i] = kat::sqrt<I>( I{ 2 } ); expected[i++] = 1;
// results[i] = kat::sqrt<I>( I{ 3 } ); expected[i++] = 1;
// results[i] = kat::sqrt<I>( I{ 4 } ); expected[i++] = 2;
// results[i] = kat::sqrt<I>( I{ 5 } ); expected[i++] = 2;
// results[i] = kat::sqrt<I>( I{ 9 } ); expected[i++] = 3;
// results[i] = kat::sqrt<I>( I{ 10 } ); expected[i++] = 3;
// results[i] = kat::sqrt<I>( I{ 127 } ); expected[i++] = 11;
maybe_print("div_by_power_of_2");
results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 1 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 1 }); expected[i++] = I{ 1 };
results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 1 }); expected[i++] = I{ 111 };
results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 2 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 2 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 2 }, I { 2 }); expected[i++] = I{ 1 };
results[i] = kat::div_by_power_of_2<I>( I{ 3 }, I { 2 }); expected[i++] = I{ 1 };
results[i] = kat::div_by_power_of_2<I>( I{ 4 }, I { 2 }); expected[i++] = I{ 2 };
results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 2 }); expected[i++] = I{ 55 };
results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 16 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 16 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 15 }, I { 16 }); expected[i++] = I{ 0 };
results[i] = kat::div_by_power_of_2<I>( I{ 16 }, I { 16 }); expected[i++] = I{ 1 };
results[i] = kat::div_by_power_of_2<I>( I{ 17 }, I { 16 }); expected[i++] = I{ 1 };
results[i] = kat::div_by_power_of_2<I>( I{ 32 }, I { 16 }); expected[i++] = I{ 2 };
results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 16 }); expected[i++] = I{ 6 };
maybe_print("divides");
results[i] = kat::divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 3 }, I{ 0 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 3 }, I{ 1 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 3 }, I{ 2 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 6 }, I{ 9 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 9 }, I{ 6 } ); expected[i++] = false;
results[i] = kat::divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true;
results[i] = kat::divides<I>( I{ 24 }, I{ 4 } ); expected[i++] = false;
maybe_print("is_divisible_by");
results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 3 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 2 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 3 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 3 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 4 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 9 }, I{ 6 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 6 }, I{ 9 } ); expected[i++] = false;
results[i] = kat::is_divisible_by<I>( I{ 24 }, I{ 4 } ); expected[i++] = true;
results[i] = kat::is_divisible_by<I>( I{ 4 }, I{ 24 } ); expected[i++] = false;
maybe_print("is_divisible_by_power_of_2");
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = false;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = false;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 24 }, I{ 4 } ); expected[i++] = true;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ); expected[i++] = false;
results[i] = kat::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ); expected[i++] = true;
maybe_print("power_of_2_divides");
results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false;
results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false;
results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true;
results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 72 } ); expected[i++] = false;
results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 64 } ); expected[i++] = true;
maybe_print("log2_of_power_of_2");
results[i] = kat::log2_of_power_of_2<I>( I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::log2_of_power_of_2<I>( I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::log2_of_power_of_2<I>( I{ 4 } ); expected[i++] = I{ 2 };
results[i] = kat::log2_of_power_of_2<I>( I{ 8 } ); expected[i++] = I{ 3 };
results[i] = kat::log2_of_power_of_2<I>( I{ 16 } ); expected[i++] = I{ 4 };
results[i] = kat::log2_of_power_of_2<I>( I{ 32 } ); expected[i++] = I{ 5 };
results[i] = kat::log2_of_power_of_2<I>( I{ 64 } ); expected[i++] = I{ 6 };
maybe_print("modulo_power_of_2");
results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 4 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 4 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = I{ 2 };
results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 4 } ); expected[i++] = I{ 3 };
results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 4 } ); expected[i++] = I{ 0 };
results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 4 } ); expected[i++] = I{ 1 };
results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ); expected[i++] = I{ 3 };
#define NUM_TEST_LINES 268
}
} // namespace kernels
// TODO:
// * Test between_or_equal and strictly_between with differing types for all 3 arguments
// * Some floating-point tests
// * gcd tests with values of different types
// * Some tests with negative values
// Instantiates the compile-time (constexpr) execution check for type _tp.
// Fix: the original ended its last line with a stray '\' line-continuation,
// which spliced the following #define into this macro's body (ill-formed if
// expanded, and left INTEGER_TYPES undefined).
#define INSTANTIATE_CONSTEXPR_MATH_TEST(_tp) \
	compile_time_execution_results<_tp> UNIQUE_IDENTIFIER(test_struct_);

// Every integral type over which the test suite is instantiated — both the
// fixed-width aliases and all fundamental character/integer types.
#define INTEGER_TYPES \
	int8_t, int16_t, int32_t, int64_t, \
	uint8_t, uint16_t, uint32_t, uint64_t, \
	char, short, int, long, long long, \
	signed char, signed short, signed int, signed long, signed long long, \
	unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long
// Run-time (as opposed to constexpr/compile-time) verification of the integral
// math functions, executed on the GPU for every type in INTEGER_TYPES.
TEST_SUITE("math") {
// Launches kernels::try_out_integral_math_functions<I> with a single thread;
// the kernel records both the computed and the expected value of every check,
// and the host compares the two arrays element by element.
TEST_CASE_TEMPLATE("run-time on-device", I, INTEGER_TYPES)
{
cuda::device_t<> device { cuda::device::current::get() };
// One thread in one block suffices — the kernel is a sequential battery of checks
auto block_size { 1 };
auto num_grid_blocks { 1 };
auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
// Device-side buffers for the actual and the reference values, one slot per check
auto device_side_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
auto device_side_expected_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
auto host_side_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
auto host_side_expected_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
cuda::launch(
kernels::try_out_integral_math_functions<I>,
launch_config,
device_side_results.get(), device_side_expected_results.get());
// NOTE(review): no explicit synchronization between launch and copy —
// presumably cuda::memory::copy orders after the launch on the default
// stream; confirm against the cuda-api-wrappers semantics.
cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(I) * NUM_TEST_LINES);
cuda::memory::copy(host_side_expected_results.get(), device_side_expected_results.get(), sizeof(I) * NUM_TEST_LINES);
for(auto i { 0 }; i < NUM_TEST_LINES; i++) {
CHECK(host_side_results.get()[i] == host_side_expected_results.get()[i]);
// On mismatch, report the index so the failing check can be located in the kernel
if (host_side_results.get()[i] != host_side_expected_results.get()[i]) {
MESSAGE("index of failure was: " << i);
}
}
}
} // TEST_SUITE("math")
| e026931bd31d6adbf07902a1e91c2d697ef3a542.cu | #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "common.cuh"
#include <kat/on_device/math.cuh>
#include <kat/on_device/printing.cuh>
namespace kernels {

/*
 * Exercises every check of kat's integral math functions on the device.
 *
 * For each check, the value computed by the function under test is written to
 * results[] and a hand-computed reference value to expected[]; the host then
 * compares the two arrays element by element. Both arrays must hold at least
 * NUM_TEST_LINES (268) elements. Intended to be launched with a single thread
 * (1 block x 1 thread); additional threads would merely repeat the same writes.
 *
 * Fix: the debug printf previously passed the size_t index `i` to a %3d
 * conversion — a format/argument mismatch (undefined behavior). The index is
 * now cast to int (it never exceeds NUM_TEST_LINES, so the cast is lossless).
 */
template <typename I>
__global__ void try_out_integral_math_functions(I* results, I* __restrict expected)
{
	size_t i { 0 };
	// Flip to true when debugging, to learn which result index starts each
	// function's group of checks.
	bool print_first_indices_for_each_function { false };
	auto maybe_print = [&](const char* section_title) {
		if (print_first_indices_for_each_function) {
			// Cast: device-side printf's support for %zu is not guaranteed,
			// and i always fits in an int here.
			printf("%-30s tests start at index %3d\n", section_title, (int) i);
		}
	};
	maybe_print("strictly_between"); // added for consistency with the other sections
	results[i] = kat::strictly_between<I>( I{ 0 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::strictly_between<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::strictly_between<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	maybe_print("between_or_equal");
	results[i] = kat::between_or_equal<I>( I{ 1 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{ 4 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{ 5 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{ 6 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{ 8 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{ 9 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{ 10 }, I{ 5 }, I{ 10 } ); expected[i++] = true;
	results[i] = kat::between_or_equal<I>( I{ 11 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	results[i] = kat::between_or_equal<I>( I{ 123 }, I{ 5 }, I{ 10 } ); expected[i++] = false;
	maybe_print("is_power_of_2");
	results[i] = kat::is_power_of_2<I>(I{ 1}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 2}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 4}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{ 7}); expected[i++] = false;
	results[i] = kat::is_power_of_2<I>(I{32}); expected[i++] = true;
	results[i] = kat::is_power_of_2<I>(I{33}); expected[i++] = false;
	maybe_print("modular_increment");
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 0}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 1}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_increment<I>(I{ 2}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_increment<I>(I{ 3}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_increment<I>(I{ 4}, I{ 3}); expected[i++] = I{ 2 };
	maybe_print("modular_decrement");
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 1}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 0}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 1}, I{ 3}); expected[i++] = I{ 0 };
	results[i] = kat::modular_decrement<I>(I{ 2}, I{ 3}); expected[i++] = I{ 1 };
	results[i] = kat::modular_decrement<I>(I{ 3}, I{ 3}); expected[i++] = I{ 2 };
	results[i] = kat::modular_decrement<I>(I{ 4}, I{ 3}); expected[i++] = I{ 0 };
	maybe_print("ipow");
	results[i] = kat::ipow<I>(I{ 0 }, 1 ); expected[i++] = I{ 0 };
	results[i] = kat::ipow<I>(I{ 0 }, 2 ); expected[i++] = I{ 0 };
	results[i] = kat::ipow<I>(I{ 0 }, 100 ); expected[i++] = I{ 0 };
	results[i] = kat::ipow<I>(I{ 1 }, 0 ); expected[i++] = I{ 1 };
	results[i] = kat::ipow<I>(I{ 1 }, 1 ); expected[i++] = I{ 1 };
	results[i] = kat::ipow<I>(I{ 1 }, 2 ); expected[i++] = I{ 1 };
	results[i] = kat::ipow<I>(I{ 1 }, 100 ); expected[i++] = I{ 1 };
	results[i] = kat::ipow<I>(I{ 3 }, 0 ); expected[i++] = I{ 1 };
	results[i] = kat::ipow<I>(I{ 3 }, 1 ); expected[i++] = I{ 3 };
	results[i] = kat::ipow<I>(I{ 3 }, 2 ); expected[i++] = I{ 9 };
	results[i] = kat::ipow<I>(I{ 3 }, 4 ); expected[i++] = I{ 81 };
	maybe_print("unsafe div_rounding_up");
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	maybe_print("div_rounding_up");
	results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 1 };
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	// Duplicate of the previous check, kept as-is: NUM_TEST_LINES (268) accounts for it
	results[i] = kat::div_rounding_up<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() , std::numeric_limits<I>::max() - 1 ); expected[i++] = I{ 2 };
	results[i] = kat::div_rounding_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ 1 };
	maybe_print("round_down");
	results[i] = kat::round_down<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down<I>( I{ 122 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down<I>( I{ 123 }, I{ 123 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down<I>( I{ 124 }, I{ 123 } ); expected[i++] = I{ 123 };
	maybe_print("round_down_to_full_warps");
	results[i] = kat::round_down_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_down_to_full_warps<I>( I{ 125 } ); expected[i++] = I{ 96 };
	// TODO: Consider testing rounding-up with negative dividends
	maybe_print("unsafe round_up");
	results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::unsafe::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 };
	results[i] = kat::unsafe::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 };
	maybe_print("round_up");
	results[i] = kat::round_up<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_up<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_up<I>( I{ 0 }, I{ 123 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_up<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::round_up<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_up<I>( I{ 63 }, I{ 64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 64 }, I{ 64 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up<I>( I{ 65 }, I{ 32 } ); expected[i++] = I{ 96 };
	results[i] = kat::round_up<I>( std::numeric_limits<I>::max() - 1, std::numeric_limits<I>::max() ); expected[i++] = I{ std::numeric_limits<I>::max() };
	maybe_print("round_down_to_power_of_2");
	results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 1 } ); expected[i++] = I{ 123 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
	results[i] = kat::round_down_to_power_of_2<I>( I{ 123 }, I{ 2 } ); expected[i++] = I{ 122 };
	maybe_print("round_up_to_power_of_2");
	results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
	results[i] = kat::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
	maybe_print("unsafe round_up_to_power_of_2");
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 3 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 23 }, I{ 1 } ); expected[i++] = I{ 23 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 4 };
	results[i] = kat::unsafe::round_up_to_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 64 };
	maybe_print("round_up_to_full_warps");
	results[i] = kat::round_up_to_full_warps<I>( I{ 0 } ); expected[i++] = I{ 0 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 1 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 8 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 16 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 31 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 32 } ); expected[i++] = I{ 32 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 33 } ); expected[i++] = I{ 64 };
	results[i] = kat::round_up_to_full_warps<I>( I{ 63 } ); expected[i++] = I{ 64 };
	maybe_print("gcd");
	results[i] = kat::gcd<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::gcd<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::gcd<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::gcd<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::gcd<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 4 };
	results[i] = kat::gcd<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 4 };
	results[i] = kat::gcd<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 2 };
	results[i] = kat::gcd<I>( I{ 120 }, I{ 70 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{ 70 }, I{ 120 } ); expected[i++] = I{ 10 };
	results[i] = kat::gcd<I>( I{ 97 }, I{ 120 } ); expected[i++] = I{ 1 };
	maybe_print("lcm");
	results[i] = kat::lcm<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 1 };
	results[i] = kat::lcm<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 2 };
	results[i] = kat::lcm<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::lcm<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 2 };
	results[i] = kat::lcm<I>( I{ 5 }, I{ 3 } ); expected[i++] = I{ 15 };
	results[i] = kat::lcm<I>( I{ 8 }, I{ 4 } ); expected[i++] = I{ 8 };
	results[i] = kat::lcm<I>( I{ 4 }, I{ 8 } ); expected[i++] = I{ 8 };
	results[i] = kat::lcm<I>( I{ 10 }, I{ 6 } ); expected[i++] = I{ 30 };
	maybe_print("is_even");
	results[i] = kat::is_even<I>( I{ 0 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{ 1 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 2 } ); expected[i++] = true;
	results[i] = kat::is_even<I>( I{ 3 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 123 } ); expected[i++] = false;
	results[i] = kat::is_even<I>( I{ 124 } ); expected[i++] = true;
	maybe_print("is_odd");
	results[i] = kat::is_odd<I>( I{ 0 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 2 } ); expected[i++] = false;
	results[i] = kat::is_odd<I>( I{ 3 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 123 } ); expected[i++] = true;
	results[i] = kat::is_odd<I>( I{ 124 } ); expected[i++] = false;
	maybe_print("log2");
	results[i] = kat::log2<I>( I{ 1 } ); expected[i++] = 0;
	results[i] = kat::log2<I>( I{ 2 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{ 3 } ); expected[i++] = 1;
	results[i] = kat::log2<I>( I{ 4 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{ 6 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{ 7 } ); expected[i++] = 2;
	results[i] = kat::log2<I>( I{ 8 } ); expected[i++] = 3;
	results[i] = kat::log2<I>( I{ 127 } ); expected[i++] = 6;
	// We don't have a good integer sqrt() implementation to offer here. Perhaps
	// we could offer something based on casting to float?
	//
	// results[i] = kat::sqrt<I>( I{ 0 } ); expected[i++] = 0;
	// results[i] = kat::sqrt<I>( I{ 1 } ); expected[i++] = 1;
	// results[i] = kat::sqrt<I>( I{ 2 } ); expected[i++] = 1;
	// results[i] = kat::sqrt<I>( I{ 3 } ); expected[i++] = 1;
	// results[i] = kat::sqrt<I>( I{ 4 } ); expected[i++] = 2;
	// results[i] = kat::sqrt<I>( I{ 5 } ); expected[i++] = 2;
	// results[i] = kat::sqrt<I>( I{ 9 } ); expected[i++] = 3;
	// results[i] = kat::sqrt<I>( I{ 10 } ); expected[i++] = 3;
	// results[i] = kat::sqrt<I>( I{ 127 } ); expected[i++] = 11;
	maybe_print("div_by_power_of_2");
	results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 1 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 1 }); expected[i++] = I{ 1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 1 }); expected[i++] = I{ 111 };
	results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 2 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 2 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 2 }, I { 2 }); expected[i++] = I{ 1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 3 }, I { 2 }); expected[i++] = I{ 1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 4 }, I { 2 }); expected[i++] = I{ 2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 2 }); expected[i++] = I{ 55 };
	results[i] = kat::div_by_power_of_2<I>( I{ 0 }, I { 16 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 1 }, I { 16 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 15 }, I { 16 }); expected[i++] = I{ 0 };
	results[i] = kat::div_by_power_of_2<I>( I{ 16 }, I { 16 }); expected[i++] = I{ 1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 17 }, I { 16 }); expected[i++] = I{ 1 };
	results[i] = kat::div_by_power_of_2<I>( I{ 32 }, I { 16 }); expected[i++] = I{ 2 };
	results[i] = kat::div_by_power_of_2<I>( I{ 111 }, I { 16 }); expected[i++] = I{ 6 };
	maybe_print("divides");
	results[i] = kat::divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 3 }, I{ 0 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 3 }, I{ 1 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 3 }, I{ 2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 6 }, I{ 9 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 9 }, I{ 6 } ); expected[i++] = false;
	results[i] = kat::divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::divides<I>( I{ 24 }, I{ 4 } ); expected[i++] = false;
	maybe_print("is_divisible_by");
	results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 0 }, I{ 3 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 1 }, I{ 3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 3 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 2 }, I{ 4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 9 }, I{ 6 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 6 }, I{ 9 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by<I>( I{ 24 }, I{ 4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by<I>( I{ 4 }, I{ 24 } ); expected[i++] = false;
	maybe_print("is_divisible_by_power_of_2");
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 24 }, I{ 4 } ); expected[i++] = true;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 72 }, I{ 16 } ); expected[i++] = false;
	results[i] = kat::is_divisible_by_power_of_2<I>( I{ 64 }, I{ 16 } ); expected[i++] = true;
	maybe_print("power_of_2_divides");
	results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 0 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 1 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 1 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{ 1 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 2 }, I{ 2 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 2 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{ 4 }, I{ 24 } ); expected[i++] = true;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 72 } ); expected[i++] = false;
	results[i] = kat::power_of_2_divides<I>( I{ 16 }, I{ 64 } ); expected[i++] = true;
	maybe_print("log2_of_power_of_2");
	results[i] = kat::log2_of_power_of_2<I>( I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 4 } ); expected[i++] = I{ 2 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 8 } ); expected[i++] = I{ 3 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 16 } ); expected[i++] = I{ 4 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 32 } ); expected[i++] = I{ 5 };
	results[i] = kat::log2_of_power_of_2<I>( I{ 64 } ); expected[i++] = I{ 6 };
	maybe_print("modulo_power_of_2");
	results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 1 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 2 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 2 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 0 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 1 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 2 }, I{ 4 } ); expected[i++] = I{ 2 };
	results[i] = kat::modulo_power_of_2<I>( I{ 3 }, I{ 4 } ); expected[i++] = I{ 3 };
	results[i] = kat::modulo_power_of_2<I>( I{ 4 }, I{ 4 } ); expected[i++] = I{ 0 };
	results[i] = kat::modulo_power_of_2<I>( I{ 5 }, I{ 4 } ); expected[i++] = I{ 1 };
	results[i] = kat::modulo_power_of_2<I>( I{ 63 }, I{ 4 } ); expected[i++] = I{ 3 };
	// Total number of checks recorded above (verified: 268). Note that #define
	// ignores block scope, so this is visible to the rest of the file.
	#define NUM_TEST_LINES 268
}

} // namespace kernels
// TODO:
// * Test between_or_equal and strictly_between with differing types for all 3 arguments
// * Some floating-point tests
// * gcd tests with values of different types
// * Some tests with negative values
#define INSTANTIATE_CONSTEXPR_MATH_TEST(_tp) \
compile_time_execution_results<_tp> UNIQUE_IDENTIFIER(test_struct_); \
#define INTEGER_TYPES \
int8_t, int16_t, int32_t, int64_t, \
uint8_t, uint16_t, uint32_t, uint64_t, \
char, short, int, long, long long, \
signed char, signed short, signed int, signed long, signed long long, \
unsigned char, unsigned short, unsigned int, unsigned long, unsigned long long
TEST_SUITE("math") {
TEST_CASE_TEMPLATE("run-time on-device", I, INTEGER_TYPES)
{
cuda::device_t<> device { cuda::device::current::get() };
auto block_size { 1 };
auto num_grid_blocks { 1 };
auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) };
auto device_side_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
auto device_side_expected_results { cuda::memory::device::make_unique<I[]>(device, NUM_TEST_LINES) };
auto host_side_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
auto host_side_expected_results { std::unique_ptr<I[]>(new I[NUM_TEST_LINES]) };
cuda::launch(
kernels::try_out_integral_math_functions<I>,
launch_config,
device_side_results.get(), device_side_expected_results.get());
cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(I) * NUM_TEST_LINES);
cuda::memory::copy(host_side_expected_results.get(), device_side_expected_results.get(), sizeof(I) * NUM_TEST_LINES);
for(auto i { 0 }; i < NUM_TEST_LINES; i++) {
CHECK(host_side_results.get()[i] == host_side_expected_results.get()[i]);
if (host_side_results.get()[i] != host_side_expected_results.get()[i]) {
MESSAGE("index of failure was: " << i);
}
}
}
} // TEST_SUITE("constexpr_math")
|
f3ecaa16b1ee1133da14c69a586cfc11353a9af8.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cstdlib>
#define SIZE 1024
/*
void VectorAdd(int *a, int *b, int *c,int n) {
int i = 0;
for(; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
*/
__global__ void VectorAdd(int *a, int *b, int *c,int n) {
int i = threadIdx.x;
if( i < n)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c;
//a = (int *) std::malloc(SIZE * sizeof(int));
hipMallocManaged(&a, SIZE * sizeof(int));
//b = (int *) std::malloc(SIZE * sizeof(int));
hipMallocManaged(&b, SIZE * sizeof(int));
//c = (int *) std::malloc(SIZE * sizeof(int));
hipMallocManaged(&c, SIZE * sizeof(int));
for(int i = 0; i < SIZE; ++i) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
//VectorAdd(a, b, c, SIZE);
hipLaunchKernelGGL(( VectorAdd) , dim3(1), dim3(SIZE), 0, 0, a, b, c, SIZE); // block and thread size
hipDeviceSynchronize();
for(int i = 0; i < 10; ++i)
printf("C[%d] = %d \n", i, c[i]);
//free(a);
hipFree(a);
//free(b);
hipFree(b);
//free(c);
hipFree(c);
return 0;
} | f3ecaa16b1ee1133da14c69a586cfc11353a9af8.cu | //#include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <cstdlib>
#define SIZE 1024
/*
void VectorAdd(int *a, int *b, int *c,int n) {
int i = 0;
for(; i < n; ++i) {
c[i] = a[i] + b[i];
}
}
*/
__global__ void VectorAdd(int *a, int *b, int *c,int n) {
int i = threadIdx.x;
if( i < n)
c[i] = a[i] + b[i];
}
int main()
{
int *a, *b, *c;
//a = (int *) std::malloc(SIZE * sizeof(int));
cudaMallocManaged(&a, SIZE * sizeof(int));
//b = (int *) std::malloc(SIZE * sizeof(int));
cudaMallocManaged(&b, SIZE * sizeof(int));
//c = (int *) std::malloc(SIZE * sizeof(int));
cudaMallocManaged(&c, SIZE * sizeof(int));
for(int i = 0; i < SIZE; ++i) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
//VectorAdd(a, b, c, SIZE);
VectorAdd <<<1, SIZE>>> (a, b, c, SIZE); // block and thread size
cudaDeviceSynchronize();
for(int i = 0; i < 10; ++i)
printf("C[%d] = %d \n", i, c[i]);
//free(a);
cudaFree(a);
//free(b);
cudaFree(b);
//free(c);
cudaFree(c);
return 0;
} |
1e857a50fec2392662c7cc9a3fbacc0a1eae1c84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
***************************************************************************
Author(s) : Marwan Abdellah
Email(s) : <abdellah.marwan@gmail.com>
Module : cu_arr_1D_mul.cu
Description :
Created :
Note(s) :
***************************************************************************
*/
#ifndef _CU_1D_MUL_CU_
#define _CU_1D_MUL_CU_
/* */
__global__
void mulArrays_kernel__i(const int* in_1, const int* in_2, int* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
/* */
__global__
void mulArrays_kernel__f(const float* in_1, const float* in_2, float* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
/* */
__global__
void mulArrays_kernel__d(const double* in_1, const double* in_2, double* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
#endif /* _CU_1D_MUL_CU_ */ | 1e857a50fec2392662c7cc9a3fbacc0a1eae1c84.cu | /*
***************************************************************************
Author(s) : Marwan Abdellah
Email(s) : <abdellah.marwan@gmail.com>
Module : cu_arr_1D_mul.cu
Description :
Created :
Note(s) :
***************************************************************************
*/
#ifndef _CU_1D_MUL_CU_
#define _CU_1D_MUL_CU_
/* */
__global__
void mulArrays_kernel__i(const int* in_1, const int* in_2, int* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
/* */
__global__
void mulArrays_kernel__f(const float* in_1, const float* in_2, float* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
/* */
__global__
void mulArrays_kernel__d(const double* in_1, const double* in_2, double* out, int arraySize)
{
int thd_ID;
int blk_Dim = blockDim.x;
int blk_Idx = blockIdx.x;
int thd_Idx = threadIdx.x;
thd_ID = blk_Dim * blk_Idx + thd_Idx;
if (thd_ID < arraySize)
out[thd_ID] = in_1[thd_ID] * in_2[thd_ID];
}
#endif /* _CU_1D_MUL_CU_ */ |
d51d05a3caff7348cb542a9d4e550d9840dff4a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <helpers/PointersManager.h>
#include <ops/declarable/helpers/flatten.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static void SD_KERNEL flattenKernel(void **xBuffers, sd::LongType **xShapeInfos, sd::LongType *offsets,
sd::LongType numInputs, void *zBuffer, const sd::LongType *zShapeInfo, char order) {
int xCoord[SD_MAX_RANK];
// each block of threads works on 1 input array
for (sd::LongType e = blockIdx.x; e < numInputs; e += gridDim.x) {
auto z = reinterpret_cast<T *>(zBuffer) + offsets[e];
auto xBuffer = reinterpret_cast<T *>(xBuffers[e]);
auto xShapeInfo = xShapeInfos[e];
auto xLength = shape::length(xShapeInfo);
// each element of this input array has own place within common output array
for (sd::Unsigned i = threadIdx.x; i < xLength; i += blockDim.x)
z[i] = xBuffer[getIndexOffsetOrdered(i, xShapeInfo, order)];
}
}
template <typename T>
static void flatten_(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) {
PointersManager pm(context, "flatten");
std::vector<const void *> hdBuffers(inputs.size());
std::vector<sd::LongType> hOffsets(inputs.size());
std::vector<const sd::LongType *> hdShapes(inputs.size());
sd::LongType cOffset = 0;
// calculating offsets in output
for (int e = 0; e < inputs.size(); e++) {
hOffsets[e] = cOffset;
cOffset += inputs[e]->lengthOf();
hdBuffers[e] = inputs[e]->specialBuffer();
hdShapes[e] = inputs[e]->specialShapeInfo();
}
// copying pointers to device
auto dBuffers = (void **)pm.replicatePointer(hdBuffers.data(), inputs.size() * sizeof(void *));
auto dShapes = (sd::LongType **)pm.replicatePointer(hdShapes.data(), inputs.size() * sizeof(sd::LongType *));
auto dOffsets = (sd::LongType *)pm.replicatePointer(hOffsets.data(), inputs.size() * sizeof(sd::LongType));
hipLaunchKernelGGL(( flattenKernel<T>), dim3(256), dim3(512), 8192, *context->getCudaStream(),
dBuffers, dShapes, dOffsets, inputs.size(), output->specialBuffer(), output->specialShapeInfo(), order);
pm.synchronize();
}
void flatten(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) {
// FIXME: we want NDArrayFactory::prepareSpecialUse here eventually
for (auto v : inputs) v->syncToDevice();
BUILD_SINGLE_SELECTOR(output->dataType(), flatten_, (context, inputs, output, order), SD_COMMON_TYPES);
NDArray::registerSpecialUse({output}, {});
}
} // namespace helpers
} // namespace ops
} // namespace sd
| d51d05a3caff7348cb542a9d4e550d9840dff4a5.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <helpers/PointersManager.h>
#include <ops/declarable/helpers/flatten.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static void SD_KERNEL flattenKernel(void **xBuffers, sd::LongType **xShapeInfos, sd::LongType *offsets,
sd::LongType numInputs, void *zBuffer, const sd::LongType *zShapeInfo, char order) {
int xCoord[SD_MAX_RANK];
// each block of threads works on 1 input array
for (sd::LongType e = blockIdx.x; e < numInputs; e += gridDim.x) {
auto z = reinterpret_cast<T *>(zBuffer) + offsets[e];
auto xBuffer = reinterpret_cast<T *>(xBuffers[e]);
auto xShapeInfo = xShapeInfos[e];
auto xLength = shape::length(xShapeInfo);
// each element of this input array has own place within common output array
for (sd::Unsigned i = threadIdx.x; i < xLength; i += blockDim.x)
z[i] = xBuffer[getIndexOffsetOrdered(i, xShapeInfo, order)];
}
}
template <typename T>
static void flatten_(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) {
PointersManager pm(context, "flatten");
std::vector<const void *> hdBuffers(inputs.size());
std::vector<sd::LongType> hOffsets(inputs.size());
std::vector<const sd::LongType *> hdShapes(inputs.size());
sd::LongType cOffset = 0;
// calculating offsets in output
for (int e = 0; e < inputs.size(); e++) {
hOffsets[e] = cOffset;
cOffset += inputs[e]->lengthOf();
hdBuffers[e] = inputs[e]->specialBuffer();
hdShapes[e] = inputs[e]->specialShapeInfo();
}
// copying pointers to device
auto dBuffers = (void **)pm.replicatePointer(hdBuffers.data(), inputs.size() * sizeof(void *));
auto dShapes = (sd::LongType **)pm.replicatePointer(hdShapes.data(), inputs.size() * sizeof(sd::LongType *));
auto dOffsets = (sd::LongType *)pm.replicatePointer(hOffsets.data(), inputs.size() * sizeof(sd::LongType));
flattenKernel<T><<<256, 512, 8192, *context->getCudaStream()>>>(
dBuffers, dShapes, dOffsets, inputs.size(), output->specialBuffer(), output->specialShapeInfo(), order);
pm.synchronize();
}
void flatten(sd::LaunchContext *context, std::vector<NDArray *> &inputs, NDArray *output, char order) {
// FIXME: we want NDArrayFactory::prepareSpecialUse here eventually
for (auto v : inputs) v->syncToDevice();
BUILD_SINGLE_SELECTOR(output->dataType(), flatten_, (context, inputs, output, order), SD_COMMON_TYPES);
NDArray::registerSpecialUse({output}, {});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
e03daff9dc19f3805a6ea73b5a8fa0e912c55d8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math_functions.h>
#include "bbob_generators.cuh"
__device__ double fitness_function(double x[], int number_of_variables)
{
const double condition = 1.0e6;
size_t i = 0;
double result;
result = x[i] * x[i];
for(i = 1; i < number_of_variables; ++i)
{
const double exponent = 1.0 * (double)(long)i / ((double)(long)number_of_variables - 1.0);
result += pow(condition, exponent) * x[i] * x[i];
}
return result;
}
__device__ double wrapped_fitness_function(double x[], int number_of_variables,
double* xopt, double* M, double* b, double fopt)
{
transform_vars_shift(x, number_of_variables, xopt);
transform_vars_affine(x, number_of_variables, M, b);
transform_vars_oscillate(x, number_of_variables);
double temp[1];
temp[0] = fitness_function(x, number_of_variables);
transform_obj_shift(temp, 1, fopt);
return temp[0];
}
extern "C" {
__global__ void generateData(int dimension,
int rseed,
int function,
int instance,
double* vars_affine_m,
double* vars_affine_b,
double* vars_shift_xopt,
double* obj_shift_fopt)
{
bbob2009_compute_xopt(vars_shift_xopt, rseed, dimension);
obj_shift_fopt[0] = bbob2009_compute_fopt(function, instance);
double rot1[MAX_DIMENSIONS][MAX_DIMENSIONS];
bbob2009_compute_rotation(dimension, rot1, rseed + 1000000);
bbob2009_copy_rotation_matrix(rot1, vars_affine_m, vars_affine_b, dimension);
}
__global__ void transposeKernel(
double* positions,
double* velocities,
double* personalBests,
double* personalBestValues,
int particlesCount,
int dimensionsCount,
double* xopt, double* M, double* b, double fopt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount) return;
double* particleLoc = positions + i * dimensionsCount;
double* particleVel = velocities + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
{
particleLoc[i] += particleVel[i];
}
clamp(particleLoc, dimensionsCount, -5.0, 5.0);
double tempLocation[MAX_DIMENSIONS];
for(int i = 0; i < dimensionsCount; i++)
{
tempLocation[i] = particleLoc[i];
}
double newValue = wrapped_fitness_function(tempLocation, dimensionsCount, xopt, M, b, fopt);
if(newValue < personalBestValues[i])
{
personalBestValues[i] = newValue;
double* particlePersonalBest = personalBests + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
particlePersonalBest[i] = particleLoc[i];
}
}
} | e03daff9dc19f3805a6ea73b5a8fa0e912c55d8b.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <math_functions.h>
#include "bbob_generators.cuh"
__device__ double fitness_function(double x[], int number_of_variables)
{
const double condition = 1.0e6;
size_t i = 0;
double result;
result = x[i] * x[i];
for(i = 1; i < number_of_variables; ++i)
{
const double exponent = 1.0 * (double)(long)i / ((double)(long)number_of_variables - 1.0);
result += pow(condition, exponent) * x[i] * x[i];
}
return result;
}
__device__ double wrapped_fitness_function(double x[], int number_of_variables,
double* xopt, double* M, double* b, double fopt)
{
transform_vars_shift(x, number_of_variables, xopt);
transform_vars_affine(x, number_of_variables, M, b);
transform_vars_oscillate(x, number_of_variables);
double temp[1];
temp[0] = fitness_function(x, number_of_variables);
transform_obj_shift(temp, 1, fopt);
return temp[0];
}
extern "C" {
__global__ void generateData(int dimension,
int rseed,
int function,
int instance,
double* vars_affine_m,
double* vars_affine_b,
double* vars_shift_xopt,
double* obj_shift_fopt)
{
bbob2009_compute_xopt(vars_shift_xopt, rseed, dimension);
obj_shift_fopt[0] = bbob2009_compute_fopt(function, instance);
double rot1[MAX_DIMENSIONS][MAX_DIMENSIONS];
bbob2009_compute_rotation(dimension, rot1, rseed + 1000000);
bbob2009_copy_rotation_matrix(rot1, vars_affine_m, vars_affine_b, dimension);
}
__global__ void transposeKernel(
double* positions,
double* velocities,
double* personalBests,
double* personalBestValues,
int particlesCount,
int dimensionsCount,
double* xopt, double* M, double* b, double fopt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= particlesCount) return;
double* particleLoc = positions + i * dimensionsCount;
double* particleVel = velocities + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
{
particleLoc[i] += particleVel[i];
}
clamp(particleLoc, dimensionsCount, -5.0, 5.0);
double tempLocation[MAX_DIMENSIONS];
for(int i = 0; i < dimensionsCount; i++)
{
tempLocation[i] = particleLoc[i];
}
double newValue = wrapped_fitness_function(tempLocation, dimensionsCount, xopt, M, b, fopt);
if(newValue < personalBestValues[i])
{
personalBestValues[i] = newValue;
double* particlePersonalBest = personalBests + i * dimensionsCount;
for(int i = 0; i < dimensionsCount; i++)
particlePersonalBest[i] = particleLoc[i];
}
}
} |
54a7cd6bd3fc436a9a8c02753b9d3e097824971a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 %s -triple nvptx-unknown-unknown -fcuda-allow-host-calls-from-host-device -fcuda-is-device -Wno-cuda-compat -emit-llvm -o - | FileCheck %s
#include "Inputs/cuda.h"
extern "C"
void host_function() {}
// CHECK-LABEL: define void @hd_function_a
extern "C"
__host__ __device__ void hd_function_a() {
// CHECK: call void @host_function
host_function();
}
// CHECK: declare void @host_function
// CHECK-LABEL: define void @hd_function_b
extern "C"
__host__ __device__ void hd_function_b(bool b) { if (b) host_function(); }
// CHECK-LABEL: define void @device_function_b
extern "C"
__device__ void device_function_b() { hd_function_b(false); }
// CHECK-LABEL: define void @global_function
extern "C"
__global__ void global_function() {
// CHECK: call void @device_function_b
device_function_b();
}
// CHECK: !{{[0-9]+}} = !{void ()* @global_function, !"kernel", i32 1}
| 54a7cd6bd3fc436a9a8c02753b9d3e097824971a.cu | // RUN: %clang_cc1 %s -triple nvptx-unknown-unknown -fcuda-allow-host-calls-from-host-device -fcuda-is-device -Wno-cuda-compat -emit-llvm -o - | FileCheck %s
#include "Inputs/cuda.h"
extern "C"
void host_function() {}
// CHECK-LABEL: define void @hd_function_a
extern "C"
__host__ __device__ void hd_function_a() {
// CHECK: call void @host_function
host_function();
}
// CHECK: declare void @host_function
// CHECK-LABEL: define void @hd_function_b
extern "C"
__host__ __device__ void hd_function_b(bool b) { if (b) host_function(); }
// CHECK-LABEL: define void @device_function_b
extern "C"
__device__ void device_function_b() { hd_function_b(false); }
// CHECK-LABEL: define void @global_function
extern "C"
__global__ void global_function() {
// CHECK: call void @device_function_b
device_function_b();
}
// CHECK: !{{[0-9]+}} = !{void ()* @global_function, !"kernel", i32 1}
|
66adabda170b19c3e4dae6e980614ea81036ad0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include "../include/slic.h"
__device__ __constant__ float slic_factor;
void initializeSlicFactor()
{
const float * slic_factor_hp = &slic_factor_h;
hipError_t cudaStatus = hipMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float));
}
__global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
//if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
//{
//printf("k\n");
//}
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), d_pix_data[pix_index].l);
atomicAdd(&(d_spx_data[spx_index].accum[1]), d_pix_data[pix_index].a);
atomicAdd(&(d_spx_data[spx_index].accum[2]), d_pix_data[pix_index].b);
atomicAdd(&(d_spx_data[spx_index].accum[3]), 1);
atomicAdd(&(d_spx_data[spx_index].accum[4]), x);
atomicAdd(&(d_spx_data[spx_index].accum[5]), y);
}
}
__global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
// If we do 16 instead of 8, only have enough memory for a short, not an int,
// and 16*32*255 does not fit in a short
__shared__ unsigned short acc[6][3][3][8][32]; //LAB+count, 3x3 neighbors, 8x32 values
const int arraySize=6*3*3;
const int dimensions=8*32;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = (blockIdx.y * blockDim.y + threadIdx.y) / OPT6;
int sx = threadIdx.x;
int sy = threadIdx.y / OPT6;
int cc = threadIdx.y % OPT6;
int ccs = 0; // 0 or cc ?
int ccstep = 1; // 1 or OPT6 value ?
if (cc == 0) {
for (int nx=0;nx<3;++nx) for (int ny=0;ny<3;++ny) for(int c=ccs;c<6;c+=ccstep) acc[c][ny][nx][sy][sx]=0;
}
//__syncthreads(); // Sometimes needed for OPT6
int i_center = blockIdx.x * blockDim.x / spx_size;
//int j_center = (blockIdx.y * blockDim.y / 4) / spx_size; //OPT6
int j_center = y / spx_size;
if (cc==0) { //OPT6
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1);
int ny = (j<j_center) ? 0 : ((j>j_center) ? 2 : 1);
acc[0][ny][nx][sy][sx] = d_pix_data[pix_index].l;
acc[1][ny][nx][sy][sx] = d_pix_data[pix_index].a;
acc[2][ny][nx][sy][sx] = d_pix_data[pix_index].b;
acc[3][ny][nx][sy][sx] = 1;
acc[4][ny][nx][sy][sx] = x - (i_center * spx_size);
acc[5][ny][nx][sy][sx] = y - (j_center * spx_size);
} //OPT6
__syncthreads();
unsigned short* accptr = (unsigned short*)acc;
// Collapse over X and Y
int tid = threadIdx.y * blockDim.x + threadIdx.x;
for (int step=32*8/2; step>0; step /= 2)
{
int locationIndex = tid % step;
int threadGroup = tid / step;
//int maxThreadGroup = dimensions / step;
//int maxThreadGroup = blockDim.x * blockDim.y / step;
int maxThreadGroup = 32 * 8 * OPT6 / step; //OPT6
int maxLoopIndex = (arraySize + maxThreadGroup - 1) / maxThreadGroup;
// Divide arraySize (3*3*6=54) by max threadGroup + 1 and that's the loop
// Actual a = loop index * (max threadGroup + 1) + innerIndex
for (int loopIndex=0; loopIndex<maxLoopIndex; loopIndex++)
{
int innerIndex = loopIndex * maxThreadGroup + threadGroup;
if (innerIndex >= arraySize) continue;
//printf("i %d d %d l %d s %d t %d ts %d\n", innerIndex, dimensions, locationIndex, step,
//innerIndex*dimensions+locationIndex, innerIndex*dimensions+locationIndex+step);
*(accptr + (innerIndex*dimensions + locationIndex)) +=
*(accptr + (innerIndex*dimensions + locationIndex + step));
}
__syncthreads();
}
if (tid >= arraySize) return;
// Now, acc[c][ny][nx][0][0] has the values we need
int c = tid % 6;
tid /= 6;
int nx = tid % 3;
int ny = tid / 3;
int j = j_center + ny - 1;
if (j<0 || j>=spx_height) return;
int i = i_center + nx - 1;
if (i<0 || i>=spx_width) return;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[c]), (int)acc[c][ny][nx][0][0] +
(c>3 ? (((c==4)?i_center:j_center)*spx_size*acc[3][ny][nx][0][0]) : 0));
//if (i_center==30 && j_center==15 && d_spx_data[spx_index].accum[3]>0) printf("ic:%d jc:%d x:%d y:%d, qty:%d\n",i_center,j_center,d_spx_data[spx_index].accum[4],d_spx_data[spx_index].accum[5], d_spx_data[spx_index].accum[3]);
}
__global__ void k_averaging(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].l = d_spx_data[spx_index].accum[0] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].a = d_spx_data[spx_index].accum[1] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].b = d_spx_data[spx_index].accum[2] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].x = d_spx_data[spx_index].accum[4] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].y = d_spx_data[spx_index].accum[5] / d_spx_data[spx_index].accum[3];
}
}
__global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
__shared__ spx_data spx[9 * 32];
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0)
{
int sh_idx = 0;
for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1
{
for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1
{
if (j < 0 || j >= spx_height || i < 0 || i > spx_width)
{
sh_idx++;
continue;
}
int spx_index = j * spx_width + i;
// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
// printf("%i ::::: %i\n", spx_index, sh_idx);
spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x];
sh_idx++;
}
}
}
__syncthreads();
for(int i=0; i<9; i++)
{
int l_dist = l-(int)(spx[i + 8*blockIdx.x].l);
l_dist *= l_dist;
int a_dist = a-(int)(spx[i + 8*blockIdx.x].a);
a_dist *= a_dist;
int b_dist = b-(int)(spx[i + 8*blockIdx.x].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)spx[i + 8*blockIdx.x].x;
x_dist *= x_dist;
int y_dist = y-(int)spx[i + 8*blockIdx.x].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + i_sign[i]*window_size;
min_j = j_center + j_sign[i]*window_size;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
}
}
__global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int i = i_center - window_size; i <= i_center + window_size; i++)
{
if (i < 0 || i >= spx_width) continue;
for(int j = j_center - window_size; j <= j_center + window_size; j++)
{
if (j < 0 || j >= spx_height) continue;
int spx_index = j * spx_width + i;
int l_dist = l-(int)(d_spx_data[spx_index].l);
l_dist *= l_dist;
int a_dist = a-(int)(d_spx_data[spx_index].a);
a_dist *= a_dist;
int b_dist = b-(int)(d_spx_data[spx_index].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)d_spx_data[spx_index].x;
x_dist *= x_dist;
int y_dist = y-(int)d_spx_data[spx_index].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i;
min_j = j;
}
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_ownershipOpt2(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
__shared__ int spx[3][3][5]; // Y, X, LABXY
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
// Initialize SMEM
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int nx = tid % 3;
tid /= 3;
int ny = tid % 3;
tid /= 3;
if (tid < 5)
{
int value;
int i = i_center + nx - 1;
int j = j_center + ny - 1;
if (i<0 || i>=spx_width || j<0 || j>=spx_height)
{
value = -1;
}
else
{
int spx_index = j * spx_width + i;
const spx_data& spix = d_spx_data[spx_index];
switch(tid) //TODO:Get rid of it by using better data struct.?
{
case 0: value=spix.l; break;
case 1: value=spix.a; break;
case 2: value=spix.b; break;
case 3: value=spix.x; break;
case 4: value=spix.y; break;
}
}
spx[ny][nx][tid] = value;
}
__syncthreads();
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx)
{
int* spix = spx[ny][nx];
if (spix[0]==-1) continue;
int l_dist = l-spix[0];
l_dist *= l_dist;
int a_dist = a-spix[1];
a_dist *= a_dist;
int b_dist = b-spix[2];
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-spix[3];
x_dist *= x_dist;
int y_dist = y-spix[4];
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + nx - 1;
min_j = j_center + ny - 1;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_reset(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].accum[0] = 0;
d_spx_data[spx_index].accum[1] = 0;
d_spx_data[spx_index].accum[2] = 0;
d_spx_data[spx_index].accum[3] = 0;
d_spx_data[spx_index].accum[4] = 0;
d_spx_data[spx_index].accum[5] = 0;
}
}
| 66adabda170b19c3e4dae6e980614ea81036ad0b.cu | #include <cmath>
#include <cstdio>
#include "../include/slic.h"
__device__ __constant__ float slic_factor;
void initializeSlicFactor()
{
const float * slic_factor_hp = &slic_factor_h;
cudaError_t cudaStatus = cudaMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float));
}
__global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
//if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
//{
//printf("k\n");
//}
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[0]), d_pix_data[pix_index].l);
atomicAdd(&(d_spx_data[spx_index].accum[1]), d_pix_data[pix_index].a);
atomicAdd(&(d_spx_data[spx_index].accum[2]), d_pix_data[pix_index].b);
atomicAdd(&(d_spx_data[spx_index].accum[3]), 1);
atomicAdd(&(d_spx_data[spx_index].accum[4]), x);
atomicAdd(&(d_spx_data[spx_index].accum[5]), y);
}
}
__global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data)
{
// If we do 16 instead of 8, only have enough memory for a short, not an int,
// and 16*32*255 does not fit in a short
__shared__ unsigned short acc[6][3][3][8][32]; //LAB+count, 3x3 neighbors, 8x32 values
const int arraySize=6*3*3;
const int dimensions=8*32;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = (blockIdx.y * blockDim.y + threadIdx.y) / OPT6;
int sx = threadIdx.x;
int sy = threadIdx.y / OPT6;
int cc = threadIdx.y % OPT6;
int ccs = 0; // 0 or cc ?
int ccstep = 1; // 1 or OPT6 value ?
if (cc == 0) {
for (int nx=0;nx<3;++nx) for (int ny=0;ny<3;++ny) for(int c=ccs;c<6;c+=ccstep) acc[c][ny][nx][sy][sx]=0;
}
//__syncthreads(); // Sometimes needed for OPT6
int i_center = blockIdx.x * blockDim.x / spx_size;
//int j_center = (blockIdx.y * blockDim.y / 4) / spx_size; //OPT6
int j_center = y / spx_size;
if (cc==0) { //OPT6
int pix_index = y * pix_width + x;
int i = d_own_data[pix_index].i;
int j = d_own_data[pix_index].j;
int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1);
int ny = (j<j_center) ? 0 : ((j>j_center) ? 2 : 1);
acc[0][ny][nx][sy][sx] = d_pix_data[pix_index].l;
acc[1][ny][nx][sy][sx] = d_pix_data[pix_index].a;
acc[2][ny][nx][sy][sx] = d_pix_data[pix_index].b;
acc[3][ny][nx][sy][sx] = 1;
acc[4][ny][nx][sy][sx] = x - (i_center * spx_size);
acc[5][ny][nx][sy][sx] = y - (j_center * spx_size);
} //OPT6
__syncthreads();
unsigned short* accptr = (unsigned short*)acc;
// Collapse over X and Y
int tid = threadIdx.y * blockDim.x + threadIdx.x;
for (int step=32*8/2; step>0; step /= 2)
{
int locationIndex = tid % step;
int threadGroup = tid / step;
//int maxThreadGroup = dimensions / step;
//int maxThreadGroup = blockDim.x * blockDim.y / step;
int maxThreadGroup = 32 * 8 * OPT6 / step; //OPT6
int maxLoopIndex = (arraySize + maxThreadGroup - 1) / maxThreadGroup;
// Divide arraySize (3*3*6=54) by max threadGroup + 1 and that's the loop
// Actual a = loop index * (max threadGroup + 1) + innerIndex
for (int loopIndex=0; loopIndex<maxLoopIndex; loopIndex++)
{
int innerIndex = loopIndex * maxThreadGroup + threadGroup;
if (innerIndex >= arraySize) continue;
//printf("i %d d %d l %d s %d t %d ts %d\n", innerIndex, dimensions, locationIndex, step,
//innerIndex*dimensions+locationIndex, innerIndex*dimensions+locationIndex+step);
*(accptr + (innerIndex*dimensions + locationIndex)) +=
*(accptr + (innerIndex*dimensions + locationIndex + step));
}
__syncthreads();
}
if (tid >= arraySize) return;
// Now, acc[c][ny][nx][0][0] has the values we need
int c = tid % 6;
tid /= 6;
int nx = tid % 3;
int ny = tid / 3;
int j = j_center + ny - 1;
if (j<0 || j>=spx_height) return;
int i = i_center + nx - 1;
if (i<0 || i>=spx_width) return;
int spx_index = j * spx_width + i;
atomicAdd(&(d_spx_data[spx_index].accum[c]), (int)acc[c][ny][nx][0][0] +
(c>3 ? (((c==4)?i_center:j_center)*spx_size*acc[3][ny][nx][0][0]) : 0));
//if (i_center==30 && j_center==15 && d_spx_data[spx_index].accum[3]>0) printf("ic:%d jc:%d x:%d y:%d, qty:%d\n",i_center,j_center,d_spx_data[spx_index].accum[4],d_spx_data[spx_index].accum[5], d_spx_data[spx_index].accum[3]);
}
__global__ void k_averaging(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].l = d_spx_data[spx_index].accum[0] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].a = d_spx_data[spx_index].accum[1] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].b = d_spx_data[spx_index].accum[2] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].x = d_spx_data[spx_index].accum[4] / d_spx_data[spx_index].accum[3];
d_spx_data[spx_index].y = d_spx_data[spx_index].accum[5] / d_spx_data[spx_index].accum[3];
}
}
__global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
__shared__ spx_data spx[9 * 32];
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1};
int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1};
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0)
{
int sh_idx = 0;
for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1
{
for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1
{
if (j < 0 || j >= spx_height || i < 0 || i > spx_width)
{
sh_idx++;
continue;
}
int spx_index = j * spx_width + i;
// if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)
// printf("%i ::::: %i\n", spx_index, sh_idx);
spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes?
spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x];
if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8))
spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x];
sh_idx++;
}
}
}
__syncthreads();
for(int i=0; i<9; i++)
{
int l_dist = l-(int)(spx[i + 8*blockIdx.x].l);
l_dist *= l_dist;
int a_dist = a-(int)(spx[i + 8*blockIdx.x].a);
a_dist *= a_dist;
int b_dist = b-(int)(spx[i + 8*blockIdx.x].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)spx[i + 8*blockIdx.x].x;
x_dist *= x_dist;
int y_dist = y-(int)spx[i + 8*blockIdx.x].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + i_sign[i]*window_size;
min_j = j_center + j_sign[i]*window_size;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
}
}
__global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int i = i_center - window_size; i <= i_center + window_size; i++)
{
if (i < 0 || i >= spx_width) continue;
for(int j = j_center - window_size; j <= j_center + window_size; j++)
{
if (j < 0 || j >= spx_height) continue;
int spx_index = j * spx_width + i;
int l_dist = l-(int)(d_spx_data[spx_index].l);
l_dist *= l_dist;
int a_dist = a-(int)(d_spx_data[spx_index].a);
a_dist *= a_dist;
int b_dist = b-(int)(d_spx_data[spx_index].b);
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-(int)d_spx_data[spx_index].x;
x_dist *= x_dist;
int y_dist = y-(int)d_spx_data[spx_index].y;
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i;
min_j = j;
}
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_ownershipOpt2(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data)
{
float min_dist = 10E99;// max_float;
int min_i = 0;
int min_j = 0;
__shared__ int spx[3][3][5]; // Y, X, LABXY
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < pix_height && x < pix_width)
{
int pix_index = y * pix_width + x;
int i_center = x/spx_size;
int j_center = y/spx_size;
// Initialize SMEM
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int nx = tid % 3;
tid /= 3;
int ny = tid % 3;
tid /= 3;
if (tid < 5)
{
int value;
int i = i_center + nx - 1;
int j = j_center + ny - 1;
if (i<0 || i>=spx_width || j<0 || j>=spx_height)
{
value = -1;
}
else
{
int spx_index = j * spx_width + i;
const spx_data& spix = d_spx_data[spx_index];
switch(tid) //TODO:Get rid of it by using better data struct.?
{
case 0: value=spix.l; break;
case 1: value=spix.a; break;
case 2: value=spix.b; break;
case 3: value=spix.x; break;
case 4: value=spix.y; break;
}
}
spx[ny][nx][tid] = value;
}
__syncthreads();
int l = d_pix_data[pix_index].l;
int a = d_pix_data[pix_index].a;
int b = d_pix_data[pix_index].b;
for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx)
{
int* spix = spx[ny][nx];
if (spix[0]==-1) continue;
int l_dist = l-spix[0];
l_dist *= l_dist;
int a_dist = a-spix[1];
a_dist *= a_dist;
int b_dist = b-spix[2];
b_dist *= b_dist;
int dlab = l_dist + a_dist + b_dist;
int x_dist = x-spix[3];
x_dist *= x_dist;
int y_dist = y-spix[4];
y_dist *= y_dist;
int dxy = x_dist + y_dist;
float D = dlab + slic_factor * dxy;
if (D < min_dist)
{
min_dist = D;
min_i = i_center + nx - 1;
min_j = j_center + ny - 1;
}
}
d_own_data[pix_index].i = min_i;
d_own_data[pix_index].j = min_j;
//d_own_data[pix_index].i = (i_center / 4) * 4;
//d_own_data[pix_index].j = (j_center / 4) * 4;
}
}
__global__ void k_reset(spx_data* d_spx_data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < spx_width && j < spx_height)
{
int spx_index = j * spx_width + i;
d_spx_data[spx_index].accum[0] = 0;
d_spx_data[spx_index].accum[1] = 0;
d_spx_data[spx_index].accum[2] = 0;
d_spx_data[spx_index].accum[3] = 0;
d_spx_data[spx_index].accum[4] = 0;
d_spx_data[spx_index].accum[5] = 0;
}
}
|
c82a44da11b7af33b147df047d4d3b75cbf3ecc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
__device__ unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = r * w + c;
const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y));//here error
const unsigned char intensity = clip(255 - dist);
d_out[i].x = 0;
d_out[i].y = intensity;
d_out[i].z = 0;
d_out[i].w = 255;
}
void kernelLauncher(uchar4 * d_out, int w, int h, int2 pos)
{
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1) / TX, (h + TY - 1) / TY);
distanceKernel << <gridSize, blockSize >> > (d_out, w, h, pos);
}
| c82a44da11b7af33b147df047d4d3b75cbf3ecc6.cu | #include "kernel.h"
#define TX 32
#define TY 32
__device__ unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int r = blockIdx.y * blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = r * w + c;
const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y));//here error
const unsigned char intensity = clip(255 - dist);
d_out[i].x = 0;
d_out[i].y = intensity;
d_out[i].z = 0;
d_out[i].w = 255;
}
void kernelLauncher(uchar4 * d_out, int w, int h, int2 pos)
{
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1) / TX, (h + TY - 1) / TY);
distanceKernel << <gridSize, blockSize >> > (d_out, w, h, pos);
}
|
66660ca0a9a64965c167440d4784740da7322d8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuDProd.h"
__global__ void kernelDProduct(int N, int item_per_thread,
double * __restrict__ a_vec,
double * __restrict__ b_vec,
double * __restrict__ output) {
extern __shared__ double partial_dot[];
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int lid = threadIdx.x;
int group_size = blockDim.x;
int offset = gid *item_per_thread;
int x;
partial_dot[lid] = 0;
for (x = offset ; x < offset + item_per_thread; x++){
if(x < N){
partial_dot[lid] += a_vec[x] * b_vec[x];
}
}
__syncthreads();
for(int i = group_size/2; i>0; i >>= 1) {
if(lid < i) {
partial_dot[lid] += partial_dot[lid + i];
}
__syncthreads();
}
if(lid == 0) {
output[blockIdx.x] = partial_dot[0];
}
}
//get optimal block size
int getDProdBlockSize(int n,int minGridSize, int blockSize){
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernelDProduct, 0, n);
return blockSize;
}
double runDotProduct(int n,
double * xMatrix,
int xMatrixSize,
double * yMatrix,
int yMatrixSize,
double * dprod_output,
int outMatrixSize,
double * xMatrix_d,
double * yMatrix_d,
double * dprod_output_d,
int item_per_thread,
int blockSize,
int numBlocks){
int x;
double my_res= 0;
//copy arrays to host
hipMemcpy( xMatrix_d, xMatrix, sizeof(double)*xMatrixSize, hipMemcpyHostToDevice );
hipMemcpy( yMatrix_d, yMatrix, sizeof(double)*yMatrixSize, hipMemcpyHostToDevice );
//blockSize = blockSize/item_per_thread;
//launch kernel
hipLaunchKernelGGL(( kernelDProduct), dim3(numBlocks), dim3(blockSize), blockSize * sizeof(double), 0, n, item_per_thread,
xMatrix_d, yMatrix_d, dprod_output_d);
hipMemcpy(dprod_output, dprod_output_d, sizeof(double)*numBlocks, hipMemcpyDeviceToHost );
//reduction
for( x = 0;x < numBlocks; x++){
my_res += dprod_output[x];
}
return my_res;
}
| 66660ca0a9a64965c167440d4784740da7322d8a.cu | #include "cuDProd.h"
__global__ void kernelDProduct(int N, int item_per_thread,
double * __restrict__ a_vec,
double * __restrict__ b_vec,
double * __restrict__ output) {
extern __shared__ double partial_dot[];
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int lid = threadIdx.x;
int group_size = blockDim.x;
int offset = gid *item_per_thread;
int x;
partial_dot[lid] = 0;
for (x = offset ; x < offset + item_per_thread; x++){
if(x < N){
partial_dot[lid] += a_vec[x] * b_vec[x];
}
}
__syncthreads();
for(int i = group_size/2; i>0; i >>= 1) {
if(lid < i) {
partial_dot[lid] += partial_dot[lid + i];
}
__syncthreads();
}
if(lid == 0) {
output[blockIdx.x] = partial_dot[0];
}
}
//get optimal block size
int getDProdBlockSize(int n,int minGridSize, int blockSize){
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernelDProduct, 0, n);
return blockSize;
}
double runDotProduct(int n,
double * xMatrix,
int xMatrixSize,
double * yMatrix,
int yMatrixSize,
double * dprod_output,
int outMatrixSize,
double * xMatrix_d,
double * yMatrix_d,
double * dprod_output_d,
int item_per_thread,
int blockSize,
int numBlocks){
int x;
double my_res= 0;
//copy arrays to host
cudaMemcpy( xMatrix_d, xMatrix, sizeof(double)*xMatrixSize, cudaMemcpyHostToDevice );
cudaMemcpy( yMatrix_d, yMatrix, sizeof(double)*yMatrixSize, cudaMemcpyHostToDevice );
//blockSize = blockSize/item_per_thread;
//launch kernel
kernelDProduct<<<numBlocks, blockSize, blockSize * sizeof(double)>>>(n, item_per_thread,
xMatrix_d, yMatrix_d, dprod_output_d);
cudaMemcpy(dprod_output, dprod_output_d, sizeof(double)*numBlocks, cudaMemcpyDeviceToHost );
//reduction
for( x = 0;x < numBlocks; x++){
my_res += dprod_output[x];
}
return my_res;
}
|
ef948e3497d37a69fe734150e1e52f0dfae1ec5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <bits/stdc++.h>
#include <hip/hip_fp16.h>
#include <sstream>
#include <fstream>
#include <string.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__
void ew_gpu_mmul(float* d_o, __half* d_i, __half* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width))
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
for(int k=0; k<num_ch; k++){
d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += __half2float(d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)])*__half2float(d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)]);
}
}
}
}
}
void element_wise_mmul(float* output, __half* input, __half* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
__half ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
__half wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = __half2float(ip)*__half2float(wt);
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITALIZING MATRICES*********************************/
__half *IP = (__half*) malloc(batch_size*C*H*W*sizeof(__half));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
__half *WT = (__half*) malloc(M*C*R*S*sizeof(__half));
//float WT[R][S];
float* d_o;
__half* d_i;
__half* d_w;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
WT[m*C*R*S+k*R*S+c*S+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = __float2half(0);
else
IP[n*C*H*W+k*H*W+c*W+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(__half));
hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(__half), hipMemcpyHostToDevice);
hipMalloc((void**) &d_w, M*C*R*S*sizeof(__half));
hipMemcpy(d_w, WT, M*C*R*S*sizeof(__half), hipMemcpyHostToDevice);
hipMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));
//cpu_start = clock();
//clock_t start, end;
//start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//end = clock();
//printf("cpu time is %f secs\n", (float)(end-start)/CLOCKS_PER_SEC);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,1);
dim3 dimBlock(27,27,1);
//gpu_start = clock();hipLaunchKernelGGL((
ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
//gpu_end = clock();
hipMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);
float max_error = 0;
int g,h,s,u;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if (error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error = %f\n", max_error);
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
hipFree(d_o);
hipFree(d_i);
hipFree(d_w);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
| ef948e3497d37a69fe734150e1e52f0dfae1ec5a.cu | #include <stdio.h>
#include <iostream>
#include <math.h>
#include <bits/stdc++.h>
#include <cuda_fp16.h>
#include <sstream>
#include <fstream>
#include <string.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 96
#define H 31
#define W 31
#define R 5
#define S 5
#define M 256
#define E 27
#define F 27
#define U 1
__global__
void ew_gpu_mmul(float* d_o, __half* d_i, __half* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
int row = threadIdx.y; int col = threadIdx.x;
if((row<height) && (col<width))
{
for (int i=0; i<wt_width; i++){
for (int j=0; j<wt_width; j++){
for(int k=0; k<num_ch; k++){
d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += __half2float(d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)])*__half2float(d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)]);
}
}
}
}
}
void element_wise_mmul(float* output, __half* input, __half* weight, int batch_size)
{
int x,y,i,j,m,n,k;
for(n=0; n<batch_size; n++){
for (m=0 ; m<M; m++){
for (x=0; x<F; x++){
for(y=0; y<E; y++){
// OP[x][y] = 0; // adding bias to output
for (i=0; i<R; i++){
for (j=0; j<S; j++){
for(k=0; k<C; k++){
__half ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
__half wt = weight[m*C*R*S+k*R*S+i*S+j];
float prod = __half2float(ip)*__half2float(wt);
if(prod>=0)
output[n*E*F*M+m*E*F+x*E+y] += prod;
//OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
}}
}
}
}
}
}
}
int main(int argc, char* argv[])
{
int batch_size = atoi(argv[1]);
/*************INITALIZING MATRICES*********************************/
__half *IP = (__half*) malloc(batch_size*C*H*W*sizeof(__half));
//float IP[H][W];
float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float));
//float OP[F][E];
float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
__half *WT = (__half*) malloc(M*C*R*S*sizeof(__half));
//float WT[R][S];
float* d_o;
__half* d_i;
__half* d_w;
//clock_t cpu_start, gpu_start, cpu_end, gpu_end;
//int a,b,c,d;
int c,d,m,n,k;
/*INITIALIZING WEIGHT MATRIX*/
for (m=0; m<M; m++){
for(k=0;k<C;k++){
for (c=0; c<R; c++){
for(d=0; d<S; d++){
//WT[c][d] = 2.0;
WT[m*C*R*S+k*R*S+c*S+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
/*INITIALIZING OUTPUT MATRIX*/
for (n=0; n<batch_size;n++){
for (m=0; m<M; m++){
for (c=0; c<F; c++){
for(d=0; d<E; d++){
//OP[c][d] = 0;
OP[n*M*F*E+m*F*E+c*E+d] = 0;
}
}
}
}
/*INITIALIZING INPUT MATRIX*/
for (n=0; n<batch_size; n++){
for(k=0;k<C;k++){
for (c=0; c<H; c++){
for(d=0; d<W; d++){
// IP[c][d] = (a+b+c+d);
if ((c<=1) || (d<=1) || (c>=29) || (d>=29))
IP[n*C*H*W+k*H*W+c*W+d] = __float2half(0);
else
IP[n*C*H*W+k*H*W+c*W+d] = __float2half((float)rand()/(RAND_MAX+1.0));
}
}
}
}
cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(__half));
cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(__half), cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_w, M*C*R*S*sizeof(__half));
cudaMemcpy(d_w, WT, M*C*R*S*sizeof(__half), cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));
//cpu_start = clock();
//clock_t start, end;
//start = clock();
//element_wise_mmul(OP, IP, WT, batch_size);
//end = clock();
//printf("cpu time is %f secs\n", (float)(end-start)/CLOCKS_PER_SEC);
//cpu_end = clock();
dim3 dimGrid(batch_size,256,1);
dim3 dimBlock(27,27,1);
//gpu_start = clock();
ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96);
//gpu_end = clock();
cudaMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);
float max_error = 0;
int g,h,s,u;
string filename = "layer_2_"+to_string(batch_size);
ifstream fin(filename.c_str());
string line ;
for (u=0;u<batch_size;u++){
for (s=0;s<M;s++){
for (g=0; g<F; g++){
for(h=0; h<E; h++){
getline(fin,line);
float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
//float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
if (error > max_error)
max_error = error;
// printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
// printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
}
}
}
}
fin.close();
printf("max error = %f\n", max_error);
//cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
//cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;
cudaFree(d_o);
cudaFree(d_i);
cudaFree(d_w);
free(OPG);
free(IP);
free(WT);
free(OP);
return 0;
}
|
f6e6e5ac456fcdcbade282120900e875dde008b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "utils.h"
#define TPOS(X, Y, DIM)((X) * (DIM) + (Y))
#define T_I 0
#define C_I 1
#define B_I 2
#define L_J 0
#define C_J 1
#define R_J 2
__global__ void calculate_next_generation(const bboard* d_a,
bboard* d_result,
const int dim,
const int dim_board_w,
const int dim_board_h,
const size_t pitch,
const int remaining_cells_w,
const int remaining_cells_h
) {
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int major_i = __mul24(blockIdx.x, blockDim.x) + tx; // row
const int major_j = __mul24(blockIdx.y, blockDim.y) + ty; // col
if (major_i >= dim_board_h) return;
if (major_j >= dim_board_w) return;
extern __shared__ bboard tiles[];
int bx, by;
if (blockIdx.x == gridDim.x - 1) {
bx = dim_board_h - blockIdx.x * blockDim.x + 2;
} else {
bx = blockDim.x + 2;
}
if (blockIdx.y == gridDim.y - 1) {
by = dim_board_w - blockIdx.y * blockDim.y + 2;
} else {
by = blockDim.y + 2;
}
int major_t = (major_i - 1 + dim_board_h) % dim_board_h;
int major_b = (major_i + 1) % dim_board_h;
int major_l = (major_j - 1 + dim_board_w) % dim_board_w;
int major_r = (major_j + 1) % dim_board_w;
bboard* top_row = (bboard*)((char*)d_a + major_t* pitch);
bboard* row = (bboard*)((char*)d_a + major_i * pitch);
bboard* bot_row = (bboard*)((char*)d_a + major_b * pitch);
tiles[TPOS(tx + 1, ty + 1, by)] = row[major_j];
if (ty == 0) {
//is in the left edge of the block. keep row
tiles[TPOS(tx + 1, 0, by)] = row[major_l];
if (tx == 0) {
//top left corner!
tiles[TPOS(0, 0, by)] = top_row[major_l];
}
if (tx == bx - 3) {
//bottom left corner
tiles[TPOS(bx - 1, 0, by)] = bot_row[major_l];
}
}
if (ty == by - 3) {
//is on the right edge
tiles[TPOS(tx + 1, by - 1, by)] = row[major_r];
if (tx == 0) {
// top right corner
tiles[TPOS(0, by - 1, by)] = top_row[major_r];
}
if (tx == bx - 3) {
// bottom right corner
tiles[TPOS(bx - 1, by - 1, by)] = bot_row[major_r];
}
}
if (tx == 0) {
//is on the upper edge of the block. keep col
tiles[TPOS(0, ty + 1, by)] = top_row[major_j];
}
if (tx == bx - 3) {
//is on the bottom edge
tiles[TPOS(bx - 1, ty + 1, by)] = bot_row[major_j];
}
__syncthreads();
bboard neighbors[3][3];
neighbors[C_I][C_J] = tiles[TPOS(tx + 1, ty + 1, by)];
neighbors[C_I][L_J] = tiles[TPOS(tx + 1, ty, by)];
neighbors[C_I][R_J] = tiles[TPOS(tx + 1, ty + 2, by)];
neighbors[T_I][C_J] = tiles[TPOS(tx, ty + 1, by)];
neighbors[T_I][L_J] = tiles[TPOS(tx, ty, by)];
neighbors[T_I][R_J] = tiles[TPOS(tx, ty + 2, by)];
neighbors[B_I][C_J] = tiles[TPOS(tx + 2, ty + 1, by)];
neighbors[B_I][L_J] = tiles[TPOS(tx + 2, ty, by)];
neighbors[B_I][R_J] = tiles[TPOS(tx + 2, ty + 2, by)];
const bool is_edge_r = (major_j == dim_board_w - 1);
const bool is_edge_d = (major_i == dim_board_h - 1);
const bool is_edge_u = (major_i == 0);
const bool is_edge_l = (major_j == 0);
const char limit_i = HEIGHT - __mul24(remaining_cells_h, is_edge_d);
const char limit_j = WIDTH - __mul24(remaining_cells_w, is_edge_r);
bboard value = 0;
char first_cells, second_cells;
char alive_cells, this_cell;
// char left_j;
// bool set;
#define i 0
#define up_i (HEIGHT - 1 - remaining_cells_h * is_edge_u)
#define up_n T_I
#define down_i (i + 1)
#define down_n (C_I)
#include "code_includes/outter_loop.c"
#undef i
for (char i = 1; i < limit_i - 1; i++) {
#define up_i (i - 1)
#define up_n C_I
#define down_i (i + 1)
#define down_n C_I
#include "code_includes/outter_loop.c"
}
#define i (limit_i - 1)
#define up_i (i - 1)
#define up_n C_I
#define down_i 0
#define down_n B_I
#include "code_includes/outter_loop.c"
#undef i
bboard* row_result = (bboard*)((char*)d_result + major_i * pitch);
row_result[major_j] = value;
}
| f6e6e5ac456fcdcbade282120900e875dde008b8.cu | #include <cuda_runtime.h>
#include "utils.h"
#define TPOS(X, Y, DIM)((X) * (DIM) + (Y))
#define T_I 0
#define C_I 1
#define B_I 2
#define L_J 0
#define C_J 1
#define R_J 2
__global__ void calculate_next_generation(const bboard* d_a,
bboard* d_result,
const int dim,
const int dim_board_w,
const int dim_board_h,
const size_t pitch,
const int remaining_cells_w,
const int remaining_cells_h
) {
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int major_i = __mul24(blockIdx.x, blockDim.x) + tx; // row
const int major_j = __mul24(blockIdx.y, blockDim.y) + ty; // col
if (major_i >= dim_board_h) return;
if (major_j >= dim_board_w) return;
extern __shared__ bboard tiles[];
int bx, by;
if (blockIdx.x == gridDim.x - 1) {
bx = dim_board_h - blockIdx.x * blockDim.x + 2;
} else {
bx = blockDim.x + 2;
}
if (blockIdx.y == gridDim.y - 1) {
by = dim_board_w - blockIdx.y * blockDim.y + 2;
} else {
by = blockDim.y + 2;
}
int major_t = (major_i - 1 + dim_board_h) % dim_board_h;
int major_b = (major_i + 1) % dim_board_h;
int major_l = (major_j - 1 + dim_board_w) % dim_board_w;
int major_r = (major_j + 1) % dim_board_w;
bboard* top_row = (bboard*)((char*)d_a + major_t* pitch);
bboard* row = (bboard*)((char*)d_a + major_i * pitch);
bboard* bot_row = (bboard*)((char*)d_a + major_b * pitch);
tiles[TPOS(tx + 1, ty + 1, by)] = row[major_j];
if (ty == 0) {
//is in the left edge of the block. keep row
tiles[TPOS(tx + 1, 0, by)] = row[major_l];
if (tx == 0) {
//top left corner!
tiles[TPOS(0, 0, by)] = top_row[major_l];
}
if (tx == bx - 3) {
//bottom left corner
tiles[TPOS(bx - 1, 0, by)] = bot_row[major_l];
}
}
if (ty == by - 3) {
//is on the right edge
tiles[TPOS(tx + 1, by - 1, by)] = row[major_r];
if (tx == 0) {
// top right corner
tiles[TPOS(0, by - 1, by)] = top_row[major_r];
}
if (tx == bx - 3) {
// bottom right corner
tiles[TPOS(bx - 1, by - 1, by)] = bot_row[major_r];
}
}
if (tx == 0) {
//is on the upper edge of the block. keep col
tiles[TPOS(0, ty + 1, by)] = top_row[major_j];
}
if (tx == bx - 3) {
//is on the bottom edge
tiles[TPOS(bx - 1, ty + 1, by)] = bot_row[major_j];
}
__syncthreads();
bboard neighbors[3][3];
neighbors[C_I][C_J] = tiles[TPOS(tx + 1, ty + 1, by)];
neighbors[C_I][L_J] = tiles[TPOS(tx + 1, ty, by)];
neighbors[C_I][R_J] = tiles[TPOS(tx + 1, ty + 2, by)];
neighbors[T_I][C_J] = tiles[TPOS(tx, ty + 1, by)];
neighbors[T_I][L_J] = tiles[TPOS(tx, ty, by)];
neighbors[T_I][R_J] = tiles[TPOS(tx, ty + 2, by)];
neighbors[B_I][C_J] = tiles[TPOS(tx + 2, ty + 1, by)];
neighbors[B_I][L_J] = tiles[TPOS(tx + 2, ty, by)];
neighbors[B_I][R_J] = tiles[TPOS(tx + 2, ty + 2, by)];
const bool is_edge_r = (major_j == dim_board_w - 1);
const bool is_edge_d = (major_i == dim_board_h - 1);
const bool is_edge_u = (major_i == 0);
const bool is_edge_l = (major_j == 0);
const char limit_i = HEIGHT - __mul24(remaining_cells_h, is_edge_d);
const char limit_j = WIDTH - __mul24(remaining_cells_w, is_edge_r);
bboard value = 0;
char first_cells, second_cells;
char alive_cells, this_cell;
// char left_j;
// bool set;
#define i 0
#define up_i (HEIGHT - 1 - remaining_cells_h * is_edge_u)
#define up_n T_I
#define down_i (i + 1)
#define down_n (C_I)
#include "code_includes/outter_loop.c"
#undef i
for (char i = 1; i < limit_i - 1; i++) {
#define up_i (i - 1)
#define up_n C_I
#define down_i (i + 1)
#define down_n C_I
#include "code_includes/outter_loop.c"
}
#define i (limit_i - 1)
#define up_i (i - 1)
#define up_n C_I
#define down_i 0
#define down_n B_I
#include "code_includes/outter_loop.c"
#undef i
bboard* row_result = (bboard*)((char*)d_result + major_i * pitch);
row_result[major_j] = value;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.